Fix spelling mistakes

git ls-files | misspellings -f -
Source: https://github.com/lyda/misspell-check

Change-Id: I4132e6a276e44e2a8985238358533d315ee8d9c4
Joe Gordon 2013-02-12 15:38:40 -08:00
parent af3bd46679
commit 45f0502b52
17 changed files with 25 additions and 24 deletions

@@ -183,7 +183,7 @@ swift (1.6.0)
 substantially affects the JSON output of the dispersion report, and any
 tools written to consume this output will need to be updated.
-* Added Solaris (Illumos) compability
+* Added Solaris (Illumos) compatibility
 * Added -a option to swift-get-nodes to show all handoffs

@@ -106,7 +106,7 @@ class Auditor(object):
             if resp.getheader('ETag').strip('"') != calc_hash:
                 self.object_checksum_mismatch += 1
                 consistent = False
-                print ' MD5 doesnt match etag for "%s" on %s/%s' \
+                print ' MD5 does not match etag for "%s" on %s/%s' \
                     % (path, node['ip'], node['device'])
             etags.append(resp.getheader('ETag'))
         else:

@@ -354,7 +354,7 @@ Request URI Description
 /recon/sockstat            returns consumable info from /proc/net/sockstat|6
 /recon/devices             returns list of devices and devices dir i.e. /srv/node
 /recon/async               returns count of async pending
-/recon/replication         returns object replication times (for backward compatability)
+/recon/replication         returns object replication times (for backward compatibility)
 /recon/replication/<type>  returns replication info for given type (account, container, object)
 /recon/auditor/<type>      returns auditor stats on last reported scan for given type (account, container, object)
 /recon/updater/<type>      returns last updater sweep times for given type (container, object)

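For context (not part of this change), these recon endpoints can be queried directly once the recon middleware is enabled; a minimal sketch, assuming the object server listens on 127.0.0.1:6000:

    # Query a recon endpoint and print the parsed JSON it returns.
    # The host and port here are assumptions, not taken from this commit.
    import json
    import urllib2

    url = 'http://127.0.0.1:6000/recon/replication'
    print json.loads(urllib2.urlopen(url).read())
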
@@ -24,12 +24,12 @@ The supported headers are,
 +----------------------------------------------+------------------------------+
 |X-Container-Meta-Access-Control-Allow-Headers | Headers to be allowed in     |
 |                                              | actual request by browser,   |
-|                                              | space seperated.             |
+|                                              | space separated.             |
 +----------------------------------------------+------------------------------+
 |X-Container-Meta-Access-Control-Expose-Headers| Headers exposed to the user  |
 |                                              | agent (e.g. browser) in the  |
 |                                              | the actual request response. |
-|                                              | Space seperated.             |
+|                                              | Space separated.             |
 +----------------------------------------------+------------------------------+
 
 Before a browser issues an actual request it may issue a `preflight request`_.
@@ -70,7 +70,7 @@ header,::
     -H 'X-Container-Meta-Access-Control-Allow-Origin: http://localhost' \
     http://192.168.56.3:8080/v1/AUTH_test/cont1
 
-At this point the container is now accessable to CORS clients hosted on
+At this point the container is now accessible to CORS clients hosted on
 http://localhost. Open the test CORS page in your browser.
 
 #. Populate the Token field

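As a rough illustration of the headers documented above (not part of this change), the space-separated lists are set as ordinary container metadata; the token and the custom header names below are placeholders:

    # Set CORS metadata on a container with a plain HTTP POST.
    # Host, port, account and container mirror the doc's example, but are
    # placeholders here, as is the token.
    import httplib

    conn = httplib.HTTPConnection('192.168.56.3', 8080)
    conn.request('POST', '/v1/AUTH_test/cont1', '', {
        'X-Auth-Token': '<token>',
        'X-Container-Meta-Access-Control-Allow-Origin': 'http://localhost',
        'X-Container-Meta-Access-Control-Allow-Headers': 'X-My-Header X-Other-Header',
        'X-Container-Meta-Access-Control-Expose-Headers': 'X-My-Header',
    })
    print conn.getresponse().status
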
@@ -153,7 +153,8 @@ def check_mount(root, drive):
     """
     Verify that the path to the device is a mount point and mounted. This
     allows us to fast fail on drives that have been unmounted because of
-    issues, and also prevents us for accidently filling up the root partition.
+    issues, and also prevents us for accidentally filling up the root
+    partition.
 
     :param root: base path where the devices are mounted
     :param drive: drive name to be checked

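A minimal usage sketch of the behaviour this docstring describes (the import path and the drive name are assumptions for illustration):

    # Fail fast when a drive is not actually mounted, instead of letting
    # writes land on the root partition.
    from swift.common.constraints import check_mount

    if not check_mount('/srv/node', 'sdb1'):
        print 'sdb1 is not mounted under /srv/node, skipping it'
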
@@ -793,7 +793,7 @@ class ContainerBroker(DatabaseBroker):
         return (row[0] == 0)
 
     def _commit_puts(self, item_list=None):
-        """Handles commiting rows in .pending files."""
+        """Handles committing rows in .pending files."""
         if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
             return
         if item_list is None:
@@ -1320,7 +1320,7 @@ class AccountBroker(DatabaseBroker):
             WHERE delete_timestamp < ? """, (timestamp, timestamp, timestamp))
 
     def _commit_puts(self, item_list=None):
-        """Handles commiting rows in .pending files."""
+        """Handles committing rows in .pending files."""
         if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
             return
         if item_list is None:

@@ -82,7 +82,7 @@ def command(func):
 
 def watch_server_pids(server_pids, interval=1, **kwargs):
-    """Monitor a collection of server pids yeilding back those pids that
+    """Monitor a collection of server pids yielding back those pids that
     aren't responding to signals.
 
     :param server_pids: a dict, lists of pids [int,...] keyed on

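A sketch of how this generator might be driven (assumptions: the swift.common.manager import path, Server objects keying the dict, and (server, pid) pairs being yielded):

    # Report pids that stop answering signals within the watch interval.
    # The pid value is purely illustrative.
    from swift.common.manager import Server, watch_server_pids

    server_pids = {Server('object'): [1234]}
    for server, pid in watch_server_pids(server_pids, interval=15):
        print '%s (pid %d) is not responding to signals' % (server, pid)
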
@@ -56,7 +56,7 @@ Unauthorized and 404 Not Found) will instead serve the
 ``X-Container-Meta-Web-Error: error.html`` will serve .../404error.html for
 requests for paths not found.
 
-For psuedo paths that have no <index.name>, this middleware can serve HTML file
+For pseudo paths that have no <index.name>, this middleware can serve HTML file
 listings if you set the ``X-Container-Meta-Web-Listings: true`` metadata item
 on the container.

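For context (not part of this change), the listings behaviour described above is switched on with container metadata; a sketch using python-swiftclient, with placeholder storage URL, token and container name:

    # Enable an index page and HTML listings for a static web container.
    from swiftclient import client

    client.post_container('http://127.0.0.1:8080/v1/AUTH_test', '<token>', 'web',
                          {'X-Container-Meta-Web-Index': 'index.html',
                           'X-Container-Meta-Web-Listings': 'true'})
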
@@ -146,7 +146,7 @@ class FallocateWrapper(object):
             self.func_name = 'posix_fallocate'
             self.fallocate = noop_libc_function
             return
-        ## fallocate is prefered because we need the on-disk size to match
+        ## fallocate is preferred because we need the on-disk size to match
         ## the allocated size. Older versions of sqlite require that the
         ## two sizes match. However, fallocate is Linux only.
         for func in ('fallocate', 'posix_fallocate'):
@@ -1066,7 +1066,7 @@ def compute_eta(start_time, current_value, final_value):
 
 def iter_devices_partitions(devices_dir, item_type):
     """
-    Iterate over partitions accross all devices.
+    Iterate over partitions across all devices.
 
     :param devices_dir: Path to devices
     :param item_type: One of 'accounts', 'containers', or 'objects'

@@ -295,7 +295,7 @@ class ContainerSync(Daemon):
                     break
                 key = hash_path(info['account'], info['container'],
                                 row['name'], raw_digest=True)
-                # This node will only intially sync out one third of the
+                # This node will only initially sync out one third of the
                 # objects (if 3 replicas, 1/4 if 4, etc.). This section
                 # will attempt to sync previously skipped rows in case the
                 # other nodes didn't succeed.
@@ -313,7 +313,7 @@ class ContainerSync(Daemon):
                 row = rows[0]
                 key = hash_path(info['account'], info['container'],
                                 row['name'], raw_digest=True)
-                # This node will only intially sync out one third of the
+                # This node will only initially sync out one third of the
                 # objects (if 3 replicas, 1/4 if 4, etc.). It'll come back
                 # around to the section above and attempt to sync
                 # previously skipped rows in case the other nodes didn't

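The split these comments describe can be pictured as each node taking only the rows whose key lands in its bucket on the first pass; an illustrative sketch of the idea only, not ContainerSync's actual code:

    # Each of `replicas` nodes initially handles roughly 1/replicas of the
    # rows, then later retries the skipped ones in case other nodes failed.
    def first_pass_rows(rows, node_index, replicas=3):
        return [row for row in rows
                if hash(row['name']) % replicas == node_index]

    rows = [{'name': 'obj-%d' % i} for i in range(9)]
    print len(first_pass_rows(rows, node_index=0))  # roughly a third of 9
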
@@ -408,7 +408,7 @@ class Controller(object):
     def container_info(self, account, container, account_autocreate=False):
         """
-        Get container information and thusly verify container existance.
+        Get container information and thusly verify container existence.
         This will also make a call to account_info to verify that the
         account exists.

@@ -717,15 +717,15 @@ class File(Base):
         self.conn.put_start(self.path, hdrs=headers, parms=parms, cfg=cfg)
 
-        transfered = 0
+        transferred = 0
         buff = data.read(block_size)
         try:
             while len(buff) > 0:
                 self.conn.put_data(buff)
                 buff = data.read(block_size)
-                transfered += len(buff)
+                transferred += len(buff)
                 if callable(callback):
-                    callback(transfered, self.size)
+                    callback(transferred, self.size)
 
             self.conn.put_end()
         except socket.timeout, err:

@@ -611,7 +611,7 @@ class TestServer(unittest.TestCase):
             self.assertEquals(len(running_pids), 1)
             self.assert_(1 in running_pids)
             self.assert_(2 not in running_pids)
-            # test persistant running pid files
+            # test persistent running pid files
             self.assert_(os.path.exists(os.path.join(t, 'test-server1.pid')))
             # test clean up stale pids
             pid_two = self.join_swift_dir('test-server2.pid')

@@ -302,7 +302,7 @@ class TestUtils(unittest.TestCase):
         lfo.tell()
 
     def test_parse_options(self):
-        # use mkstemp to get a file that is definately on disk
+        # use mkstemp to get a file that is definitely on disk
        with NamedTemporaryFile() as f:
            conf_file = f.name
            conf, options = utils.parse_options(test_args=[conf_file])

@@ -154,7 +154,7 @@ class TestContainerSync(unittest.TestCase):
     def test_run_once(self):
         # This runs runs_once with fakes twice, the first causing an interim
-        # report, the second with no interm report.
+        # report, the second with no interim report.
         time_calls = [0]
         audit_location_generator_calls = [0]

@@ -373,7 +373,7 @@ class TestObjectReplicator(unittest.TestCase):
         whole_path_from = os.path.join(self.objects, '0', data_dir)
         hashes_file = os.path.join(self.objects, '0',
                                    object_replicator.HASH_FILE)
-        # test that non existant file except caught
+        # test that non existent file except caught
         self.assertEquals(object_replicator.invalidate_hash(whole_path_from),
                           None)
         # test that hashes get cleared

@@ -1462,7 +1462,7 @@ class TestObjectController(unittest.TestCase):
         limit = MAX_META_OVERALL_SIZE
         controller = proxy_server.ObjectController(self.app, 'account',
                                                    'container', 'object')
-        count = limit / 256  # enough to cause the limit to be reched
+        count = limit / 256  # enough to cause the limit to be reached
         headers = dict(
             (('X-Object-Meta-' + str(i), 'a' * 256)
              for i in xrange(count + 1)))