Random pep8 fixes!
This patch merely fixes a selection of files to the point where pep8 1.3.3 is happy. Most of the errors are indentation issues on continued lines (E126, E127, E128), closing-bracket positions (E124), and redundant backslashes (E502). Patch set 2 addresses David's comments regarding a backslash and an odd comment - thanks, David!

Change-Id: I4fbd77ecf5395743cb96acb95fa946c322c16560
parent 4cf96b3791
commit 9344a4a582
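For readers unfamiliar with the pep8 codes cited in the commit message, the short sketch below illustrates the two patterns this patch applies: dropping a backslash that is redundant because the line break already sits inside brackets (E502), and aligning continuation lines with the opening bracket (E126/E127/E128, with E124 covering closing brackets). This is a minimal illustration only; the describe() helper and its values are invented and do not come from the Swift tree.

# Illustrative only: hypothetical helper, not code from this patch.


def describe(host, port, path):
    # pep8 1.3.3 flags the commented-out form with E502 (redundant
    # backslash inside parentheses) and E128 (continuation line not
    # aligned with the opening bracket):
    #
    #     msg = ('%s:%d %s' % \
    #         (host, port, path))
    #
    # The cleaned-up form drops the backslash and aligns the continuation.
    msg = ('%s:%d %s' %
           (host, port, path))
    return msg


if __name__ == '__main__':
    print(describe('127.0.0.1', 6010, '/v1/AUTH_test'))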
@@ -98,4 +98,4 @@ setup(
                 'filter_factory',
-            ],
-        },
-    )
+        ],
+    },
+)
@@ -58,7 +58,8 @@ def delete_containers(logger, conf):
             except client.ClientException, e:
                 if e.http_status != HTTP_CONFLICT:
                     logger.warn("Unable to delete container '%s'. "
-                        "Got http status '%d'." % (container, e.http_status))
+                                "Got http status '%d'."
+                                % (container, e.http_status))
 
     _func_on_containers(logger, conf, 'del_concurrency', _deleter)
 
@@ -179,14 +180,16 @@ class Bench(object):
         self.devices = conf.devices.split()
         self.names = names
         self.conn_pool = ConnectionPool(self.url,
-            max(self.put_concurrency, self.get_concurrency,
-                self.del_concurrency))
+                                        max(self.put_concurrency,
+                                            self.get_concurrency,
+                                            self.del_concurrency))
 
     def _log_status(self, title):
         total = time.time() - self.beginbeat
         self.logger.info(_('%(complete)s %(title)s [%(fail)s failures], '
                            '%(rate).01f/s'),
-            {'title': title, 'complete': self.complete, 'fail': self.failures,
-             'rate': (float(self.complete) / total)})
+                         {'title': title, 'complete': self.complete,
+                          'fail': self.failures,
+                          'rate': (float(self.complete) / total)})
 
     @contextmanager
@@ -366,7 +369,8 @@ class BenchDELETE(Bench):
             else:
                 node = {'ip': self.ip, 'port': self.port, 'device': device}
                 direct_client.direct_delete_object(node, partition,
-                    self.account, container_name, name)
+                                                    self.account,
+                                                    container_name, name)
         except client.ClientException, e:
             self.logger.debug(str(e))
             self.failures += 1
@@ -394,7 +398,8 @@ class BenchGET(Bench):
             else:
                 node = {'ip': self.ip, 'port': self.port, 'device': device}
                 direct_client.direct_get_object(node, partition,
-                    self.account, container_name, name)
+                                                 self.account,
+                                                 container_name, name)
         except client.ClientException, e:
             self.logger.debug(str(e))
             self.failures += 1
@@ -430,11 +435,14 @@ class BenchPUT(Bench):
             if self.use_proxy:
                 client.put_object(self.url, self.token,
-                    container_name, name, source,
-                    content_length=len(source), http_conn=conn)
+                                   container_name, name, source,
+                                   content_length=len(source),
+                                   http_conn=conn)
             else:
                 node = {'ip': self.ip, 'port': self.port, 'device': device}
                 direct_client.direct_put_object(node, partition,
-                    self.account, container_name, name, source,
-                    content_length=len(source))
+                                                 self.account,
+                                                 container_name, name,
+                                                 source,
+                                                 content_length=len(source))
         except client.ClientException, e:
             self.logger.debug(str(e))
@@ -102,8 +102,9 @@ class BufferedHTTPConnection(HTTPConnection):
         response = HTTPConnection.getresponse(self)
         logging.debug(_("HTTP PERF: %(time).5f seconds to %(method)s "
-            "%(host)s:%(port)s %(path)s)"),
-            {'time': time.time() - self._connected_time, 'method': self._method,
-             'host': self.host, 'port': self.port, 'path': self._path})
+                        "%(host)s:%(port)s %(path)s)"),
+                      {'time': time.time() - self._connected_time,
+                       'method': self._method, 'host': self.host,
+                       'port': self.port, 'path': self._path})
         return response
 
 
@@ -83,7 +83,8 @@ def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
         logger = kwargs.pop('logger')
     else:
         logger = utils.get_logger(conf, conf.get('log_name', section_name),
-            log_to_console=kwargs.pop('verbose', False), log_route=section_name)
+                                  log_to_console=kwargs.pop('verbose', False),
+                                  log_route=section_name)
 
     # disable fallocate if desired
     if conf.get('disable_fallocate', 'no').lower() in utils.TRUE_VALUES:
@@ -31,9 +31,9 @@ RUN_DIR = '/var/run/swift'
 # auth-server has been removed from ALL_SERVERS, start it explicitly
 ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
-    'container-replicator', 'container-server', 'container-sync',
-    'container-updater', 'object-auditor', 'object-server', 'object-expirer',
-    'object-replicator', 'object-updater', 'proxy-server',
-    'account-replicator', 'account-reaper']
+               'container-replicator', 'container-server', 'container-sync',
+               'container-updater', 'object-auditor', 'object-server',
+               'object-expirer', 'object-replicator', 'object-updater',
+               'proxy-server', 'account-replicator', 'account-reaper']
 MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
                 'object-server']
 REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
@@ -219,7 +219,8 @@ class Manager():
         # keep track of the pids yeiled back as killed for all servers
         killed_pids = set()
         for server, killed_pid in watch_server_pids(server_pids,
-                interval=KILL_WAIT, **kwargs):
+                                                     interval=KILL_WAIT,
+                                                     **kwargs):
             print _("%s (%s) appears to have stopped") % (server, killed_pid)
             killed_pids.add(killed_pid)
         if not killed_pids.symmetric_difference(signaled_pids):
@@ -360,8 +361,8 @@ class Server():
         """
         if self.server in STANDALONE_SERVERS:
             return pid_file.replace(
-                os.path.normpath(RUN_DIR), SWIFT_DIR, 1).rsplit(
-                    '.pid', 1)[0] + '.conf'
+                os.path.normpath(RUN_DIR), SWIFT_DIR, 1)\
+                .rsplit('.pid', 1)[0] + '.conf'
         else:
             return pid_file.replace(
                 os.path.normpath(RUN_DIR), SWIFT_DIR, 1).replace(
@@ -156,7 +156,7 @@ class MemcacheRing(object):
             flags |= JSON_FLAG
         for (server, fp, sock) in self._get_conns(key):
             try:
-                sock.sendall('set %s %d %d %s noreply\r\n%s\r\n' % \
+                sock.sendall('set %s %d %d %s noreply\r\n%s\r\n' %
                     (key, flags, timeout, len(value), value))
                 self._return_conn(server, fp, sock)
                 return
@@ -225,7 +225,7 @@ class MemcacheRing(object):
                     add_val = delta
                     if command == 'decr':
                         add_val = '0'
-                    sock.sendall('add %s %d %d %s\r\n%s\r\n' % \
+                    sock.sendall('add %s %d %d %s\r\n%s\r\n' %
                         (key, 0, timeout, len(add_val), add_val))
                     line = fp.readline().strip().split()
                     if line[0].upper() == 'NOT_STORED':
@@ -108,7 +108,8 @@ def clean_acl(name, value):
                     second = second[1:].strip()
                 if not second or second == '.':
                     raise ValueError('No host/domain value after referrer '
-                        'designation in ACL: %s' % repr(raw_value))
+                                     'designation in ACL: %s' %
+                                     repr(raw_value))
                 values.append('.r:%s%s' % (negate and '-' or '', second))
             else:
                 raise ValueError('Unknown designator %s in ACL: %s' %
@@ -156,8 +157,8 @@ def referrer_allowed(referrer, referrer_acl):
         for mhost in referrer_acl:
             if mhost[0] == '-':
                 mhost = mhost[1:]
-                if mhost == rhost or \
-                        (mhost[0] == '.' and rhost.endswith(mhost)):
+                if mhost == rhost or (mhost[0] == '.' and
+                                      rhost.endswith(mhost)):
                     allow = False
             elif mhost == '*' or mhost == rhost or \
                     (mhost[0] == '.' and rhost.endswith(mhost)):
@@ -67,6 +67,7 @@ for k in default_constraints:
         # tests.
         config[k] = '%s constraint is not defined' % k
 
+
 def load_constraint(name):
     c = config[name]
     if not isinstance(c, int):
@@ -93,8 +93,8 @@ class TestEmptyDevice(TestCase):
         for node in onodes[1:]:
             start_server(node['port'], self.port2server, self.pids)
         self.assertFalse(os.path.exists(obj_dir))
-        # We've indirectly verified the handoff node has the object, but let's
-        # directly verify it.
+        # We've indirectly verified the handoff node has the object, but
+        # let's directly verify it.
         another_onode = self.object_ring.get_more_nodes(opart).next()
         odata = direct_client.direct_get_object(
             another_onode, opart, self.account, container, obj)[-1]
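As a closing note, one way to confirm that a touched file stays clean for exactly the codes listed in the commit message is to run the pep8 tool with --select restricted to those codes. This is a hedged sketch, not part of the patch: it assumes a pep8 1.3.x CLI is installed and on PATH, and the file list is only a placeholder (setup.py is the one file identifiable from the first hunk).

# Hedged sketch: assumes the pep8 CLI is installed; file list is a placeholder.
import subprocess
import sys

FILES = ['setup.py']
SELECT = 'E124,E126,E127,E128,E502'

# Exit with pep8's return code: 0 means no violations for the selected codes.
rc = subprocess.call(['pep8', '--select=' + SELECT] + FILES)
sys.exit(rc)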