From 8ac292595fc1523254c5703a9aff598c4edbe057 Mon Sep 17 00:00:00 2001
From: John Dickinson
Date: Fri, 19 Oct 2012 13:50:57 -0700
Subject: [PATCH] changed TRUE_VALUES references to utils.config_true_value() call

cleaned up pep8 (v1.3.3) in all files this patch touches

Change-Id: I30e8314dfdc23fb70ab83741a548db9905dfccff
---
 bin/swift-bench                           |   6 +-
 bin/swift-dispersion-report               |  45 ++++---
 swift/account/auditor.py                  |   7 +-
 swift/account/reaper.py                   |   7 +-
 swift/account/server.py                   |   7 +-
 swift/common/bench.py                     |   6 +-
 swift/common/daemon.py                    |   5 +-
 swift/common/db_replicator.py             |  97 ++++++++-------
 swift/common/middleware/keystoneauth.py   |   4 +-
 swift/common/middleware/proxy_logging.py  |   4 +-
 swift/common/middleware/recon.py          |  20 +--
 swift/common/middleware/staticweb.py      |  58 ++++-----
 swift/common/middleware/tempauth.py       |   8 +-
 swift/common/utils.py                     |   9 ++
 swift/common/wsgi.py                      |  17 +--
 swift/container/auditor.py                |  22 ++--
 swift/container/server.py                 |  97 ++++++++-------
 swift/container/sync.py                   |  38 +++---
 swift/container/updater.py                |  16 +--
 swift/obj/auditor.py                      |   5 +-
 swift/obj/replicator.py                   |   8 +-
 swift/obj/server.py                       | 148 ++++++++++++----
 swift/obj/updater.py                      |   5 +-
 swift/proxy/controllers/base.py           |   4 +-
 swift/proxy/controllers/obj.py            | 152 +++++++++++++----
 swift/proxy/server.py                     |  11 +-
 test/unit/__init__.py                     |   4 +-
 test/unit/common/test_utils.py            |  12 ++
 28 files changed, 445 insertions(+), 377 deletions(-)

diff --git a/bin/swift-bench b/bin/swift-bench
index cd4f7c349a..b358ce71b8 100755
--- a/bin/swift-bench
+++ b/bin/swift-bench
@@ -23,7 +23,7 @@ from optparse import OptionParser
 from swift.common.bench import (BenchController, DistributedBenchController,
                                 create_containers, delete_containers)
-from swift.common.utils import readconf, LogAdapter, TRUE_VALUES
+from swift.common.utils import readconf, LogAdapter, config_true_value

 # The defaults should be sufficient to run swift-bench on a SAIO
 CONF_DEFAULTS = {
@@ -138,7 +138,7 @@ if __name__ == '__main__':
     options.containers = ['%s_%d' % (options.container_name, i)
                           for i in xrange(int(options.num_containers))]
     # check boolean options vs config parameter values
-    if str(options.delete).lower() in TRUE_VALUES:
+    if config_true_value(str(options.delete).lower()):
         options.delete = 'yes'
     else:
         options.delete = 'no'
@@ -170,5 +170,5 @@ if __name__ == '__main__':
         controller = controller_class(logger, options)
         controller.run()
-    if options.delete.lower() in TRUE_VALUES:
+    if config_true_value(options.delete.lower()):
         delete_containers(logger, options)
diff --git a/bin/swift-dispersion-report b/bin/swift-dispersion-report
index 4ff4468226..c1e807c240 100755
--- a/bin/swift-dispersion-report
+++ b/bin/swift-dispersion-report
@@ -30,7 +30,7 @@ from eventlet.pools import Pool
 from swift.common import direct_client
 from swiftclient import ClientException, Connection, get_auth
 from swift.common.ring import Ring
-from swift.common.utils import compute_eta, get_time_units, TRUE_VALUES
+from swift.common.utils import compute_eta, get_time_units, config_true_value

 unmounted = []
@@ -45,7 +45,8 @@ def get_error_log(prefix):
         global debug, unmounted, notfound
         if hasattr(msg_or_exc, 'http_status'):
             identifier = '%s:%s/%s' % (msg_or_exc.http_host,
-                msg_or_exc.http_port, msg_or_exc.http_device)
+                                       msg_or_exc.http_port,
+                                       msg_or_exc.http_device)
             if msg_or_exc.http_status == 507:
                 if identifier not in unmounted:
                     unmounted.append(identifier)
@@ -68,8 +69,8 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
                                 retries):
     with connpool.item() as conn:
-        containers = [c['name'] for c in conn.get_account(prefix='dispersion_',
-            full_listing=True)[1]]
+        containers = [c['name'] for c in conn.get_account(
+            prefix='dispersion_', full_listing=True)[1]]
     containers_listed = len(containers)
     if not containers_listed:
         print >>stderr, 'No containers to query. Has ' \
@@ -88,9 +89,8 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
         error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
         try:
             attempts, _junk = direct_client.retry(
-                direct_client.direct_head_container, node,
-                part, account, container, error_log=error_log,
-                retries=retries)
+                direct_client.direct_head_container, node, part, account,
+                container, error_log=error_log, retries=retries)
             retries_done[0] += attempts - 1
             found_count += 1
         except ClientException, err:
@@ -134,11 +134,12 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
             missing_copies = container_ring.replica_count - copies
             if container_copies_found[copies]:
                 print missing_string(container_copies_found[copies],
-                        missing_copies, container_ring.replica_count)
+                                     missing_copies,
+                                     container_ring.replica_count)
         print '%.02f%% of container copies found (%d of %d)' % (
-                value, copies_found, copies_expected)
+            value, copies_found, copies_expected)
         print 'Sample represents %.02f%% of the container partition space' % (
-                100.0 * distinct_partitions / container_ring.partition_count)
+            100.0 * distinct_partitions / container_ring.partition_count)
         stdout.flush()
         return None
     else:
@@ -159,8 +160,8 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
     container = 'dispersion_objects'
     with connpool.item() as conn:
         try:
-            objects = [o['name'] for o in conn.get_container(container,
-                prefix='dispersion_', full_listing=True)[1]]
+            objects = [o['name'] for o in conn.get_container(
+                container, prefix='dispersion_', full_listing=True)[1]]
         except ClientException, err:
             if err.http_status != 404:
                 raise
@@ -186,9 +187,8 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
         error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
         try:
             attempts, _junk = direct_client.retry(
-                direct_client.direct_head_object, node, part,
-                account, container, obj, error_log=error_log,
-                retries=retries)
+                direct_client.direct_head_object, node, part, account,
+                container, obj, error_log=error_log, retries=retries)
             retries_done[0] += attempts - 1
             found_count += 1
         except ClientException, err:
@@ -232,11 +232,11 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
             missing_copies = object_ring.replica_count - copies
             if object_copies_found[copies]:
                 print missing_string(object_copies_found[copies],
-                        missing_copies, object_ring.replica_count)
+                                     missing_copies, object_ring.replica_count)
         print '%.02f%% of object copies found (%d of %d)' % \
-                (value, copies_found, copies_expected)
+            (value, copies_found, copies_expected)
         print 'Sample represents %.02f%% of the object partition space' % (
-                100.0 * distinct_partitions / object_ring.partition_count)
+            100.0 * distinct_partitions / object_ring.partition_count)
         stdout.flush()
         return None
     else:
@@ -304,7 +304,7 @@ Usage: %prog [options] [conf_file]
     dispersion_coverage = int(conf.get('dispersion_coverage', 1))
     retries = int(conf.get('retries', 5))
    concurrency = int(conf.get('concurrency', 25))
-    if options.dump_json or conf.get('dump_json', 'no').lower() in TRUE_VALUES:
+    if options.dump_json or config_true_value(conf.get('dump_json', 'no')):
         json_output = True
     if options.debug:
         debug = True
@@ -316,10 +316,9 @@ Usage: %prog [options] [conf_file]
                             auth_version=conf.get('auth_version', '1.0'))
     account = url.rsplit('/', 1)[1]
     connpool = Pool(max_size=concurrency)
-    connpool.create = lambda: Connection(conf['auth_url'],
-                            conf['auth_user'], conf['auth_key'],
-                            retries=retries,
-                            preauthurl=url, preauthtoken=token)
+    connpool.create = lambda: Connection(
+        conf['auth_url'], conf['auth_user'], conf['auth_key'], retries=retries,
+        preauthurl=url, preauthtoken=token)
     container_ring = Ring(swift_dir, ring_name='container')
     object_ring = Ring(swift_dir, ring_name='object')
diff --git a/swift/account/auditor.py b/swift/account/auditor.py
index 1b6cf5d356..eeb73d8dfe 100644
--- a/swift/account/auditor.py
+++ b/swift/account/auditor.py
@@ -21,7 +21,7 @@ import swift.common.db
 from swift.account import server as account_server
 from swift.common.db import AccountBroker
 from swift.common.utils import get_logger, audit_location_generator, \
-    TRUE_VALUES, dump_recon_cache
+    config_true_value, dump_recon_cache
 from swift.common.daemon import Daemon
 from eventlet import Timeout
@@ -34,13 +34,12 @@ class AccountAuditor(Daemon):
         self.conf = conf
         self.logger = get_logger(conf, log_route='account-auditor')
         self.devices = conf.get('devices', '/srv/node')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            TRUE_VALUES
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.interval = int(conf.get('interval', 1800))
         self.account_passes = 0
         self.account_failures = 0
         swift.common.db.DB_PREALLOCATION = \
-            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
+            config_true_value(conf.get('db_preallocation', 'f'))
         self.recon_cache_path = conf.get('recon_cache_path',
                                          '/var/cache/swift')
         self.rcache = os.path.join(self.recon_cache_path, "account.recon")
diff --git a/swift/account/reaper.py b/swift/account/reaper.py
index e17a716ccf..385d95275e 100644
--- a/swift/account/reaper.py
+++ b/swift/account/reaper.py
@@ -27,7 +27,7 @@ from swift.common.db import AccountBroker
 from swift.common.direct_client import ClientException, \
     direct_delete_container, direct_delete_object, direct_get_container
 from swift.common.ring import Ring
-from swift.common.utils import get_logger, whataremyips, TRUE_VALUES
+from swift.common.utils import get_logger, whataremyips, config_true_value
 from swift.common.daemon import Daemon
@@ -56,8 +56,7 @@ class AccountReaper(Daemon):
         self.conf = conf
         self.logger = get_logger(conf, log_route='account-reaper')
         self.devices = conf.get('devices', '/srv/node')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            TRUE_VALUES
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.interval = int(conf.get('interval', 3600))
         self.swift_dir = conf.get('swift_dir', '/etc/swift')
         self.account_ring = None
@@ -71,7 +70,7 @@ class AccountReaper(Daemon):
             sqrt(self.concurrency)
         self.container_pool = GreenPool(size=self.container_concurrency)
         swift.common.db.DB_PREALLOCATION = \
-            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
+            config_true_value(conf.get('db_preallocation', 'f'))
         self.delay_reaping = int(conf.get('delay_reaping') or 0)
diff --git a/swift/account/server.py b/swift/account/server.py
index 62910f1df2..0385319746 100644
--- a/swift/account/server.py
+++ b/swift/account/server.py
@@ -26,7 +26,7 @@ from eventlet import Timeout
 import swift.common.db
 from swift.common.db import AccountBroker
 from swift.common.utils import get_logger, get_param, hash_path, public, \
-    normalize_timestamp, split_path, storage_directory, TRUE_VALUES, \
+    normalize_timestamp, split_path, storage_directory, config_true_value, \
     validate_device_partition, json
 from swift.common.constraints import ACCOUNT_LISTING_LIMIT, \
     check_mount, check_float, check_utf8, FORMAT2CONTENT_TYPE
@@ -47,15 +47,14 @@ class AccountController(object):
     def __init__(self, conf):
         self.logger = get_logger(conf, log_route='account-server')
         self.root = conf.get('devices', '/srv/node')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            TRUE_VALUES
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.replicator_rpc = ReplicatorRpc(self.root, DATADIR, AccountBroker,
                                             self.mount_check,
                                             logger=self.logger)
         self.auto_create_account_prefix = \
             conf.get('auto_create_account_prefix') or '.'
         swift.common.db.DB_PREALLOCATION = \
-            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
+            config_true_value(conf.get('db_preallocation', 'f'))

     def _get_account_broker(self, drive, part, account):
         hsh = hash_path(account)
diff --git a/swift/common/bench.py b/swift/common/bench.py
index 24e4a18d62..4a4b0a0fe5 100644
--- a/swift/common/bench.py
+++ b/swift/common/bench.py
@@ -28,7 +28,7 @@ import eventlet
 import eventlet.pools
 from eventlet.green.httplib import CannotSendRequest

-from swift.common.utils import TRUE_VALUES, LogAdapter
+from swift.common.utils import config_true_value, LogAdapter
 import swiftclient as client
 from swift.common import direct_client
 from swift.common.http import HTTP_CONFLICT
@@ -144,7 +144,7 @@ class Bench(object):
         self.user = conf.user
         self.key = conf.key
         self.auth_url = conf.auth
-        self.use_proxy = conf.use_proxy.lower() in TRUE_VALUES
+        self.use_proxy = config_true_value(conf.use_proxy)
         self.auth_version = conf.auth_version
         self.logger.info("Auth version: %s" % self.auth_version)
         if self.use_proxy:
@@ -314,7 +314,7 @@ class BenchController(object):
         self.logger = logger
         self.conf = conf
         self.names = []
-        self.delete = conf.delete.lower() in TRUE_VALUES
+        self.delete = config_true_value(conf.delete)
         self.gets = int(conf.num_gets)
         self.aborted = False
diff --git a/swift/common/daemon.py b/swift/common/daemon.py
index f90c255488..e28b5b8a4d 100644
--- a/swift/common/daemon.py
+++ b/swift/common/daemon.py
@@ -75,8 +75,7 @@ def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
                             log_name=kwargs.get('log_name'))

     # once on command line (i.e. daemonize=false) will over-ride config
-    once = once or \
-        conf.get('daemonize', 'true').lower() not in utils.TRUE_VALUES
+    once = once or not utils.config_true_value(conf.get('daemonize', 'true'))

     # pre-configure logger
     if 'logger' in kwargs:
@@ -87,7 +86,7 @@ def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
                                 log_route=section_name)

     # disable fallocate if desired
-    if conf.get('disable_fallocate', 'no').lower() in utils.TRUE_VALUES:
+    if utils.config_true_value(conf.get('disable_fallocate', 'no')):
         utils.disable_fallocate()

     try:
diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py
index 6f73f8470b..328e6638f6 100644
--- a/swift/common/db_replicator.py
+++ b/swift/common/db_replicator.py
@@ -29,8 +29,8 @@ import simplejson

 import swift.common.db
 from swift.common.utils import get_logger, whataremyips, storage_directory, \
-    renamer, mkdirs, lock_parent_directory, TRUE_VALUES, unlink_older_than, \
-    dump_recon_cache, rsync_ip
+    renamer, mkdirs, lock_parent_directory, config_true_value, \
+    unlink_older_than, dump_recon_cache, rsync_ip
 from swift.common import ring
 from swift.common.http import HTTP_NOT_FOUND, HTTP_INSUFFICIENT_STORAGE
 from swift.common.bufferedhttp import BufferedHTTPConnection
@@ -53,9 +53,9 @@ def quarantine_db(object_file, server_type):
                         ('container' or 'account')
     """
     object_dir = os.path.dirname(object_file)
-    quarantine_dir = os.path.abspath(os.path.join(object_dir, '..',
-        '..', '..', '..', 'quarantined', server_type + 's',
-        os.path.basename(object_dir)))
+    quarantine_dir = os.path.abspath(
+        os.path.join(object_dir, '..', '..', '..', '..', 'quarantined',
+                     server_type + 's', os.path.basename(object_dir)))
     try:
         renamer(object_dir, quarantine_dir)
     except OSError, e:
@@ -88,7 +88,7 @@ class ReplConnection(BufferedHTTPConnection):
         try:
             body = simplejson.dumps(args)
             self.request('REPLICATE', self.path, body,
-                {'Content-Type': 'application/json'})
+                         {'Content-Type': 'application/json'})
             response = self.getresponse()
             response.data = response.read()
             return response
@@ -107,8 +107,7 @@ class Replicator(Daemon):
         self.conf = conf
         self.logger = get_logger(conf, log_route='replicator')
         self.root = conf.get('devices', '/srv/node')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            ('true', 't', '1', 'on', 'yes', 'y')
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.port = int(conf.get('bind_port', self.default_port))
         concurrency = int(conf.get('concurrency', 8))
         self.cpool = GreenPool(size=concurrency)
@@ -118,13 +117,12 @@ class Replicator(Daemon):
         self.max_diffs = int(conf.get('max_diffs') or 100)
         self.interval = int(conf.get('interval') or
                             conf.get('run_pause') or 30)
-        self.vm_test_mode = conf.get(
-            'vm_test_mode', 'no').lower() in ('yes', 'true', 'on', '1')
+        self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
         self.node_timeout = int(conf.get('node_timeout', 10))
         self.conn_timeout = float(conf.get('conn_timeout', 0.5))
         self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
         swift.common.db.DB_PREALLOCATION = \
-            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
+            config_true_value(conf.get('db_preallocation', 'f'))
         self._zero_stats()
         self.recon_cache_path = conf.get('recon_cache_path',
                                          '/var/cache/swift')
@@ -149,17 +147,18 @@ class Replicator(Daemon):
                          {'count': self.stats['attempted'],
                           'time': time.time() - self.stats['start'],
                           'rate': self.stats['attempted'] /
-                              (time.time() - self.stats['start'] + 0.0000001)})
+                          (time.time() - self.stats['start'] + 0.0000001)})
         self.logger.info(_('Removed %(remove)d dbs') % self.stats)
         self.logger.info(_('%(success)s successes, %(failure)s failures')
-            % self.stats)
-        dump_recon_cache({'replication_stats': self.stats,
-                          'replication_time': time.time() - self.stats['start']
-                         }, self.rcache, self.logger)
+                         % self.stats)
+        dump_recon_cache(
+            {'replication_stats': self.stats,
+             'replication_time': time.time() - self.stats['start']},
+            self.rcache, self.logger)
         self.logger.info(' '.join(['%s:%s' % item for item in
-            self.stats.items() if item[0] in
-            ('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl', 'empty',
-             'diff_capped')]))
+                                   self.stats.items() if item[0] in
+                                   ('no_change', 'hashmatch', 'rsync',
+                                    'diff', 'ts_repl', 'empty',
+                                    'diff_capped')]))
@@ -185,7 +184,7 @@ class Replicator(Daemon):
         return proc.returncode == 0

     def _rsync_db(self, broker, device, http, local_id,
-            replicate_method='complete_rsync', replicate_timeout=None):
+                  replicate_method='complete_rsync', replicate_timeout=None):
         """
         Sync a whole db using rsync.
@@ -198,18 +197,18 @@ class Replicator(Daemon):
         """
         device_ip = rsync_ip(device['ip'])
         if self.vm_test_mode:
-            remote_file = '%s::%s%s/%s/tmp/%s' % (device_ip,
-                self.server_type, device['port'], device['device'],
-                local_id)
+            remote_file = '%s::%s%s/%s/tmp/%s' % (
+                device_ip, self.server_type, device['port'], device['device'],
+                local_id)
         else:
-            remote_file = '%s::%s/%s/tmp/%s' % (device_ip,
-                self.server_type, device['device'], local_id)
+            remote_file = '%s::%s/%s/tmp/%s' % (
+                device_ip, self.server_type, device['device'], local_id)
         mtime = os.path.getmtime(broker.db_file)
         if not self._rsync_file(broker.db_file, remote_file):
             return False
         # perform block-level sync if the db was modified during the first sync
         if os.path.exists(broker.db_file + '-journal') or \
                 os.path.getmtime(broker.db_file) > mtime:
             # grab a lock so nobody else can modify it
             with broker.lock():
                 if not self._rsync_file(broker.db_file, remote_file, False):
@@ -243,13 +242,15 @@ class Replicator(Daemon):
         if not response or response.status >= 300 or response.status < 200:
             if response:
                 self.logger.error(_('ERROR Bad response %(status)s from '
-                    '%(host)s'),
-                    {'status': response.status, 'host': http.host})
+                                    '%(host)s'),
+                                  {'status': response.status,
+                                   'host': http.host})
             return False
         point = objects[-1]['ROWID']
         objects = broker.get_items_since(point, self.per_diff)
     if objects:
-        self.logger.debug(_('Synchronization for %s has fallen more than '
+        self.logger.debug(_(
+            'Synchronization for %s has fallen more than '
             '%s rows behind; moving on and will try again next pass.') %
             (broker.db_file, self.max_diffs * self.per_diff))
         self.stats['diff_capped'] += 1
@@ -259,7 +260,8 @@ class Replicator(Daemon):
         response = http.replicate('merge_syncs', sync_table)
         if response and response.status >= 200 and response.status < 300:
             broker.merge_syncs([{'remote_id': remote_id,
-                'sync_point': point}], incoming=False)
+                                 'sync_point': point}],
+                                incoming=False)
             return True
         return False
@@ -283,7 +285,8 @@ class Replicator(Daemon):
             self.stats['hashmatch'] += 1
             self.logger.increment('hashmatches')
             broker.merge_syncs([{'remote_id': rinfo['id'],
-                'sync_point': rinfo['point']}], incoming=False)
+                                 'sync_point': rinfo['point']}],
+                                incoming=False)
             return True
@@ -297,7 +300,8 @@ class Replicator(Daemon):
         :returns: ReplConnection object
         """
         return ReplConnection(node, partition,
-            os.path.basename(db_file).split('.', 1)[0], self.logger)
+                              os.path.basename(db_file).split('.', 1)[0],
+                              self.logger)
@@ -319,8 +323,9 @@ class Replicator(Daemon):
                 _('ERROR Unable to connect to remote server: %s'), node)
             return False
         with Timeout(self.node_timeout):
-            response = http.replicate('sync', info['max_row'], info['hash'],
-                info['id'], info['created_at'], info['put_timestamp'],
+            response = http.replicate(
+                'sync', info['max_row'], info['hash'], info['id'],
+                info['created_at'], info['put_timestamp'],
                 info['delete_timestamp'], info['metadata'])
         if not response:
             return False
@@ -341,11 +346,11 @@ class Replicator(Daemon):
             self.stats['remote_merge'] += 1
             self.logger.increment('remote_merges')
             return self._rsync_db(broker, node, http, info['id'],
-                    replicate_method='rsync_then_merge',
-                    replicate_timeout=(info['count'] / 2000))
+                                  replicate_method='rsync_then_merge',
+                                  replicate_timeout=(info['count'] / 2000))
         # else send diffs over to the remote server
         return self._usync_db(max(rinfo['point'], local_sync),
-                broker, http, rinfo['id'], info['id'])
+                              broker, http, rinfo['id'], info['id'])

     def _replicate_object(self, partition, object_file, node_id):
         """
@@ -412,7 +417,8 @@ class Replicator(Daemon):
                     self.logger.error(_('ERROR Remote drive not mounted %s'), node)
             except (Exception, Timeout):
                 self.logger.exception(_('ERROR syncing %(file)s with node'
-                    ' %(node)s'), {'file': object_file, 'node': node})
+                                        ' %(node)s'),
+                                      {'file': object_file, 'node': node})
             self.stats['success' if success else 'failure'] += 1
             self.logger.increment('successes' if success else 'failures')
             responses.append(success)
@@ -542,7 +548,8 @@ class ReplicatorRpc(object):
                 not os.path.ismount(os.path.join(self.root, drive)):
             return Response(status='507 %s is not mounted' % drive)
         db_file = os.path.join(self.root, drive,
-            storage_directory(self.datadir, partition, hsh), hsh + '.db')
+                               storage_directory(self.datadir, partition, hsh),
+                               hsh + '.db')
         if op == 'rsync_then_merge':
             return self.rsync_then_merge(drive, db_file, args)
         if op == 'complete_rsync':
@@ -577,23 +584,23 @@ class ReplicatorRpc(object):
             timespan = time.time() - timemark
             if timespan > DEBUG_TIMINGS_THRESHOLD:
                 self.logger.debug(_('replicator-rpc-sync time for '
-                    'update_metadata: %.02fs') % timespan)
+                                    'update_metadata: %.02fs') % timespan)
         if info['put_timestamp'] != put_timestamp or \
-                info['created_at'] != created_at or \
-                info['delete_timestamp'] != delete_timestamp:
+                info['created_at'] != created_at or \
+                info['delete_timestamp'] != delete_timestamp:
             timemark = time.time()
             broker.merge_timestamps(
                 created_at, put_timestamp, delete_timestamp)
             timespan = time.time() - timemark
             if timespan > DEBUG_TIMINGS_THRESHOLD:
                 self.logger.debug(_('replicator-rpc-sync time for '
-                    'merge_timestamps: %.02fs') % timespan)
+                                    'merge_timestamps: %.02fs') % timespan)
         timemark = time.time()
         info['point'] = broker.get_sync(id_)
         timespan = time.time() - timemark
         if timespan > DEBUG_TIMINGS_THRESHOLD:
             self.logger.debug(_('replicator-rpc-sync time for get_sync: '
-                '%.02fs') % timespan)
+                                '%.02fs') % timespan)
         if hash_ == info['hash'] and info['point'] < remote_sync:
             timemark = time.time()
             broker.merge_syncs([{'remote_id': id_,
                                  'sync_point': remote_sync}])
             timespan = time.time() - timemark
             if timespan > DEBUG_TIMINGS_THRESHOLD:
                 self.logger.debug(_('replicator-rpc-sync time for '
-                    'merge_syncs: %.02fs') % timespan)
+                                    'merge_syncs: %.02fs') % timespan)
         return Response(simplejson.dumps(info))

     def merge_syncs(self, broker, args):
diff --git a/swift/common/middleware/keystoneauth.py b/swift/common/middleware/keystoneauth.py
index 1379aee6a8..67274cd3bb 100644
--- a/swift/common/middleware/keystoneauth.py
+++ b/swift/common/middleware/keystoneauth.py
@@ -91,12 +91,12 @@ class KeystoneAuth(object):
         self.reseller_admin_role = conf.get('reseller_admin_role',
                                             'ResellerAdmin')
         config_is_admin = conf.get('is_admin', "false").lower()
-        self.is_admin = config_is_admin in swift_utils.TRUE_VALUES
+        self.is_admin = swift_utils.config_true_value(config_is_admin)
         cfg_synchosts = conf.get('allowed_sync_hosts', '127.0.0.1')
         self.allowed_sync_hosts = [h.strip() for h in cfg_synchosts.split(',')
                                    if h.strip()]
         config_overrides = conf.get('allow_overrides', 't').lower()
-        self.allow_overrides = config_overrides in swift_utils.TRUE_VALUES
+        self.allow_overrides = swift_utils.config_true_value(config_overrides)

     def __call__(self, environ, start_response):
         identity = self._keystone_identity(environ)
diff --git a/swift/common/middleware/proxy_logging.py b/swift/common/middleware/proxy_logging.py
index c479dcb3f8..4a1bf7ea9b 100644
--- a/swift/common/middleware/proxy_logging.py
+++ b/swift/common/middleware/proxy_logging.py
@@ -42,7 +42,7 @@ from urllib import quote, unquote
 from swift.common.swob import Request
 from swift.common.utils import (get_logger, get_remote_client,
-                                get_valid_utf8_str, TRUE_VALUES)
+                                get_valid_utf8_str, config_true_value)

 class InputProxy(object):
@@ -92,7 +92,7 @@ class ProxyLoggingMiddleware(object):

     def __init__(self, app, conf):
         self.app = app
-        self.log_hdrs = conf.get('log_headers', 'no').lower() in TRUE_VALUES
+        self.log_hdrs = config_true_value(conf.get('log_headers', 'no'))

         # The leading access_* check is in case someone assumes that
         # log_statsd_valid_http_methods behaves like the other log_statsd_*
diff --git a/swift/common/middleware/recon.py b/swift/common/middleware/recon.py
index f426251519..4bcb8f6db6 100644
--- a/swift/common/middleware/recon.py
+++ b/swift/common/middleware/recon.py
@@ -17,7 +17,7 @@ import errno
 import os

 from swift.common.swob import Request, Response
-from swift.common.utils import split_path, get_logger, TRUE_VALUES
+from swift.common.utils import split_path, get_logger, config_true_value
 from swift.common.constraints import check_mount
 from resource import getpagesize
 from hashlib import md5
@@ -59,8 +59,7 @@ class ReconMiddleware(object):
         self.object_ring_path = os.path.join(swift_dir, 'object.ring.gz')
         self.rings = [self.account_ring_path, self.container_ring_path,
                       self.object_ring_path]
-        self.mount_check = conf.get('mount_check', 'true').lower() \
-            in TRUE_VALUES
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))

     def _from_recon_cache(self, cache_keys, cache_file, openr=open):
         """retrieve values from a recon cache file
@@ -159,7 +158,7 @@ class ReconMiddleware(object):
         if recon_type == 'object':
             return self._from_recon_cache(['object_expiration_pass',
                                            'expired_last_pass'],
-                                            self.object_recon_cache)
+                                          self.object_recon_cache)

     def get_auditor_info(self, recon_type):
         """get auditor info"""
@@ -186,8 +185,8 @@ class ReconMiddleware(object):
         """list unmounted (failed?) devices"""
         mountlist = []
         for entry in os.listdir(self.devices):
-            mpoint = {'device': entry, \
-                "mounted": check_mount(self.devices, entry)}
+            mpoint = {'device': entry,
+                      'mounted': check_mount(self.devices, entry)}
             if not mpoint['mounted']:
                 mountlist.append(mpoint)
         return mountlist
@@ -202,11 +201,12 @@ class ReconMiddleware(object):
                 capacity = disk.f_bsize * disk.f_blocks
                 available = disk.f_bsize * disk.f_bavail
                 used = disk.f_bsize * (disk.f_blocks - disk.f_bavail)
-                devices.append({'device': entry, 'mounted': True, \
-                    'size': capacity, 'used': used, 'avail': available})
+                devices.append({'device': entry, 'mounted': True,
+                                'size': capacity, 'used': used,
+                                'avail': available})
             else:
-                devices.append({'device': entry, 'mounted': False, \
-                    'size': '', 'used': '', 'avail': ''})
+                devices.append({'device': entry, 'mounted': False,
+                                'size': '', 'used': '', 'avail': ''})
         return devices

     def get_ring_md5(self, openr=open):
diff --git a/swift/common/middleware/staticweb.py b/swift/common/middleware/staticweb.py
index d26f087962..e64931acba 100644
--- a/swift/common/middleware/staticweb.py
+++ b/swift/common/middleware/staticweb.py
@@ -120,9 +120,9 @@ from urllib import unquote, quote as urllib_quote

 from swift.common.utils import cache_from_env, get_logger, human_readable, \
-    split_path, TRUE_VALUES
+    split_path, config_true_value
 from swift.common.wsgi import make_pre_authed_env, make_pre_authed_request, \
     WSGIContext
 from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND
 from swift.common.swob import Response, HTTPMovedPermanently, HTTPNotFound
@@ -179,10 +179,11 @@ class _StaticWebContext(WSGIContext):
         save_response_status = self._response_status
         save_response_headers = self._response_headers
         save_response_exc_info = self._response_exc_info
-        resp = self._app_call(make_pre_authed_env(env, 'GET',
-            '/%s/%s/%s/%s%s' % (self.version, self.account, self.container,
-            self._get_status_int(), self._error),
-            self.agent))
+        resp = self._app_call(make_pre_authed_env(
+            env, 'GET', '/%s/%s/%s/%s%s' % (
+                self.version, self.account, self.container,
+                self._get_status_int(), self._error),
+            self.agent))
         if is_success(self._get_status_int()):
             start_response(save_response_status, self._response_headers,
                            self._response_exc_info)
@@ -210,9 +211,10 @@ class _StaticWebContext(WSGIContext):
                 (self._index, self._error, self._listings,
                  self._listings_css) = cached_data
                 return
-        resp = make_pre_authed_request(env, 'HEAD',
-            '/%s/%s/%s' % (self.version, self.account, self.container),
-            agent=self.agent).get_response(self.app)
+        resp = make_pre_authed_request(
+            env, 'HEAD', '/%s/%s/%s' % (
+                self.version, self.account, self.container),
+            agent=self.agent).get_response(self.app)
         if is_success(resp.status_int):
             self._index = \
                 resp.headers.get('x-container-meta-web-index', '').strip()
@@ -225,9 +227,9 @@ class _StaticWebContext(WSGIContext):
                 '').strip()
             if memcache_client:
                 memcache_client.set(memcache_key,
-                    (self._index, self._error, self._listings,
-                    self._listings_css),
-                    timeout=self.cache_timeout)
+                                    (self._index, self._error, self._listings,
+                                     self._listings_css),
+                                    timeout=self.cache_timeout)

     def _listing(self, env, start_response, prefix=None):
         """
         Sends an HTML object listing to the remote client.

         :param env: The original WSGI environment dict.
         :param start_response: The original WSGI start_response hook.
         :param prefix: Any prefix desired for the container listing.
""" - if self._listings.lower() not in TRUE_VALUES: + if not config_true_value(self._listings): resp = HTTPNotFound()(env, self._start_response) return self._error_response(resp, env, start_response) - tmp_env = make_pre_authed_env(env, 'GET', - '/%s/%s/%s' % (self.version, self.account, self.container), - self.agent) + tmp_env = make_pre_authed_env( + env, 'GET', '/%s/%s/%s' % ( + self.version, self.account, self.container), + self.agent) tmp_env['QUERY_STRING'] = 'delimiter=/&format=json' if prefix: tmp_env['QUERY_STRING'] += '&prefix=%s' % quote(prefix) @@ -260,14 +263,14 @@ class _StaticWebContext(WSGIContext): return self._error_response(resp, env, start_response) headers = {'Content-Type': 'text/html; charset=UTF-8'} body = '\n' \ + 'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' \ '\n' \ ' \n' \ ' Listing of %s\n' % \ cgi.escape(env['PATH_INFO']) if self._listings_css: body += ' \n' % (self._build_css_path(prefix)) + 'href="%s" />\n' % (self._build_css_path(prefix)) else: body += '