Fix PEP8 issues in ./test/unit/common.

Fix some PEP8 issues in
       modified:   test_bufferedhttp.py
       modified:   test_constraints.py
       modified:   test_db.py
       modified:   test_db_replicator.py
       modified:   test_init.py
Make the code look pretty.

Change-Id: I1c374b1ccd4f028c4e4b2e8194a6d1c201d50571
This commit is contained in:
lrqrun 2012-08-31 11:24:46 +08:00
parent 4a2ae2b460
commit 7b664c99e5
5 changed files with 83 additions and 34 deletions

View File

@ -24,6 +24,7 @@ class TestBufferedHTTP(unittest.TestCase):
def test_http_connect(self):
bindsock = listen(('127.0.0.1', 0))
def accept(expected_par):
try:
with Timeout(3):

View File

@ -124,6 +124,7 @@ class TestDatabaseBroker(unittest.TestCase):
DatabaseBroker(':memory:').initialize,
normalize_timestamp('1'))
stub_dict = {}
def stub(*args, **kwargs):
for key in stub_dict.keys():
del stub_dict[key]
@ -156,6 +157,7 @@ class TestDatabaseBroker(unittest.TestCase):
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
stub_called = [False]
def delete_stub(*a, **kw):
stub_called[0] = True
broker = DatabaseBroker(':memory:')
@ -198,6 +200,7 @@ class TestDatabaseBroker(unittest.TestCase):
except Exception:
got_exc = True
self.assert_(got_exc)
def stub(*args, **kwargs):
pass
broker._initialize = stub
@ -273,6 +276,7 @@ class TestDatabaseBroker(unittest.TestCase):
except Exception:
got_exc = True
self.assert_(got_exc)
def stub(*args, **kwargs):
pass
broker._initialize = stub
@ -281,7 +285,8 @@ class TestDatabaseBroker(unittest.TestCase):
pass
with broker.lock():
pass
broker2 = DatabaseBroker(os.path.join(self.testdir, '1.db'), timeout=.1)
broker2 = DatabaseBroker(os.path.join(self.testdir, '1.db'),
timeout=.1)
broker2._initialize = stub
with broker.lock():
got_exc = False
@ -304,6 +309,7 @@ class TestDatabaseBroker(unittest.TestCase):
broker.db_type = 'test'
broker.db_contains_type = 'test'
uuid1 = str(uuid4())
def _initialize(conn, timestamp):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('CREATE TABLE test_stat (id TEXT)')
@ -350,6 +356,7 @@ class TestDatabaseBroker(unittest.TestCase):
broker = DatabaseBroker(':memory:')
broker.db_type = 'test'
broker.db_contains_type = 'test'
def _initialize(conn, timestamp):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('INSERT INTO test (one) VALUES ("1")')
@ -372,6 +379,7 @@ class TestDatabaseBroker(unittest.TestCase):
broker.db_type = 'test'
broker.db_contains_type = 'test'
uuid1 = str(uuid4())
def _initialize(conn, timestamp):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('CREATE TABLE test_stat (id TEXT)')
@ -408,6 +416,7 @@ class TestDatabaseBroker(unittest.TestCase):
def test_merge_syncs(self):
broker = DatabaseBroker(':memory:')
def stub(*args, **kwargs):
pass
broker._initialize = stub
@ -445,6 +454,7 @@ class TestDatabaseBroker(unittest.TestCase):
broker_uuid = str(uuid4())
broker_metadata = metadata and simplejson.dumps(
{'Test': ('Value', normalize_timestamp(1))}) or ''
def _initialize(conn, put_timestamp):
if put_timestamp is None:
put_timestamp = normalize_timestamp(0)
@ -688,7 +698,6 @@ class TestContainerBroker(unittest.TestCase):
res = broker.reclaim(normalize_timestamp(time()), time())
broker.delete_db(normalize_timestamp(time()))
def test_delete_object(self):
""" Test swift.common.db.ContainerBroker.delete_object """
broker = ContainerBroker(':memory:', account='a', container='c')
@ -913,7 +922,8 @@ class TestContainerBroker(unittest.TestCase):
def test_get_info(self):
""" Test swift.common.db.ContainerBroker.get_info """
broker = ContainerBroker(':memory:', account='test1', container='test2')
broker = ContainerBroker(':memory:', account='test1',
container='test2')
broker.initialize(normalize_timestamp('1'))
info = broker.get_info()
@ -962,7 +972,8 @@ class TestContainerBroker(unittest.TestCase):
self.assertEquals(info['x_container_sync_point2'], -1)
def test_set_x_syncs(self):
broker = ContainerBroker(':memory:', account='test1', container='test2')
broker = ContainerBroker(':memory:', account='test1',
container='test2')
broker.initialize(normalize_timestamp('1'))
info = broker.get_info()
@ -975,7 +986,8 @@ class TestContainerBroker(unittest.TestCase):
self.assertEquals(info['x_container_sync_point2'], 2)
def test_get_report_info(self):
broker = ContainerBroker(':memory:', account='test1', container='test2')
broker = ContainerBroker(':memory:', account='test1',
container='test2')
broker.initialize(normalize_timestamp('1'))
info = broker.get_info()
@ -1098,7 +1110,7 @@ class TestContainerBroker(unittest.TestCase):
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing], ['2/', '3/'])
listing = broker.list_objects_iter(10, '2/',None, None, '/')
listing = broker.list_objects_iter(10, '2/', None, None, '/')
self.assertEquals(len(listing), 1)
self.assertEquals([row[0] for row in listing], ['3/'])
@ -1167,14 +1179,17 @@ class TestContainerBroker(unittest.TestCase):
broker.put_object('/snakes', normalize_timestamp(0), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
#def list_objects_iter(self, limit, marker, prefix, delimiter, path=None,
# format=None):
#def list_objects_iter(self, limit, marker, prefix, delimiter,
# path=None, format=None):
listing = broker.list_objects_iter(100, None, None, '/pets/f', '/')
self.assertEquals([row[0] for row in listing], ['/pets/fish/', '/pets/fish_info.txt'])
self.assertEquals([row[0] for row in listing],
['/pets/fish/', '/pets/fish_info.txt'])
listing = broker.list_objects_iter(100, None, None, '/pets/fish', '/')
self.assertEquals([row[0] for row in listing], ['/pets/fish/', '/pets/fish_info.txt'])
self.assertEquals([row[0] for row in listing],
['/pets/fish/', '/pets/fish_info.txt'])
listing = broker.list_objects_iter(100, None, None, '/pets/fish/', '/')
self.assertEquals([row[0] for row in listing], ['/pets/fish/a', '/pets/fish/b'])
self.assertEquals([row[0] for row in listing],
['/pets/fish/a', '/pets/fish/b'])
def test_double_check_trailing_delimiter(self):
""" Test swift.common.db.ContainerBroker.list_objects_iter for a
@ -1226,12 +1241,14 @@ class TestContainerBroker(unittest.TestCase):
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
hasha = hashlib.md5('%s-%s' % ('a', '0000000001.00000')).digest()
hashb = hashlib.md5('%s-%s' % ('b', '0000000002.00000')).digest()
hashc = ''.join(('%2x' % (ord(a)^ord(b)) for a, b in zip(hasha, hashb)))
hashc = ''.join(('%2x' % (ord(a) ^ ord(b)) for a, b in zip(hasha,
hashb)))
self.assertEquals(broker.get_info()['hash'], hashc)
broker.put_object('b', normalize_timestamp(3), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
hashb = hashlib.md5('%s-%s' % ('b', '0000000003.00000')).digest()
hashc = ''.join(('%02x' % (ord(a)^ord(b)) for a, b in zip(hasha, hashb)))
hashc = ''.join(('%02x' % (ord(a) ^ ord(b)) for a, b in zip(hasha,
hashb)))
self.assertEquals(broker.get_info()['hash'], hashc)
def test_newid(self):
@ -1623,7 +1640,6 @@ class TestAccountBroker(unittest.TestCase):
# self.assert_('z' in containers)
# self.assert_('a' not in containers)
def test_delete_container(self):
""" Test swift.common.db.AccountBroker.delete_container """
broker = AccountBroker(':memory:', account='a')
@ -1674,7 +1690,8 @@ class TestAccountBroker(unittest.TestCase):
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
"SELECT put_timestamp FROM container").fetchone()[0],
timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
@ -1685,7 +1702,8 @@ class TestAccountBroker(unittest.TestCase):
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
"SELECT put_timestamp FROM container").fetchone()[0],
timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
@ -1698,7 +1716,8 @@ class TestAccountBroker(unittest.TestCase):
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
"SELECT put_timestamp FROM container").fetchone()[0],
timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
@ -1710,7 +1729,8 @@ class TestAccountBroker(unittest.TestCase):
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
"SELECT put_timestamp FROM container").fetchone()[0],
timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
@ -1722,7 +1742,8 @@ class TestAccountBroker(unittest.TestCase):
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
"SELECT put_timestamp FROM container").fetchone()[0],
timestamp)
self.assertEquals(conn.execute(
"SELECT delete_timestamp FROM container").fetchone()[0],
dtimestamp)
@ -1752,7 +1773,8 @@ class TestAccountBroker(unittest.TestCase):
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
"SELECT put_timestamp FROM container").fetchone()[0],
timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
@ -1936,7 +1958,7 @@ class TestAccountBroker(unittest.TestCase):
('b', '0000000002.00000-0000000000.00000-0-0')
).digest()
hashc = \
''.join(('%02x' % (ord(a)^ord(b)) for a, b in zip(hasha, hashb)))
''.join(('%02x' % (ord(a) ^ ord(b)) for a, b in zip(hasha, hashb)))
self.assertEquals(broker.get_info()['hash'], hashc)
broker.put_container('b', normalize_timestamp(3),
normalize_timestamp(0), 0, 0)
@ -1944,7 +1966,7 @@ class TestAccountBroker(unittest.TestCase):
('b', '0000000003.00000-0000000000.00000-0-0')
).digest()
hashc = \
''.join(('%02x' % (ord(a)^ord(b)) for a, b in zip(hasha, hashb)))
''.join(('%02x' % (ord(a) ^ ord(b)) for a, b in zip(hasha, hashb)))
self.assertEquals(broker.get_info()['hash'], hashc)
def test_merge_items(self):

View File

@ -41,10 +41,13 @@ def lock_parent_directory(filename):
class FakeRing:
class Ring:
devs = []
def __init__(self, path, reload_time=15, ring_name=None):
pass
def get_part_nodes(self, part):
return []
def get_more_nodes(self, *args):
return []
@ -66,8 +69,10 @@ class FakeRingWithNodes:
def __init__(self, path, reload_time=15, ring_name=None):
pass
def get_part_nodes(self, part):
return self.devs[:3]
def get_more_nodes(self, *args):
return (d for d in self.devs[3:])
@ -75,6 +80,7 @@ class FakeRingWithNodes:
class FakeProcess:
def __init__(self, *codes):
self.codes = iter(codes)
def __call__(self, *args, **kwargs):
class Failure:
def communicate(innerself):
@ -99,11 +105,14 @@ class ReplHttp:
self.response = response
replicated = False
host = 'localhost'
def replicate(self, *args):
self.replicated = True
class Response:
status = 200
data = self.response
def read(innerself):
return self.response
return Response()
@ -114,6 +123,7 @@ class ChangingMtimesOs:
self.mtime = 0
self.path = self
self.basename = os.path.basename
def getmtime(self, file):
self.mtime += 1
return self.mtime
@ -124,31 +134,41 @@ class FakeBroker:
get_repl_missing_table = False
stub_replication_info = None
db_type = 'container'
def __init__(self, *args, **kwargs):
return None
@contextmanager
def lock(self):
yield True
def get_sync(self, *args, **kwargs):
return 5
def get_syncs(self):
return []
def get_items_since(self, point, *args):
if point == 0:
return [{'ROWID': 1}]
return []
def merge_syncs(self, *args, **kwargs):
self.args = args
def merge_items(self, *args):
self.args = args
def get_replication_info(self):
if self.get_repl_missing_table:
raise Exception('no such table')
if self.stub_replication_info:
return self.stub_replication_info
return {'delete_timestamp': 0, 'put_timestamp': 1, 'count': 0}
def reclaim(self, item_timestamp, sync_timestamp):
pass
def get_info(self):
pass
@ -169,20 +189,23 @@ class TestDBReplicator(unittest.TestCase):
def stub_delete_db(self, object_file):
self.delete_db_calls.append(object_file)
def test_repl_connection(self):
node = {'ip': '127.0.0.1', 'port': 80, 'device': 'sdb1'}
conn = db_replicator.ReplConnection(node, '1234567890', 'abcdefg',
logging.getLogger())
def req(method, path, body, headers):
self.assertEquals(method, 'REPLICATE')
self.assertEquals(headers['Content-Type'], 'application/json')
class Resp:
def read(self): return 'data'
def read(self):
return 'data'
resp = Resp()
conn.request = req
conn.getresponse = lambda *args: resp
self.assertEquals(conn.replicate(1, 2, 3), resp)
def other_req(method, path, body, headers):
raise Exception('blah')
conn.request = other_req
@ -236,7 +259,8 @@ class TestDBReplicator(unittest.TestCase):
'created_at': 100, 'put_timestamp': 0,
'delete_timestamp': 0,
'metadata': {'Test': ('Value', normalize_timestamp(1))}}
replicator._http_connect = lambda *args: ReplHttp('{"id": 3, "point": -1}')
replicator._http_connect = lambda *args: ReplHttp(
'{"id": 3, "point": -1}')
self.assertEquals(replicator._repl_to_node(
fake_node, FakeBroker(), '0', fake_info), True)
@ -337,8 +361,10 @@ class TestDBReplicator(unittest.TestCase):
# self.assertEquals(rpc.dispatch(('drv', 'part', 'hash'), ['op',]
# ).status_int, 507)
# rpc.mount_check = False
# rpc.rsync_then_merge = lambda drive, db_file, args: self.assertEquals(args, ['test1'])
# rpc.complete_rsync = lambda drive, db_file, args: self.assertEquals(args, ['test2'])
# rpc.rsync_then_merge = lambda drive, db_file,
# args: self.assertEquals(args, ['test1'])
# rpc.complete_rsync = lambda drive, db_file,
# args: self.assertEquals(args, ['test2'])
# rpc.dispatch(('drv', 'part', 'hash'), ['rsync_then_merge','test1'])
# rpc.dispatch(('drv', 'part', 'hash'), ['complete_rsync','test2'])
# rpc.dispatch(('drv', 'part', 'hash'), ['other_op',])
@ -364,4 +390,3 @@ class TestDBReplicator(unittest.TestCase):
if __name__ == '__main__':
unittest.main()

View File

@ -17,6 +17,7 @@ import re
import unittest
import swift
class TestVersioning(unittest.TestCase):
def test_canonical_version_is_clean(self):
"""Ensure that a non-clean canonical_version never happens"""