Merge "Resolve TODO's in test/probe/test_sharder.py"

commit c568b4b100
Authored by Zuul on 2018-06-20 23:48:23 +00:00; committed by Gerrit Code Review

--- a/test/probe/test_sharder.py
+++ b/test/probe/test_sharder.py

@@ -187,6 +187,11 @@ class BaseTestContainerSharding(ReplProbeTest):
         return self.direct_container_op(direct_client.direct_head_container,
                                         account, container, expect_failure)
 
+    def direct_get_container(self, account=None, container=None,
+                             expect_failure=False):
+        return self.direct_container_op(direct_client.direct_get_container,
+                                        account, container, expect_failure)
+
     def get_storage_dir(self, part, node, account=None, container=None):
         account = account or self.brain.account
         container = container or self.container_name
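The new direct_get_container helper is exercised by later hunks in this change; as they show, it returns a per-replica mapping that a test can iterate to assert on every node's listing. A minimal usage sketch (the shard range variable and object name here are illustrative, not part of this change):

    # Illustrative only: check each container replica's listing directly,
    # mirroring the pattern used in the hunks below.
    listings = self.direct_get_container(shard_range.account,
                                         shard_range.container)
    for node, (headers, listing) in listings.items():
        with annotate_failure(node):  # test helper: adds node info to failures
            self.assertIn('alpha', [obj['name'] for obj in listing])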
@@ -583,8 +588,9 @@ class TestContainerSharding(BaseTestContainerSharding):
         # received the shard ranges that got defined
         found = self.categorize_container_dir_content()
         self.assertLengthEqual(found['shard_dbs'], 1)
-        broker = ContainerBroker(found['shard_dbs'][0])
-        # TODO: assert the shard db is on replica 0
+        broker = self.get_broker(self.brain.part, self.brain.nodes[0])
+        # sanity check - the shard db is on replica 0
+        self.assertEqual(found['shard_dbs'][0], broker.db_file)
         self.assertIs(True, broker.is_root_container())
         self.assertEqual('sharded', broker.get_db_state())
         orig_root_shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()]
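The get_broker call used here is a helper defined elsewhere in the test module and is not part of this diff. A rough sketch of what such a helper plausibly does, assuming it builds on the get_storage_dir method shown in the first hunk and on Swift's hashed container DB file naming (the body below is an assumption for illustration only):

    import os

    from swift.common.utils import hash_path
    from swift.container.backend import ContainerBroker

    def get_broker(self, part, node, account=None, container=None):
        # Assumed behaviour: locate the container DB under the node's storage
        # directory and open it with a ContainerBroker.
        account = account or self.brain.account
        container = container or self.container_name
        db_hash = hash_path(account, container)
        db_file = os.path.join(
            self.get_storage_dir(part, node, account, container),
            db_hash + '.db')
        return ContainerBroker(db_file)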
@@ -600,11 +606,14 @@ class TestContainerSharding(BaseTestContainerSharding):
             broker = ContainerBroker(db_file)
             self.assertIs(True, broker.is_root_container())
             self.assertEqual('unsharded', broker.get_db_state())
+            shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()]
+            self.assertEqual([ShardRange.CREATED, ShardRange.CREATED],
+                             [sr['state'] for sr in shard_ranges])
             # the sharded db had shard range meta_timestamps and state updated
             # during cleaving, so we do not expect those to be equal on other
             # nodes
             self.assert_shard_range_lists_equal(
-                orig_root_shard_ranges, broker.get_shard_ranges(),
+                orig_root_shard_ranges, shard_ranges,
                 excludes=['meta_timestamp', 'state', 'state_timestamp'])
 
         if run_replicators:
@@ -827,7 +836,12 @@ class TestContainerSharding(BaseTestContainerSharding):
         # add another object that lands in the first of the new sub-shards
         self.put_objects(['alpha'])
-        # TODO: assert that alpha is in the first new shard
+        # check that alpha object is in the first new shard
+        shard_listings = self.direct_get_container(shard_shards[0].account,
+                                                   shard_shards[0].container)
+        for node, (hdrs, listing) in shard_listings.items():
+            with annotate_failure(node):
+                self.assertIn('alpha', [o['name'] for o in listing])
         self.assert_container_listing(['alpha'] + more_obj_names + obj_names)
 
         # Run sharders again so things settle.
         self.run_sharders(shard_1)
@@ -1029,7 +1043,17 @@ class TestContainerSharding(BaseTestContainerSharding):
             self.assertEqual(len(obj_names) * 3 // 5,
                              broker.get_info()['object_count'])
 
-        # TODO: confirm that the updates got redirected to the shards
+        # confirm that the async pending updates got redirected to the shards
+        for sr in expected_shard_ranges:
+            shard_listings = self.direct_get_container(sr.account,
+                                                       sr.container)
+            for node, (hdrs, listing) in shard_listings.items():
+                shard_listing_names = [o['name'] for o in listing]
+                for obj in obj_names[4::5]:
+                    if obj in sr:
+                        self.assertIn(obj, shard_listing_names)
+                    else:
+                        self.assertNotIn(obj, shard_listing_names)
 
         # The entire listing is not yet available - we have two cleaved shard
         # ranges, complete with async updates, but for the remainder of the
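The `if obj in sr` check above relies on ShardRange membership: an object name belongs to a shard range when it sorts after the range's lower bound and at or before its upper bound (lower-exclusive, upper-inclusive). A small self-contained illustration of that rule, not the library implementation; empty bounds stand in for the open-ended minimum/maximum markers:

    # Sketch of ShardRange-style name membership: (lower, upper] on object names.
    def name_in_range(name, lower='', upper=''):
        after_lower = lower == '' or name > lower
        at_or_before_upper = upper == '' or name <= upper
        return after_lower and at_or_before_upper

    assert name_in_range('obj-0042', lower='obj-0000', upper='obj-0099')
    assert not name_in_range('obj-0100', lower='obj-0000', upper='obj-0099')
    assert name_in_range('anything', lower='', upper='')  # open-ended range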
@@ -1045,21 +1069,20 @@ class TestContainerSharding(BaseTestContainerSharding):
         # there should be something
         self.assertTrue(
             [x['name'].encode('utf-8') for x in listing[len(start_listing):]])
-        # Object count is hard to reason about though!
-        # TODO: nail down what this *should* be and make sure all containers
-        #       respond with it! Depending on what you're looking at, this
-        #       could be 0, 1/2, 7/12 (!?), 3/5, 2/3, or 4/5 or all objects!
-        #       Apparently, it may not even be present at all!
-        # self.assertIn('x-container-object-count', headers)
-        # self.assertEqual(headers['x-container-object-count'],
-        #                  str(len(obj_names) - len(obj_names) // 6))
-
-        # TODO: Doesn't work in reverse, yet
-        # headers, listing = client.get_container(self.url, self.token,
-        #                                         self.container_name,
-        #                                         query_string='reverse=on')
-        # self.assertEqual([x['name'].encode('utf-8') for x in listing],
-        #                  obj_names[::-1])
+        self.assertIn('x-container-object-count', headers)
+        self.assertEqual(str(len(listing)),
+                         headers['x-container-object-count'])
+        headers, listing = client.get_container(self.url, self.token,
+                                                self.container_name,
+                                                query_string='reverse=on')
+        self.assertEqual([x['name'].encode('utf-8')
+                          for x in listing[-len(start_listing):]],
+                         list(reversed(start_listing)))
+        self.assertIn('x-container-object-count', headers)
+        self.assertEqual(str(len(listing)),
+                         headers['x-container-object-count'])
+        self.assertTrue(
+            [x['name'].encode('utf-8') for x in listing[:-len(start_listing)]])
 
         # Run the sharders again to get everything to settle
         self.sharders.once()
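The reverse-listing assertions added above check two slices of the reversed listing: the tail should be the already-cleaved start_listing in reverse, and the head (the not-yet-cleaved remainder) should merely be non-empty. A tiny pure-Python illustration of that slicing, using made-up names:

    # Hypothetical data: the forward listing begins with the cleaved objects.
    start_listing = ['a', 'b', 'c']
    forward = start_listing + ['x', 'y']       # stand-in for the full listing
    reverse = list(reversed(forward))          # what reverse=on would return
    assert reverse[-len(start_listing):] == list(reversed(start_listing))
    assert reverse[:-len(start_listing)] == ['y', 'x']  # remainder, non-empty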
@@ -1211,14 +1234,10 @@ class TestContainerSharding(BaseTestContainerSharding):
 
         # while container servers are down, but proxy has container info in
         # cache from recent listing, put another object; this update will
-        # lurk in async pending until the updaters run again
-        # TODO: because all the root container servers are down and
-        # therefore cannot respond to a GET for a redirect target, the
-        # object update will default to being targeted at the root
-        # container; can we provoke an object update that does get targeted
-        # to the shard, but fails to update shard, so that the async
-        # pending will first be directed to the shard when the updaters
-        # run?
+        # lurk in async pending until the updaters run again; because all
+        # the root container servers are down and therefore cannot respond
+        # to a GET for a redirect target, the object update will default to
+        # being targeted at the root container
         self.stop_container_servers()
         self.put_objects([beta])
         self.brain.servers.start()
@@ -1282,10 +1301,9 @@ class TestContainerSharding(BaseTestContainerSharding):
         self.assertEqual(exp_obj_count, obj_count)
 
         # the donor shard is also still intact
-        # TODO: once we have figured out when these redundant donors are
-        # deleted, test for deletion/clean up
+        donor = orig_shard_ranges[0]
         shard_nodes_data = self.direct_get_container_shard_ranges(
-            orig_shard_ranges[0].account, orig_shard_ranges[0].container)
+            donor.account, donor.container)
         # the donor's shard range will have the acceptor's projected stats
         obj_count, bytes_used = check_shard_nodes_data(
             shard_nodes_data, expected_state='sharded', expected_shards=1,
@@ -1293,6 +1311,16 @@ class TestContainerSharding(BaseTestContainerSharding):
         # but the donor is empty and so reports zero stats
         self.assertEqual(0, obj_count)
         self.assertEqual(0, bytes_used)
+        # check the donor own shard range state
+        part, nodes = self.brain.ring.get_nodes(
+            donor.account, donor.container)
+        for node in nodes:
+            with annotate_failure(node):
+                broker = self.get_broker(
+                    part, node, donor.account, donor.container)
+                own_sr = broker.get_own_shard_range()
+                self.assertEqual(ShardRange.SHARDED, own_sr.state)
+                self.assertTrue(own_sr.deleted)
 
         # delete all the second shard's object apart from 'alpha'
         for obj in second_shard_objects: