Merge "Cleanup tests for auditor invalidating hashes"
commit cb33660848
@@ -267,9 +267,10 @@ class AuditorWorker(object):
                               {'obj': location, 'err': err})
         except DiskFileDeleted:
             # If there is a reclaimable tombstone, we'll invalidate the hash
-            # to trigger the replciator to rehash/cleanup this suffix
+            # to trigger the replicator to rehash/cleanup this suffix
             ts = df._ondisk_info['ts_info']['timestamp']
-            if (time.time() - float(ts)) > df.manager.reclaim_age:
+            if (not self.zero_byte_only_at_fps and
+                    (time.time() - float(ts)) > df.manager.reclaim_age):
                 df.manager.invalidate_hash(dirname(df._datadir))
         except DiskFileNotExist:
             pass
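
With this hunk, the DiskFileDeleted handler only invalidates the suffix hash when the worker is a regular auditor (not a zero-byte-only one) and the tombstone is older than reclaim_age. A minimal standalone sketch of that check, using hypothetical stand-in values rather than Swift's real DiskFile objects (in the hunk they come from df._ondisk_info and df.manager):

import time

# Hypothetical stand-ins for what the auditor reads off the diskfile.
tombstone_ts = time.time() - 8 * 24 * 3600   # tombstone a bit over a week old
reclaim_age = 604800                         # one week, in seconds
zero_byte_only_at_fps = 0                    # falsy means a regular worker


def should_invalidate(now, tombstone_ts, reclaim_age, zero_byte_only_at_fps):
    # Mirrors the condition in the hunk above: only a regular
    # (non zero-byte-only) worker invalidates, and only once the
    # tombstone is past reclaim_age.
    return (not zero_byte_only_at_fps and
            (now - float(tombstone_ts)) > reclaim_age)


print(should_invalidate(time.time(), tombstone_ts, reclaim_age,
                        zero_byte_only_at_fps))  # True for these values
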
@@ -14,7 +14,6 @@
 # limitations under the License.
 
 from test import unit
-import six.moves.cPickle as pickle
 import unittest
 import mock
 import os
@@ -24,7 +23,7 @@ from shutil import rmtree
 from hashlib import md5
 from tempfile import mkdtemp
 import textwrap
-from os.path import dirname, basename, join
+from os.path import dirname, basename
 from test.unit import (FakeLogger, patch_policies, make_timestamp_iter,
                        DEFAULT_TEST_EC_TYPE)
 from swift.obj import auditor, replicator
@@ -746,56 +745,84 @@ class TestAuditor(unittest.TestCase):
         self.auditor.run_audit(**kwargs)
         self.assertFalse(os.path.exists(self.disk_file._datadir))
 
-    def test_with_tombstone_delete(self):
-        test_md5 = '098f6bcd4621d373cade4e832627b4f6'
-
-        def do_audit(self, timestamp, invalidate=False):
-            dir_path = self.disk_file._datadir
-            ts_file = os.path.join(dir_path, '%d.ts' % timestamp)
-
-            # Create a .ts file
-            if not os.path.exists(dir_path):
-                mkdirs(dir_path)
-            fp = open(ts_file, 'w')
-            write_metadata(fp, {'X-Timestamp': '%d' % timestamp})
-            fp.close()
-            # Create hashes.pkl
-            hash = dirname(dirname(ts_file))  # hash value of ts file
-            suffix = basename(hash)
-            hashes_pkl = join(os.path.dirname(hash), HASH_FILE)
-            with open(hashes_pkl, 'wb') as fp:
-                pickle.dump({suffix: test_md5}, fp, 0)
-            # Run auditor
-            kwargs = {'mode': 'once'}
-            self.auditor.run_audit(**kwargs)
-            # Check if hash invalid file exists
-            hash_invalid = join(dirname(hash), HASH_INVALIDATIONS_FILE)
-            hash_invalid_exists = os.path.exists(hash_invalid)
-            # If invalidate, fetch value from hashes.invalid
-            if invalidate:
-                with open(hash_invalid, 'rb') as fp:
-                    hash_val = fp.read()
-                return hash_invalid_exists, hash_val, suffix
-            return hash_invalid_exists, ts_file
-
-        self.auditor = auditor.ObjectAuditor(self.conf)
-        self.auditor.log_time = 0
-
-        now = time.time()
-
-        # audit with a recent tombstone
-        hash_invalid_exists, ts_file = do_audit(self, now - 55)
-        self.assertFalse(hash_invalid_exists)
-        os.unlink(ts_file)
-
-        # audit with a tombstone that is beyond default reclaim_age
-        hash_invalid_exists, hash_val, suffix = do_audit(self, now - (604800),
-                                                         True)
-        self.assertTrue(hash_invalid_exists)
-        self.assertEqual(hash_val.strip('\n'), suffix)
+    def _audit_tombstone(self, conf, ts_tomb, zero_byte_fps=0):
+        self.auditor = auditor.ObjectAuditor(conf)
+        self.auditor.log_time = 0
+        # create tombstone and hashes.pkl file, ensuring the tombstone is not
+        # reclaimed by mocking time to be the tombstone time
+        with mock.patch('time.time', return_value=float(ts_tomb)):
+            self.disk_file.delete(ts_tomb)
+            self.disk_file.manager.get_hashes(
+                self.devices + '/sda', '0', [], self.disk_file.policy)
+        suffix = basename(dirname(self.disk_file._datadir))
+        part_dir = dirname(dirname(self.disk_file._datadir))
+        # sanity checks...
+        self.assertEqual(['%s.ts' % ts_tomb.internal],
+                         os.listdir(self.disk_file._datadir))
+        self.assertTrue(os.path.exists(os.path.join(part_dir, HASH_FILE)))
+        self.assertFalse(os.path.exists(
+            os.path.join(part_dir, HASH_INVALIDATIONS_FILE)))
+        # Run auditor
+        self.auditor.run_audit(mode='once', zero_byte_fps=zero_byte_fps)
+        # sanity check - auditor should not remove tombstone file
+        self.assertEqual(['%s.ts' % ts_tomb.internal],
+                         os.listdir(self.disk_file._datadir))
+        return part_dir, suffix
+
+    def test_non_reclaimable_tombstone(self):
+        # audit with a recent tombstone
+        ts_tomb = Timestamp(time.time() - 55)
+        part_dir, suffix = self._audit_tombstone(self.conf, ts_tomb)
+        self.assertTrue(os.path.exists(os.path.join(part_dir, HASH_FILE)))
+        self.assertFalse(os.path.exists(
+            os.path.join(part_dir, HASH_INVALIDATIONS_FILE)))
+
+    def test_reclaimable_tombstone(self):
+        # audit with a reclaimable tombstone
+        ts_tomb = Timestamp(time.time() - 604800)
+        part_dir, suffix = self._audit_tombstone(self.conf, ts_tomb)
+        self.assertTrue(os.path.exists(os.path.join(part_dir, HASH_FILE)))
+        hash_invalid = os.path.join(part_dir, HASH_INVALIDATIONS_FILE)
+        self.assertTrue(os.path.exists(hash_invalid))
+        with open(hash_invalid, 'rb') as fp:
+            hash_val = fp.read()
+        self.assertEqual(suffix, hash_val.strip('\n'))
+
+    def test_non_reclaimable_tombstone_with_custom_reclaim_age(self):
+        # audit with a tombstone newer than custom reclaim age
+        ts_tomb = Timestamp(time.time() - 604800)
+        conf = dict(self.conf)
+        conf['reclaim_age'] = 2 * 604800
+        part_dir, suffix = self._audit_tombstone(conf, ts_tomb)
+        self.assertTrue(os.path.exists(os.path.join(part_dir, HASH_FILE)))
+        self.assertFalse(os.path.exists(
+            os.path.join(part_dir, HASH_INVALIDATIONS_FILE)))
+
+    def test_reclaimable_tombstone_with_custom_reclaim_age(self):
+        # audit with a tombstone older than custom reclaim age
+        ts_tomb = Timestamp(time.time() - 55)
+        conf = dict(self.conf)
+        conf['reclaim_age'] = 10
+        part_dir, suffix = self._audit_tombstone(conf, ts_tomb)
+        self.assertTrue(os.path.exists(os.path.join(part_dir, HASH_FILE)))
+        hash_invalid = os.path.join(part_dir, HASH_INVALIDATIONS_FILE)
+        self.assertTrue(os.path.exists(hash_invalid))
+        with open(hash_invalid, 'rb') as fp:
+            hash_val = fp.read()
+        self.assertEqual(suffix, hash_val.strip('\n'))
+
+    def test_reclaimable_tombstone_with_zero_byte_fps(self):
+        # audit with a tombstone older than reclaim age by a zero_byte_fps
+        # worker does not invalidate the hash
+        ts_tomb = Timestamp(time.time() - 604800)
+        part_dir, suffix = self._audit_tombstone(
+            self.conf, ts_tomb, zero_byte_fps=50)
+        self.assertTrue(os.path.exists(os.path.join(part_dir, HASH_FILE)))
+        self.assertFalse(os.path.exists(
+            os.path.join(part_dir, HASH_INVALIDATIONS_FILE)))
 
     def test_auditor_reclaim_age(self):
-        # if we don't have access to the replicator config section we'll
+        # if we don't have access to the replicator config section we'll use
         # diskfile's default
         auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
                                                self.rcache, self.devices)
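
The refactored tests all funnel through _audit_tombstone and then inspect the partition directory: hashes.pkl (HASH_FILE) should survive every audit, while hashes.invalid (HASH_INVALIDATIONS_FILE) should appear only when the tombstone is past reclaim_age and the worker is not a zero-byte-only one. A minimal sketch of the directory walk the assertions perform, using made-up device/partition/suffix/hash paths rather than the ones the test fixture derives from self.disk_file._datadir:

import os

# Hypothetical object data dir: /<device>/objects/<part>/<suffix>/<hash>
datadir = '/srv/node/sda/objects/0/abc/d41d8cd98f00b204e9800998ecf8427e'
suffix = os.path.basename(os.path.dirname(datadir))       # 'abc'
part_dir = os.path.dirname(os.path.dirname(datadir))      # '.../objects/0'

hashes_pkl = os.path.join(part_dir, 'hashes.pkl')          # HASH_FILE
hashes_invalid = os.path.join(part_dir, 'hashes.invalid')  # HASH_INVALIDATIONS_FILE

# A reclaimable tombstone audited by a regular worker is expected to leave
# the suffix recorded in hashes.invalid as a newline-terminated entry,
# which is why the tests compare against hash_val.strip('\n').
if os.path.exists(hashes_invalid):
    with open(hashes_invalid) as fp:
        invalidated = [line.strip() for line in fp if line.strip()]
    print(suffix in invalidated)
else:
    print('no invalidations recorded')
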