Have check_drive raise ValueError on errors
...which helps us better differentiate in log messages between a drive that's
not mounted and one that's not a directory. We were already doing that a bit
in diskfile.py, and it seems like a useful distinction; let's do it more.
While we're at it, remove some log translations.

Related-Change: I941ffbc568ebfa5964d49964dc20c382a5e2ec2a
Related-Change: I3362a6ebff423016bb367b4b6b322bb41ae08764
Change-Id: Ife0d34f9482adb4524d1ab1fe6c335c6b287c2fd
Partial-Bug: 1674543
parent c568b4b100
commit f192f51d37
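The practical change for callers: check_drive no longer returns None on failure.
It either returns the full path to the device or raises ValueError with a message
that names the path and the reason, so callers switch from "if not check_drive(...)"
to try/except ValueError and can log the exception directly. Below is a minimal
standalone sketch of the new contract and calling pattern; os.path.ismount stands
in here for swift.common.utils.ismount, and sweep_devices is an illustrative
caller, not code from this commit.

import os
import urllib.parse


def check_drive(root, drive, mount_check):
    """Sketch of the new contract: return the device path or raise ValueError.

    Mirrors swift.common.constraints.check_drive after this change, with
    os.path.ismount standing in for swift.common.utils.ismount.
    """
    if urllib.parse.quote_plus(drive) != drive:
        raise ValueError('%s is not a valid drive name' % drive)
    path = os.path.join(root, drive)
    if mount_check:
        if not os.path.ismount(path):
            raise ValueError('%s is not mounted' % path)
    elif not os.path.isdir(path):
        raise ValueError('%s is not a directory' % path)
    return path


def sweep_devices(devices_root, device_names, mount_check, logger):
    """Illustrative caller showing the try/except pattern used in this commit."""
    for device in device_names:
        try:
            dev_path = check_drive(devices_root, device, mount_check)
        except ValueError as err:
            # The exception text already says which path failed and why
            # ("... is not mounted" vs. "... is not a directory"), so the
            # log line reduces to 'Skipping: %s' with no translation.
            logger.debug('Skipping: %s', err)
            continue
        yield dev_path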
@@ -134,10 +134,11 @@ class AccountReaper(Daemon):
         begin = time()
         try:
             for device in os.listdir(self.devices):
-                if not check_drive(self.devices, device, self.mount_check):
+                try:
+                    check_drive(self.devices, device, self.mount_check)
+                except ValueError as err:
                     self.logger.increment('errors')
-                    self.logger.debug(
-                        _('Skipping %s as it is not mounted'), device)
+                    self.logger.debug('Skipping: %s', err)
                     continue
                 self.reap_device(device)
         except (Exception, Timeout):
@@ -88,7 +88,9 @@ class AccountController(BaseStorageServer):
     def DELETE(self, req):
         """Handle HTTP DELETE request."""
         drive, part, account = split_and_validate_path(req, 3)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         req_timestamp = valid_timestamp(req)
         broker = self._get_account_broker(drive, part, account)
@@ -102,7 +104,9 @@ class AccountController(BaseStorageServer):
     def PUT(self, req):
         """Handle HTTP PUT request."""
         drive, part, account, container = split_and_validate_path(req, 3, 4)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         if container:   # put account container
             if 'x-timestamp' not in req.headers:
@@ -169,7 +173,9 @@ class AccountController(BaseStorageServer):
         """Handle HTTP HEAD request."""
         drive, part, account = split_and_validate_path(req, 3)
         out_content_type = listing_formats.get_listing_content_type(req)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         broker = self._get_account_broker(drive, part, account,
                                           pending_timeout=0.1,
@@ -204,7 +210,9 @@ class AccountController(BaseStorageServer):
         end_marker = get_param(req, 'end_marker')
         out_content_type = listing_formats.get_listing_content_type(req)
 
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         broker = self._get_account_broker(drive, part, account,
                                           pending_timeout=0.1,
@@ -225,7 +233,9 @@ class AccountController(BaseStorageServer):
         """
         post_args = split_and_validate_path(req, 3)
         drive, partition, hash = post_args
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         try:
             args = json.load(req.environ['wsgi.input'])
@@ -241,7 +251,9 @@ class AccountController(BaseStorageServer):
         """Handle HTTP POST request."""
         drive, part, account = split_and_validate_path(req, 3)
         req_timestamp = valid_timestamp(req)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         broker = self._get_account_broker(drive, part, account)
         if broker.is_deleted():
@@ -229,7 +229,8 @@ def check_dir(root, drive):
 
     :param root: base path where the dir is
    :param drive: drive name to be checked
-    :returns: full path to the device, or None if drive fails to validate
+    :returns: full path to the device
+    :raises ValueError: if drive fails to validate
     """
     return check_drive(root, drive, False)
 
@@ -243,7 +244,8 @@ def check_mount(root, drive):
 
     :param root: base path where the devices are mounted
     :param drive: drive name to be checked
-    :returns: full path to the device, or None if drive fails to validate
+    :returns: full path to the device
+    :raises ValueError: if drive fails to validate
     """
     return check_drive(root, drive, True)
 
@@ -256,18 +258,19 @@ def check_drive(root, drive, mount_check):
     :param drive: drive name to be checked
     :param mount_check: additionally require path is mounted
 
-    :returns: full path to the device, or None if drive fails to validate
+    :returns: full path to the device
+    :raises ValueError: if drive fails to validate
     """
     if not (urllib.parse.quote_plus(drive) == drive):
-        return None
+        raise ValueError('%s is not a valid drive name' % drive)
     path = os.path.join(root, drive)
     if mount_check:
-        if utils.ismount(path):
-            return path
+        if not utils.ismount(path):
+            raise ValueError('%s is not mounted' % path)
     else:
-        if isdir(path):
-            return path
-    return None
+        if not isdir(path):
+            raise ValueError('%s is not a directory' % path)
+    return path
 
 
 def check_float(string):
@@ -761,14 +761,15 @@ class Replicator(Daemon):
                                         node['replication_ip'],
                                         node['replication_port']):
                 found_local = True
-                if not check_drive(self.root, node['device'],
-                                   self.mount_check):
+                try:
+                    dev_path = check_drive(self.root, node['device'],
+                                           self.mount_check)
+                except ValueError as err:
                     self._add_failure_stats(
                         [(failure_dev['replication_ip'],
                           failure_dev['device'])
                          for failure_dev in self.ring.devs if failure_dev])
-                    self.logger.warning(
-                        _('Skipping %(device)s as it is not mounted') % node)
+                    self.logger.warning('Skipping: %s', err)
                     continue
                 if node['device'] not in devices_to_replicate:
                     self.logger.debug(
@@ -776,7 +777,7 @@ class Replicator(Daemon):
                         node['device'])
                     continue
                 unlink_older_than(
-                    os.path.join(self.root, node['device'], 'tmp'),
+                    os.path.join(dev_path, 'tmp'),
                     time.time() - self.reclaim_age)
                 datadir = os.path.join(self.root, node['device'], self.datadir)
                 if os.path.isdir(datadir):
@@ -835,9 +836,11 @@ class ReplicatorRpc(object):
             return HTTPBadRequest(body='Invalid object type')
         op = args.pop(0)
         drive, partition, hsh = replicate_args
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            dev_path = check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return Response(status='507 %s is not mounted' % drive)
-        db_file = os.path.join(self.root, drive,
+        db_file = os.path.join(dev_path,
                                storage_directory(self.datadir, partition, hsh),
                                hsh + '.db')
         if op == 'rsync_then_merge':
@@ -209,12 +209,14 @@ class ReconMiddleware(object):
                 continue
 
             try:
-                mounted = bool(check_mount(self.devices, entry))
+                check_mount(self.devices, entry)
             except OSError as err:
                 mounted = str(err)
-            mpoint = {'device': entry, 'mounted': mounted}
-            if mpoint['mounted'] is not True:
-                mountlist.append(mpoint)
+            except ValueError:
+                mounted = False
+            else:
+                continue
+            mountlist.append({'device': entry, 'mounted': mounted})
         return mountlist
 
     def get_diskusage(self):
@@ -225,13 +227,14 @@ class ReconMiddleware(object):
                 continue
 
             try:
-                mounted = bool(check_mount(self.devices, entry))
+                check_mount(self.devices, entry)
             except OSError as err:
                 devices.append({'device': entry, 'mounted': str(err),
                                 'size': '', 'used': '', 'avail': ''})
-                continue
-
-            if mounted:
+            except ValueError:
+                devices.append({'device': entry, 'mounted': False,
+                                'size': '', 'used': '', 'avail': ''})
+            else:
                 path = os.path.join(self.devices, entry)
                 disk = os.statvfs(path)
                 capacity = disk.f_bsize * disk.f_blocks
@@ -240,9 +243,6 @@
                 devices.append({'device': entry, 'mounted': True,
                                 'size': capacity, 'used': used,
                                 'avail': available})
-            else:
-                devices.append({'device': entry, 'mounted': False,
-                                'size': '', 'used': '', 'avail': ''})
         return devices
 
     def get_ring_md5(self):
@@ -305,7 +305,9 @@ class ContainerController(BaseStorageServer):
         drive, part, account, container, obj = split_and_validate_path(
             req, 4, 5, True)
         req_timestamp = valid_timestamp(req)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         # policy index is only relevant for delete_obj (and transitively for
         # auto create accounts)
@@ -432,7 +434,9 @@ class ContainerController(BaseStorageServer):
                                                   self.realms_conf)
             if err:
                 return HTTPBadRequest(err)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         requested_policy_index = self.get_and_validate_policy_index(req)
         broker = self._get_container_broker(drive, part, account, container)
@@ -502,7 +506,9 @@ class ContainerController(BaseStorageServer):
         drive, part, account, container, obj = split_and_validate_path(
             req, 4, 5, True)
         out_content_type = listing_formats.get_listing_content_type(req)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         broker = self._get_container_broker(drive, part, account, container,
                                             pending_timeout=0.1,
@@ -635,7 +641,9 @@ class ContainerController(BaseStorageServer):
                     body='Maximum limit is %d'
                     % constraints.CONTAINER_LISTING_LIMIT)
         out_content_type = listing_formats.get_listing_content_type(req)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         broker = self._get_container_broker(drive, part, account, container,
                                             pending_timeout=0.1,
@@ -714,7 +722,9 @@ class ContainerController(BaseStorageServer):
         """
         post_args = split_and_validate_path(req, 3)
         drive, partition, hash = post_args
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         try:
             args = json.load(req.environ['wsgi.input'])
@@ -736,7 +746,9 @@ class ContainerController(BaseStorageServer):
                                                   self.realms_conf)
             if err:
                 return HTTPBadRequest(err)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         broker = self._get_container_broker(drive, part, account, container)
         if broker.is_deleted():
@@ -101,9 +101,10 @@ class ContainerUpdater(Daemon):
         """
         paths = []
         for device in self._listdir(self.devices):
-            dev_path = check_drive(self.devices, device, self.mount_check)
-            if not dev_path:
-                self.logger.warning(_('%s is not mounted'), device)
+            try:
+                dev_path = check_drive(self.devices, device, self.mount_check)
+            except ValueError as err:
+                self.logger.warning("%s", err)
                 continue
             con_path = os.path.join(dev_path, DATADIR)
             if not os.path.exists(con_path):
@@ -484,11 +484,11 @@ def object_audit_location_generator(devices, datadir, mount_check=True,
 
     base, policy = split_policy_string(datadir)
     for device in device_dirs:
-        if not check_drive(devices, device, mount_check):
+        try:
+            check_drive(devices, device, mount_check)
+        except ValueError as err:
             if logger:
-                logger.debug(
-                    'Skipping %s as it is not %s', device,
-                    'mounted' if mount_check else 'a dir')
+                logger.debug('Skipping: %s', err)
             continue
 
         datadir_path = os.path.join(devices, device, datadir)
@@ -1248,11 +1248,11 @@ class BaseDiskFileManager(object):
             # explicitly forbidden from syscall, just return path
             return join(self.devices, device)
         # we'll do some kind of check if not explicitly forbidden
-        if mount_check or self.mount_check:
-            mount_check = True
-        else:
-            mount_check = False
-        return check_drive(self.devices, device, mount_check)
+        try:
+            return check_drive(self.devices, device,
+                               mount_check or self.mount_check)
+        except ValueError:
+            return None
 
     @contextmanager
     def replication_lock(self, device):
@@ -792,17 +792,17 @@ class ObjectReplicator(Daemon):
                               and (override_devices is None
                                    or dev['device'] in override_devices))]:
             found_local = True
-            dev_path = check_drive(self.devices_dir, local_dev['device'],
-                                   self.mount_check)
             local_dev_stats = self.stats_for_dev[local_dev['device']]
-            if not dev_path:
+            try:
+                dev_path = check_drive(self.devices_dir, local_dev['device'],
+                                       self.mount_check)
+            except ValueError as err:
                 local_dev_stats.add_failure_stats(
                     [(failure_dev['replication_ip'],
                       failure_dev['device'])
                      for failure_dev in policy.object_ring.devs
                      if failure_dev])
-                self.logger.warning(
-                    _('%s is not mounted'), local_dev['device'])
+                self.logger.warning("%s", err)
                 continue
             obj_path = join(dev_path, data_dir)
             tmp_path = join(dev_path, get_tmp_dir(policy))
@@ -933,13 +933,14 @@ class ObjectReplicator(Daemon):
             dev_stats = self.stats_for_dev[job['device']]
             num_jobs += 1
             current_nodes = job['nodes']
-            dev_path = check_drive(self.devices_dir, job['device'],
-                                   self.mount_check)
-            if not dev_path:
+            try:
+                check_drive(self.devices_dir, job['device'],
+                            self.mount_check)
+            except ValueError as err:
                 dev_stats.add_failure_stats([
                     (failure_dev['replication_ip'], failure_dev['device'])
                     for failure_dev in job['nodes']])
-                self.logger.warning(_('%s is not mounted'), job['device'])
+                self.logger.warning("%s", err)
                 continue
             if self.handoffs_first and not job['delete']:
                 # in handoffs first mode, we won't process primary
@@ -144,12 +144,14 @@ class ObjectUpdater(Daemon):
             # read from container ring to ensure it's fresh
             self.get_container_ring().get_nodes('')
             for device in self._listdir(self.devices):
-                if not check_drive(self.devices, device, self.mount_check):
+                try:
+                    dev_path = check_drive(self.devices, device,
+                                           self.mount_check)
+                except ValueError as err:
                     # We don't count this as an error. The occasional
                     # unmounted drive is part of normal cluster operations,
                     # so a simple warning is sufficient.
-                    self.logger.warning(
-                        _('Skipping %s as it is not mounted'), device)
+                    self.logger.warning('Skipping: %s', err)
                     continue
                 while len(pids) >= self.updater_workers:
                     pids.remove(os.wait()[0])
@@ -161,7 +163,7 @@ class ObjectUpdater(Daemon):
                     eventlet_monkey_patch()
                     self.stats.reset()
                    forkbegin = time.time()
-                    self.object_sweep(os.path.join(self.devices, device))
+                    self.object_sweep(dev_path)
                     elapsed = time.time() - forkbegin
                     self.logger.info(
                         ('Object update sweep of %(device)s '
@@ -185,14 +187,15 @@ class ObjectUpdater(Daemon):
         begin = time.time()
         self.stats.reset()
         for device in self._listdir(self.devices):
-            if not check_drive(self.devices, device, self.mount_check):
+            try:
+                dev_path = check_drive(self.devices, device, self.mount_check)
+            except ValueError as err:
                 # We don't count this as an error. The occasional unmounted
                 # drive is part of normal cluster operations, so a simple
                 # warning is sufficient.
-                self.logger.warning(
-                    _('Skipping %s as it is not mounted'), device)
+                self.logger.warning('Skipping: %s', err)
                 continue
-            self.object_sweep(os.path.join(self.devices, device))
+            self.object_sweep(dev_path)
         elapsed = time.time() - begin
         self.logger.info(
             ('Object update single-threaded sweep completed: '
@@ -213,11 +216,11 @@ class ObjectUpdater(Daemon):
         for asyncdir in self._listdir(device):
             # we only care about directories
             async_pending = os.path.join(device, asyncdir)
-            if not os.path.isdir(async_pending):
-                continue
             if not asyncdir.startswith(ASYNCDIR_BASE):
                 # skip stuff like "accounts", "containers", etc.
                 continue
+            if not os.path.isdir(async_pending):
+                continue
             try:
                 base, policy = split_policy_string(asyncdir)
             except PolicyError as e:
@@ -407,10 +407,29 @@ class TestConstraints(unittest.TestCase):
     def test_check_drive_invalid_path(self):
         root = '/srv/'
         with mock_check_drive() as mocks:
-            self.assertIsNone(constraints.check_dir(root, 'foo?bar'))
-            self.assertIsNone(constraints.check_mount(root, 'foo bar'))
-            self.assertIsNone(constraints.check_drive(root, 'foo/bar', True))
-            self.assertIsNone(constraints.check_drive(root, 'foo%bar', False))
+            drive = 'foo?bar'
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_dir(root, drive)
+            self.assertEqual(str(exc_mgr.exception),
+                             '%s is not a valid drive name' % drive)
+
+            drive = 'foo bar'
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_mount(root, drive)
+            self.assertEqual(str(exc_mgr.exception),
+                             '%s is not a valid drive name' % drive)
+
+            drive = 'foo/bar'
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_drive(root, drive, True)
+            self.assertEqual(str(exc_mgr.exception),
+                             '%s is not a valid drive name' % drive)
+
+            drive = 'foo%bar'
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_drive(root, drive, False)
+            self.assertEqual(str(exc_mgr.exception),
+                             '%s is not a valid drive name' % drive)
         self.assertEqual([], mocks['isdir'].call_args_list)
         self.assertEqual([], mocks['ismount'].call_args_list)
 
@@ -418,11 +437,20 @@ class TestConstraints(unittest.TestCase):
         root = '/srv'
         path = 'sdb1'
         with mock_check_drive(ismount=True) as mocks:
-            self.assertIsNone(constraints.check_dir(root, path))
-            self.assertIsNone(constraints.check_drive(root, path, False))
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_dir(root, path)
+            self.assertEqual(str(exc_mgr.exception),
+                             '/srv/sdb1 is not a directory')
+
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_drive(root, path, False)
+            self.assertEqual(str(exc_mgr.exception),
+                             '/srv/sdb1 is not a directory')
+
             self.assertEqual([mock.call('/srv/sdb1'), mock.call('/srv/sdb1')],
                              mocks['isdir'].call_args_list)
             self.assertEqual([], mocks['ismount'].call_args_list)
+
         with mock_check_drive(ismount=True) as mocks:
             self.assertEqual('/srv/sdb1', constraints.check_mount(root, path))
             self.assertEqual('/srv/sdb1', constraints.check_drive(
@@ -441,9 +469,18 @@ class TestConstraints(unittest.TestCase):
             self.assertEqual([mock.call('/srv/sdb2'), mock.call('/srv/sdb2')],
                              mocks['isdir'].call_args_list)
             self.assertEqual([], mocks['ismount'].call_args_list)
+
         with mock_check_drive(isdir=True) as mocks:
-            self.assertIsNone(constraints.check_mount(root, path))
-            self.assertIsNone(constraints.check_drive(root, path, True))
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_mount(root, path)
+            self.assertEqual(str(exc_mgr.exception),
+                             '/srv/sdb2 is not mounted')
+
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_drive(root, path, True)
+            self.assertEqual(str(exc_mgr.exception),
+                             '/srv/sdb2 is not mounted')
+
             self.assertEqual([], mocks['isdir'].call_args_list)
             self.assertEqual([mock.call('/srv/sdb2'), mock.call('/srv/sdb2')],
                              mocks['ismount'].call_args_list)
@@ -545,11 +545,13 @@ class TestDBReplicator(unittest.TestCase):
         self.assertEqual(replicator.mount_check, True)
         self.assertEqual(replicator.port, 6200)
 
+        err = ValueError('Boom!')
+
         def mock_check_drive(root, device, mount_check):
             self.assertEqual(root, replicator.root)
             self.assertEqual(device, replicator.ring.devs[0]['device'])
             self.assertEqual(mount_check, True)
-            return None
+            raise err
 
         self._patch(patch.object, db_replicator, 'check_drive',
                     mock_check_drive)
@@ -557,8 +559,7 @@ class TestDBReplicator(unittest.TestCase):
 
         self.assertEqual(
             replicator.logger.log_dict['warning'],
-            [(('Skipping %(device)s as it is not mounted' %
-               replicator.ring.devs[0],), {})])
+            [(('Skipping: %s', (err, )), {})])
 
     def test_run_once_node_is_mounted(self):
         db_replicator.ring = FakeRingWithSingleNode()
@@ -2851,7 +2851,7 @@ class TestContainerController(unittest.TestCase):
         resp = do_test({'limit': str(constraints.CONTAINER_LISTING_LIMIT + 1)})
         self.assertEqual(resp.status_int, 412)
         with mock.patch('swift.container.server.check_drive',
-                        lambda *args: False):
+                        side_effect=ValueError('sda1 is not mounted')):
             resp = do_test({})
             self.assertEqual(resp.status_int, 507)
 
@@ -148,7 +148,7 @@ class TestContainerUpdater(unittest.TestCase):
             cu.run_once()
             log_lines = self.logger.get_lines_for_level('warning')
             self.assertGreater(len(log_lines), 0)
-            msg = 'sda1 is not mounted'
+            msg = '%s is not mounted' % self.sda1
             self.assertEqual(log_lines[0], msg)
             # Ensure that the container_sweep did not run
             self.assertFalse(mock_sweep.called)
@@ -477,7 +477,7 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
                     logger=logger)]
             debug_lines = logger.get_lines_for_level('debug')
             self.assertEqual([
-                'Skipping sdq as it is not mounted',
+                'Skipping: %s/sdq is not mounted' % tmpdir,
             ], debug_lines)
 
     def test_skipping_files(self):
@@ -509,7 +509,7 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
                     logger=logger)]
             debug_lines = logger.get_lines_for_level('debug')
             self.assertEqual([
-                'Skipping garbage as it is not a dir',
+                'Skipping: %s/garbage is not a directory' % tmpdir,
             ], debug_lines)
             logger.clear()
 
@@ -523,7 +523,7 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
                     logger=logger)]
             debug_lines = logger.get_lines_for_level('debug')
             self.assertEqual([
-                'Skipping garbage as it is not mounted',
+                'Skipping: %s/garbage is not mounted' % tmpdir,
             ], debug_lines)
 
     def test_only_catch_expected_errors(self):
@@ -391,7 +391,7 @@ class TestObjectUpdater(unittest.TestCase):
 
     @mock.patch.object(object_updater, 'check_drive')
     def test_run_once_with_disk_unmounted(self, mock_check_drive):
-        mock_check_drive.return_value = False
+        mock_check_drive.side_effect = ValueError
         ou = object_updater.ObjectUpdater({
             'devices': self.devices_dir,
             'mount_check': 'false',
@@ -431,7 +431,7 @@ class TestObjectUpdater(unittest.TestCase):
 
     @mock.patch.object(object_updater, 'check_drive')
     def test_run_once(self, mock_check_drive):
-        mock_check_drive.return_value = True
+        mock_check_drive.side_effect = lambda r, d, mc: os.path.join(r, d)
         ou = object_updater.ObjectUpdater({
             'devices': self.devices_dir,
             'mount_check': 'false',