Have check_drive raise ValueError on errors

...which lets log messages differentiate more clearly between a drive
that is not mounted and one that is not a directory. We were already
doing that a bit in diskfile.py, and it seems like a useful
distinction; let's do it more.

While we're at it, remove some log translations.
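
Roughly, the calling pattern changes like this (an illustrative sketch,
not a verbatim excerpt; the daemon-specific names are placeholders):

    # Before: check_drive() returned the device path, or None on any
    # failure, so callers could only log a generic "not mounted" message.
    if not check_drive(self.devices, device, self.mount_check):
        self.logger.warning('Skipping %s as it is not mounted', device)
        continue

    # After: check_drive() raises ValueError whose message says whether
    # the path is unmounted, not a directory, or an invalid drive name.
    try:
        dev_path = check_drive(self.devices, device, self.mount_check)
    except ValueError as err:
        self.logger.warning('Skipping: %s', err)
        continue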

Related-Change: I941ffbc568ebfa5964d49964dc20c382a5e2ec2a
Related-Change: I3362a6ebff423016bb367b4b6b322bb41ae08764
Change-Id: Ife0d34f9482adb4524d1ab1fe6c335c6b287c2fd
Partial-Bug: 1674543
Tim Burke 2017-09-15 06:15:40 +00:00
parent c568b4b100
commit f192f51d37
16 changed files with 165 additions and 91 deletions


@@ -134,10 +134,11 @@ class AccountReaper(Daemon):
         begin = time()
         try:
             for device in os.listdir(self.devices):
-                if not check_drive(self.devices, device, self.mount_check):
+                try:
+                    check_drive(self.devices, device, self.mount_check)
+                except ValueError as err:
                     self.logger.increment('errors')
-                    self.logger.debug(
-                        _('Skipping %s as it is not mounted'), device)
+                    self.logger.debug('Skipping: %s', err)
                     continue
                 self.reap_device(device)
         except (Exception, Timeout):


@@ -88,7 +88,9 @@ class AccountController(BaseStorageServer):
     def DELETE(self, req):
         """Handle HTTP DELETE request."""
         drive, part, account = split_and_validate_path(req, 3)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         req_timestamp = valid_timestamp(req)
         broker = self._get_account_broker(drive, part, account)
@@ -102,7 +104,9 @@ class AccountController(BaseStorageServer):
     def PUT(self, req):
         """Handle HTTP PUT request."""
         drive, part, account, container = split_and_validate_path(req, 3, 4)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         if container:   # put account container
             if 'x-timestamp' not in req.headers:
@@ -169,7 +173,9 @@ class AccountController(BaseStorageServer):
         """Handle HTTP HEAD request."""
         drive, part, account = split_and_validate_path(req, 3)
         out_content_type = listing_formats.get_listing_content_type(req)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         broker = self._get_account_broker(drive, part, account,
                                           pending_timeout=0.1,
@@ -204,7 +210,9 @@ class AccountController(BaseStorageServer):
         end_marker = get_param(req, 'end_marker')
         out_content_type = listing_formats.get_listing_content_type(req)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         broker = self._get_account_broker(drive, part, account,
                                           pending_timeout=0.1,
@@ -225,7 +233,9 @@ class AccountController(BaseStorageServer):
         """
         post_args = split_and_validate_path(req, 3)
         drive, partition, hash = post_args
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         try:
             args = json.load(req.environ['wsgi.input'])
@@ -241,7 +251,9 @@ class AccountController(BaseStorageServer):
         """Handle HTTP POST request."""
         drive, part, account = split_and_validate_path(req, 3)
         req_timestamp = valid_timestamp(req)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         broker = self._get_account_broker(drive, part, account)
         if broker.is_deleted():


@@ -229,7 +229,8 @@ def check_dir(root, drive):
     :param root: base path where the dir is
     :param drive: drive name to be checked
-    :returns: full path to the device, or None if drive fails to validate
+    :returns: full path to the device
+    :raises ValueError: if drive fails to validate
     """
     return check_drive(root, drive, False)
@@ -243,7 +244,8 @@ def check_mount(root, drive):
     :param root: base path where the devices are mounted
     :param drive: drive name to be checked
-    :returns: full path to the device, or None if drive fails to validate
+    :returns: full path to the device
+    :raises ValueError: if drive fails to validate
     """
     return check_drive(root, drive, True)
@@ -256,18 +258,19 @@ def check_drive(root, drive, mount_check):
     :param drive: drive name to be checked
     :param mount_check: additionally require path is mounted
-    :returns: full path to the device, or None if drive fails to validate
+    :returns: full path to the device
+    :raises ValueError: if drive fails to validate
     """
     if not (urllib.parse.quote_plus(drive) == drive):
-        return None
+        raise ValueError('%s is not a valid drive name' % drive)
     path = os.path.join(root, drive)
     if mount_check:
-        if utils.ismount(path):
-            return path
+        if not utils.ismount(path):
+            raise ValueError('%s is not mounted' % path)
     else:
-        if isdir(path):
-            return path
-    return None
+        if not isdir(path):
+            raise ValueError('%s is not a directory' % path)
+    return path


 def check_float(string):
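
For reference, a minimal usage sketch of the updated helper (the devices
root and drive name below are made-up values, not taken from this change):

    from swift.common.constraints import check_drive

    # Hypothetical devices root and drive name, for illustration only.
    root = '/srv/node'
    try:
        dev_path = check_drive(root, 'sdb1', True)  # mount_check=True
    except ValueError as err:
        # The message names the specific failure, e.g.
        #   'sd?b1 is not a valid drive name'
        #   '/srv/node/sdb1 is not mounted'        (mount_check=True)
        #   '/srv/node/sdb1 is not a directory'    (mount_check=False)
        print('Skipping: %s' % err)
    else:
        print('Using %s' % dev_path)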


@@ -761,14 +761,15 @@ class Replicator(Daemon):
                                         node['replication_ip'],
                                         node['replication_port']):
                 found_local = True
-                if not check_drive(self.root, node['device'],
-                                   self.mount_check):
+                try:
+                    dev_path = check_drive(self.root, node['device'],
+                                           self.mount_check)
+                except ValueError as err:
                     self._add_failure_stats(
                         [(failure_dev['replication_ip'],
                           failure_dev['device'])
                          for failure_dev in self.ring.devs if failure_dev])
-                    self.logger.warning(
-                        _('Skipping %(device)s as it is not mounted') % node)
+                    self.logger.warning('Skipping: %s', err)
                     continue
                 if node['device'] not in devices_to_replicate:
                     self.logger.debug(
@@ -776,7 +777,7 @@ class Replicator(Daemon):
                         node['device'])
                     continue
                 unlink_older_than(
-                    os.path.join(self.root, node['device'], 'tmp'),
+                    os.path.join(dev_path, 'tmp'),
                     time.time() - self.reclaim_age)
                 datadir = os.path.join(self.root, node['device'], self.datadir)
                 if os.path.isdir(datadir):
@@ -835,9 +836,11 @@ class ReplicatorRpc(object):
             return HTTPBadRequest(body='Invalid object type')
         op = args.pop(0)
         drive, partition, hsh = replicate_args
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            dev_path = check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return Response(status='507 %s is not mounted' % drive)
-        db_file = os.path.join(self.root, drive,
+        db_file = os.path.join(dev_path,
                                storage_directory(self.datadir, partition, hsh),
                                hsh + '.db')
         if op == 'rsync_then_merge':


@@ -209,12 +209,14 @@ class ReconMiddleware(object):
                 continue
             try:
-                mounted = bool(check_mount(self.devices, entry))
+                check_mount(self.devices, entry)
             except OSError as err:
                 mounted = str(err)
-            mpoint = {'device': entry, 'mounted': mounted}
-            if mpoint['mounted'] is not True:
-                mountlist.append(mpoint)
+            except ValueError:
+                mounted = False
+            else:
+                continue
+            mountlist.append({'device': entry, 'mounted': mounted})
         return mountlist

     def get_diskusage(self):
@@ -225,13 +227,14 @@ class ReconMiddleware(object):
                 continue
             try:
-                mounted = bool(check_mount(self.devices, entry))
+                check_mount(self.devices, entry)
             except OSError as err:
                 devices.append({'device': entry, 'mounted': str(err),
                                 'size': '', 'used': '', 'avail': ''})
-                continue
-            if mounted:
+            except ValueError:
+                devices.append({'device': entry, 'mounted': False,
+                                'size': '', 'used': '', 'avail': ''})
+            else:
                 path = os.path.join(self.devices, entry)
                 disk = os.statvfs(path)
                 capacity = disk.f_bsize * disk.f_blocks
@@ -240,9 +243,6 @@ class ReconMiddleware(object):
                 devices.append({'device': entry, 'mounted': True,
                                 'size': capacity, 'used': used,
                                 'avail': available})
-            else:
-                devices.append({'device': entry, 'mounted': False,
-                                'size': '', 'used': '', 'avail': ''})
         return devices

     def get_ring_md5(self):


@@ -305,7 +305,9 @@ class ContainerController(BaseStorageServer):
         drive, part, account, container, obj = split_and_validate_path(
             req, 4, 5, True)
         req_timestamp = valid_timestamp(req)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         # policy index is only relevant for delete_obj (and transitively for
         # auto create accounts)
@@ -432,7 +434,9 @@ class ContainerController(BaseStorageServer):
                 self.realms_conf)
             if err:
                 return HTTPBadRequest(err)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         requested_policy_index = self.get_and_validate_policy_index(req)
         broker = self._get_container_broker(drive, part, account, container)
@@ -502,7 +506,9 @@ class ContainerController(BaseStorageServer):
         drive, part, account, container, obj = split_and_validate_path(
             req, 4, 5, True)
         out_content_type = listing_formats.get_listing_content_type(req)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         broker = self._get_container_broker(drive, part, account, container,
                                             pending_timeout=0.1,
@@ -635,7 +641,9 @@ class ContainerController(BaseStorageServer):
                 body='Maximum limit is %d'
                 % constraints.CONTAINER_LISTING_LIMIT)
         out_content_type = listing_formats.get_listing_content_type(req)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         broker = self._get_container_broker(drive, part, account, container,
                                             pending_timeout=0.1,
@@ -714,7 +722,9 @@ class ContainerController(BaseStorageServer):
         """
         post_args = split_and_validate_path(req, 3)
         drive, partition, hash = post_args
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         try:
             args = json.load(req.environ['wsgi.input'])
@@ -736,7 +746,9 @@ class ContainerController(BaseStorageServer):
                 self.realms_conf)
             if err:
                 return HTTPBadRequest(err)
-        if not check_drive(self.root, drive, self.mount_check):
+        try:
+            check_drive(self.root, drive, self.mount_check)
+        except ValueError:
             return HTTPInsufficientStorage(drive=drive, request=req)
         broker = self._get_container_broker(drive, part, account, container)
         if broker.is_deleted():


@@ -101,9 +101,10 @@ class ContainerUpdater(Daemon):
         """
         paths = []
         for device in self._listdir(self.devices):
-            dev_path = check_drive(self.devices, device, self.mount_check)
-            if not dev_path:
-                self.logger.warning(_('%s is not mounted'), device)
+            try:
+                dev_path = check_drive(self.devices, device, self.mount_check)
+            except ValueError as err:
+                self.logger.warning("%s", err)
                 continue
             con_path = os.path.join(dev_path, DATADIR)
             if not os.path.exists(con_path):


@@ -484,11 +484,11 @@ def object_audit_location_generator(devices, datadir, mount_check=True,
     base, policy = split_policy_string(datadir)
     for device in device_dirs:
-        if not check_drive(devices, device, mount_check):
+        try:
+            check_drive(devices, device, mount_check)
+        except ValueError as err:
             if logger:
-                logger.debug(
-                    'Skipping %s as it is not %s', device,
-                    'mounted' if mount_check else 'a dir')
+                logger.debug('Skipping: %s', err)
             continue
         datadir_path = os.path.join(devices, device, datadir)
@@ -1248,11 +1248,11 @@ class BaseDiskFileManager(object):
             # explicitly forbidden from syscall, just return path
             return join(self.devices, device)
         # we'll do some kind of check if not explicitly forbidden
-        if mount_check or self.mount_check:
-            mount_check = True
-        else:
-            mount_check = False
-        return check_drive(self.devices, device, mount_check)
+        try:
+            return check_drive(self.devices, device,
+                               mount_check or self.mount_check)
+        except ValueError:
+            return None

     @contextmanager
     def replication_lock(self, device):


@@ -792,17 +792,17 @@ class ObjectReplicator(Daemon):
                              and (override_devices is None
                                   or dev['device'] in override_devices))]:
             found_local = True
-            dev_path = check_drive(self.devices_dir, local_dev['device'],
-                                   self.mount_check)
             local_dev_stats = self.stats_for_dev[local_dev['device']]
-            if not dev_path:
+            try:
+                dev_path = check_drive(self.devices_dir, local_dev['device'],
+                                       self.mount_check)
+            except ValueError as err:
                 local_dev_stats.add_failure_stats(
                     [(failure_dev['replication_ip'],
                       failure_dev['device'])
                      for failure_dev in policy.object_ring.devs
                      if failure_dev])
-                self.logger.warning(
-                    _('%s is not mounted'), local_dev['device'])
+                self.logger.warning("%s", err)
                 continue
             obj_path = join(dev_path, data_dir)
             tmp_path = join(dev_path, get_tmp_dir(policy))
@@ -933,13 +933,14 @@ class ObjectReplicator(Daemon):
             dev_stats = self.stats_for_dev[job['device']]
             num_jobs += 1
             current_nodes = job['nodes']
-            dev_path = check_drive(self.devices_dir, job['device'],
-                                   self.mount_check)
-            if not dev_path:
+            try:
+                check_drive(self.devices_dir, job['device'],
+                            self.mount_check)
+            except ValueError as err:
                 dev_stats.add_failure_stats([
                     (failure_dev['replication_ip'], failure_dev['device'])
                     for failure_dev in job['nodes']])
-                self.logger.warning(_('%s is not mounted'), job['device'])
+                self.logger.warning("%s", err)
                 continue
             if self.handoffs_first and not job['delete']:
                 # in handoffs first mode, we won't process primary


@@ -144,12 +144,14 @@ class ObjectUpdater(Daemon):
             # read from container ring to ensure it's fresh
             self.get_container_ring().get_nodes('')
             for device in self._listdir(self.devices):
-                if not check_drive(self.devices, device, self.mount_check):
+                try:
+                    dev_path = check_drive(self.devices, device,
+                                           self.mount_check)
+                except ValueError as err:
                     # We don't count this as an error. The occasional
                     # unmounted drive is part of normal cluster operations,
                     # so a simple warning is sufficient.
-                    self.logger.warning(
-                        _('Skipping %s as it is not mounted'), device)
+                    self.logger.warning('Skipping: %s', err)
                     continue
                 while len(pids) >= self.updater_workers:
                     pids.remove(os.wait()[0])
@@ -161,7 +163,7 @@ class ObjectUpdater(Daemon):
                     eventlet_monkey_patch()
                     self.stats.reset()
                     forkbegin = time.time()
-                    self.object_sweep(os.path.join(self.devices, device))
+                    self.object_sweep(dev_path)
                     elapsed = time.time() - forkbegin
                     self.logger.info(
                         ('Object update sweep of %(device)s '
@@ -185,14 +187,15 @@ class ObjectUpdater(Daemon):
         begin = time.time()
         self.stats.reset()
         for device in self._listdir(self.devices):
-            if not check_drive(self.devices, device, self.mount_check):
+            try:
+                dev_path = check_drive(self.devices, device, self.mount_check)
+            except ValueError as err:
                 # We don't count this as an error. The occasional unmounted
                 # drive is part of normal cluster operations, so a simple
                 # warning is sufficient.
-                self.logger.warning(
-                    _('Skipping %s as it is not mounted'), device)
+                self.logger.warning('Skipping: %s', err)
                 continue
-            self.object_sweep(os.path.join(self.devices, device))
+            self.object_sweep(dev_path)
         elapsed = time.time() - begin
         self.logger.info(
             ('Object update single-threaded sweep completed: '
@@ -213,11 +216,11 @@ class ObjectUpdater(Daemon):
         for asyncdir in self._listdir(device):
             # we only care about directories
             async_pending = os.path.join(device, asyncdir)
-            if not os.path.isdir(async_pending):
-                continue
             if not asyncdir.startswith(ASYNCDIR_BASE):
                 # skip stuff like "accounts", "containers", etc.
                 continue
+            if not os.path.isdir(async_pending):
+                continue
             try:
                 base, policy = split_policy_string(asyncdir)
             except PolicyError as e:


@@ -407,10 +407,29 @@ class TestConstraints(unittest.TestCase):
     def test_check_drive_invalid_path(self):
         root = '/srv/'
         with mock_check_drive() as mocks:
-            self.assertIsNone(constraints.check_dir(root, 'foo?bar'))
-            self.assertIsNone(constraints.check_mount(root, 'foo bar'))
-            self.assertIsNone(constraints.check_drive(root, 'foo/bar', True))
-            self.assertIsNone(constraints.check_drive(root, 'foo%bar', False))
+            drive = 'foo?bar'
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_dir(root, drive)
+            self.assertEqual(str(exc_mgr.exception),
+                             '%s is not a valid drive name' % drive)
+
+            drive = 'foo bar'
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_mount(root, drive)
+            self.assertEqual(str(exc_mgr.exception),
+                             '%s is not a valid drive name' % drive)
+
+            drive = 'foo/bar'
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_drive(root, drive, True)
+            self.assertEqual(str(exc_mgr.exception),
+                             '%s is not a valid drive name' % drive)
+
+            drive = 'foo%bar'
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_drive(root, drive, False)
+            self.assertEqual(str(exc_mgr.exception),
+                             '%s is not a valid drive name' % drive)
+
             self.assertEqual([], mocks['isdir'].call_args_list)
             self.assertEqual([], mocks['ismount'].call_args_list)
@@ -418,11 +437,20 @@ class TestConstraints(unittest.TestCase):
         root = '/srv'
         path = 'sdb1'
         with mock_check_drive(ismount=True) as mocks:
-            self.assertIsNone(constraints.check_dir(root, path))
-            self.assertIsNone(constraints.check_drive(root, path, False))
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_dir(root, path)
+            self.assertEqual(str(exc_mgr.exception),
+                             '/srv/sdb1 is not a directory')
+
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_drive(root, path, False)
+            self.assertEqual(str(exc_mgr.exception),
+                             '/srv/sdb1 is not a directory')
+
             self.assertEqual([mock.call('/srv/sdb1'), mock.call('/srv/sdb1')],
                              mocks['isdir'].call_args_list)
             self.assertEqual([], mocks['ismount'].call_args_list)

         with mock_check_drive(ismount=True) as mocks:
             self.assertEqual('/srv/sdb1', constraints.check_mount(root, path))
             self.assertEqual('/srv/sdb1', constraints.check_drive(
@@ -441,9 +469,18 @@ class TestConstraints(unittest.TestCase):
             self.assertEqual([mock.call('/srv/sdb2'), mock.call('/srv/sdb2')],
                              mocks['isdir'].call_args_list)
             self.assertEqual([], mocks['ismount'].call_args_list)

         with mock_check_drive(isdir=True) as mocks:
-            self.assertIsNone(constraints.check_mount(root, path))
-            self.assertIsNone(constraints.check_drive(root, path, True))
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_mount(root, path)
+            self.assertEqual(str(exc_mgr.exception),
+                             '/srv/sdb2 is not mounted')
+
+            with self.assertRaises(ValueError) as exc_mgr:
+                constraints.check_drive(root, path, True)
+            self.assertEqual(str(exc_mgr.exception),
+                             '/srv/sdb2 is not mounted')
+
             self.assertEqual([], mocks['isdir'].call_args_list)
             self.assertEqual([mock.call('/srv/sdb2'), mock.call('/srv/sdb2')],
                              mocks['ismount'].call_args_list)


@@ -545,11 +545,13 @@ class TestDBReplicator(unittest.TestCase):
         self.assertEqual(replicator.mount_check, True)
         self.assertEqual(replicator.port, 6200)

+        err = ValueError('Boom!')
+
         def mock_check_drive(root, device, mount_check):
             self.assertEqual(root, replicator.root)
             self.assertEqual(device, replicator.ring.devs[0]['device'])
             self.assertEqual(mount_check, True)
-            return None
+            raise err

         self._patch(patch.object, db_replicator, 'check_drive',
                     mock_check_drive)
@@ -557,8 +559,7 @@ class TestDBReplicator(unittest.TestCase):
         self.assertEqual(
             replicator.logger.log_dict['warning'],
-            [(('Skipping %(device)s as it is not mounted' %
-               replicator.ring.devs[0],), {})])
+            [(('Skipping: %s', (err, )), {})])

     def test_run_once_node_is_mounted(self):
         db_replicator.ring = FakeRingWithSingleNode()


@@ -2851,7 +2851,7 @@ class TestContainerController(unittest.TestCase):
         resp = do_test({'limit': str(constraints.CONTAINER_LISTING_LIMIT + 1)})
         self.assertEqual(resp.status_int, 412)

         with mock.patch('swift.container.server.check_drive',
-                        lambda *args: False):
+                        side_effect=ValueError('sda1 is not mounted')):
             resp = do_test({})
             self.assertEqual(resp.status_int, 507)


@@ -148,7 +148,7 @@ class TestContainerUpdater(unittest.TestCase):
             cu.run_once()
         log_lines = self.logger.get_lines_for_level('warning')
         self.assertGreater(len(log_lines), 0)
-        msg = 'sda1 is not mounted'
+        msg = '%s is not mounted' % self.sda1
         self.assertEqual(log_lines[0], msg)
         # Ensure that the container_sweep did not run
         self.assertFalse(mock_sweep.called)


@@ -477,7 +477,7 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
                 logger=logger)]
             debug_lines = logger.get_lines_for_level('debug')
             self.assertEqual([
-                'Skipping sdq as it is not mounted',
+                'Skipping: %s/sdq is not mounted' % tmpdir,
             ], debug_lines)

     def test_skipping_files(self):
@@ -509,7 +509,7 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
                 logger=logger)]
             debug_lines = logger.get_lines_for_level('debug')
             self.assertEqual([
-                'Skipping garbage as it is not a dir',
+                'Skipping: %s/garbage is not a directory' % tmpdir,
             ], debug_lines)
             logger.clear()
@@ -523,7 +523,7 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
                 logger=logger)]
             debug_lines = logger.get_lines_for_level('debug')
             self.assertEqual([
-                'Skipping garbage as it is not mounted',
+                'Skipping: %s/garbage is not mounted' % tmpdir,
             ], debug_lines)

     def test_only_catch_expected_errors(self):


@@ -391,7 +391,7 @@ class TestObjectUpdater(unittest.TestCase):

     @mock.patch.object(object_updater, 'check_drive')
     def test_run_once_with_disk_unmounted(self, mock_check_drive):
-        mock_check_drive.return_value = False
+        mock_check_drive.side_effect = ValueError
         ou = object_updater.ObjectUpdater({
             'devices': self.devices_dir,
             'mount_check': 'false',
@@ -431,7 +431,7 @@ class TestObjectUpdater(unittest.TestCase):

     @mock.patch.object(object_updater, 'check_drive')
     def test_run_once(self, mock_check_drive):
-        mock_check_drive.return_value = True
+        mock_check_drive.side_effect = lambda r, d, mc: os.path.join(r, d)
         ou = object_updater.ObjectUpdater({
             'devices': self.devices_dir,
             'mount_check': 'false',