Fix multiple issues with revert to snapshot in LVM
Reverts with the LVM driver were happening asynchronously, which resulted in
unreliable behavior. This change makes the reverts synchronous. The driver now
also unexports and unmounts both the share and the snapshot during the revert
(required to make the operation synchronous), which requires changing the
driver interface to include the snapshot access rules in addition to the
share access rules.

Closes-bug: #1707943
Change-Id: Ifea1799e1d2e3963fec7e90ce3f9cb47b9f02f4f
parent 5084efe621
commit 5cb87d5dc3
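At a high level, the revert path in the LVM driver now runs strictly in
order, with nothing left to happen asynchronously after the call returns:

    1. Drop all access rules from both the snapshot and the share exports.
    2. Unmount the snapshot LV, then the share LV.
    3. lvconvert --merge the snapshot into the share (deferred by LVM while
       the origin LV is active).
    4. lvchange -an / lvchange -ay the share LV so the merge actually runs.
    5. Recreate the snapshot LV consumed by the merge.
    6. Remount the share and snapshot and restore both sets of access rules.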
@@ -177,5 +177,9 @@ e2fsck: CommandFilter, e2fsck, root

 # manila/share/drivers/lvm.py: lvconvert --merge %s
 lvconvert: CommandFilter, lvconvert, root

+# manila/share/drivers/lvm.py: lvchange -an %s
+# manila/share/drivers/lvm.py: lvchange -ay %s
+lvchange: CommandFilter, lvchange, root
+
 # manila/data/utils.py: 'sha256sum', '%s'
 sha256sum: CommandFilter, sha256sum, root
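The driver invokes these binaries with run_as_root=True, and manila's
rootwrap only permits commands that match a CommandFilter entry, which is why
lvchange gains a filter here. A standalone sketch of the calls the entries
authorize (subprocess stands in for manila's self._execute helper; the VG and
LV names are made up):

    import subprocess

    def run_as_root(*cmd):
        # In manila this is routed through sudo + manila-rootwrap, which
        # rejects any command without a matching CommandFilter entry.
        subprocess.check_call(('sudo',) + cmd)

    run_as_root('lvconvert', '--merge', 'fakevg/fakesnapshotname')
    run_as_root('lvchange', '-an', 'fakevg/fakename')
    run_as_root('lvchange', '-ay', 'fakevg/fakename')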
@@ -991,8 +991,8 @@ class ShareDriver(object):
         the failure.
         """

-    def revert_to_snapshot(self, context, snapshot, access_rules,
-                           share_server=None):
+    def revert_to_snapshot(self, context, snapshot, share_access_rules,
+                           snapshot_access_rules, share_server=None):
         """Reverts a share (in place) to the specified snapshot.

         Does not delete the share snapshot. The share and snapshot must both
@@ -1006,7 +1006,10 @@ class ShareDriver(object):

         :param context: Current context
         :param snapshot: The snapshot to be restored
-        :param access_rules: List of all access rules for the affected share
+        :param share_access_rules: List of all access rules for the affected
+            share
+        :param snapshot_access_rules: List of all access rules for the
+            affected snapshot
         :param share_server: Optional -- Share server model or None
         """
         raise NotImplementedError()
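A hypothetical third-party driver adopting the new contract would look like
the stub below; the class name and comments are illustrative, not part of
this change:

    from manila.share import driver


    class ExampleDriver(driver.ShareDriver):

        def revert_to_snapshot(self, context, snapshot, share_access_rules,
                               snapshot_access_rules, share_server=None):
            # A synchronous implementation is expected to (1) suspend access
            # using both rule lists, (2) roll the share back to the snapshot
            # before returning, and (3) restore share_access_rules and
            # snapshot_access_rules afterwards.
            raise NotImplementedError()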
@@ -2103,7 +2106,8 @@ class ShareDriver(object):

     def revert_to_replicated_snapshot(self, context, active_replica,
                                       replica_list, active_replica_snapshot,
-                                      replica_snapshots, access_rules,
+                                      replica_snapshots, share_access_rules,
+                                      snapshot_access_rules,
                                       share_server=None):
         """Reverts a replicated share (in place) to the specified snapshot.

@@ -2130,7 +2134,9 @@ class ShareDriver(object):
             These snapshot instances track the snapshot across the replicas.
             The snapshot of the active replica to be restored will have its
             status attribute set to 'restoring'.
-        :param access_rules: List of access rules for the affected share.
+        :param share_access_rules: List of access rules for the affected share.
+        :param snapshot_access_rules: List of access rules for the affected
+            snapshot.
         :param share_server: Optional -- Share server model
         """
         raise NotImplementedError()
@@ -807,14 +807,16 @@ class HitachiHNASDriver(driver.ShareDriver):
                      {'shr_id': share['id'],
                       'shr_size': six.text_type(new_size)})

-    def revert_to_snapshot(self, context, snapshot, access_rules,
-                           share_server=None):
+    def revert_to_snapshot(self, context, snapshot, share_access_rules,
+                           snapshot_access_rules, share_server=None):
         """Reverts a share to a given snapshot.

         :param context: The `context.RequestContext` object for the request
         :param snapshot: The snapshot to which the share is to be reverted.
-        :param access_rules: List of all access rules for the affected share.
-            Not used by this driver.
+        :param share_access_rules: List of all access rules for the affected
+            share. Not used by this driver.
+        :param snapshot_access_rules: List of all access rules for the
+            affected snapshot. Not used by this driver.
         :param share_server: Data structure with share server information.
             Not used by this driver.
         """
@@ -260,27 +260,26 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
         return location

     def delete_share(self, context, share, share_server=None):
-        self._remove_export(context, share)
+        self._unmount_device(share)
         self._delete_share(context, share)
         self._deallocate_container(share['name'])

-    def _remove_export(self, ctx, share):
-        """Removes an access rules for a share."""
-        mount_path = self._get_mount_path(share)
+    def _unmount_device(self, share_or_snapshot):
+        """Unmount the filesystem of a share or snapshot LV."""
+        mount_path = self._get_mount_path(share_or_snapshot)
         if os.path.exists(mount_path):
             # umount, may be busy
             try:
                 self._execute('umount', '-f', mount_path, run_as_root=True)
             except exception.ProcessExecutionError as exc:
                 if 'device is busy' in six.text_type(exc):
-                    raise exception.ShareBusyException(reason=share['name'])
+                    raise exception.ShareBusyException(
+                        reason=share_or_snapshot['name'])
                 else:
-                    LOG.info('Unable to umount: %s', exc)
+                    LOG.error('Unable to umount: %s', exc)
+                    raise
             # remove dir
-            try:
-                os.rmdir(mount_path)
-            except OSError:
-                LOG.warning('Unable to delete %s', mount_path)
+            self._execute('rmdir', mount_path, run_as_root=True)

     def ensure_share(self, ctx, share, share_server=None):
         """Ensure that storage is mounted and exported."""
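The busy-detection in _unmount_device can be read in isolation; a minimal
sketch with subprocess in place of self._execute and a generic exception in
place of ShareBusyException (mount path illustrative):

    import os
    import subprocess

    def unmount_lv(mount_path):
        if os.path.exists(mount_path):
            try:
                subprocess.run(['sudo', 'umount', '-f', mount_path],
                               check=True, capture_output=True, text=True)
            except subprocess.CalledProcessError as exc:
                if 'device is busy' in (exc.stderr or ''):
                    # The driver raises ShareBusyException here so the
                    # manager can surface a retryable error.
                    raise RuntimeError('mount busy: %s' % mount_path)
                # Anything else is unexpected: log and propagate, rather
                # than swallowing it as the old code did.
                raise
            subprocess.run(['sudo', 'rmdir', mount_path], check=True)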
@@ -336,9 +335,9 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
         else:
             raise exception.InvalidShare(reason='Wrong share protocol')

-    def _mount_device(self, share, device_name):
-        """Mount LVM share and ignore if already mounted."""
-        mount_path = self._get_mount_path(share)
+    def _mount_device(self, share_or_snapshot, device_name):
+        """Mount LV for share or snapshot and ignore if already mounted."""
+        mount_path = self._get_mount_path(share_or_snapshot)
         self._execute('mkdir', '-p', mount_path)
         try:
             self._execute('mount', device_name, mount_path,
@@ -353,15 +352,10 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
                 raise
         return mount_path

-    def _unmount_device(self, share):
-        mount_path = self._get_mount_path(share)
-        self._execute('umount', mount_path, run_as_root=True)
-        self._execute('rmdir', mount_path, run_as_root=True)
-
-    def _get_mount_path(self, share):
-        """Returns path where share is mounted."""
+    def _get_mount_path(self, share_or_snapshot):
+        """Returns path where share or snapshot is mounted."""
         return os.path.join(self.configuration.share_mount_path,
-                            share['name'])
+                            share_or_snapshot['name'])

     def _copy_volume(self, srcstr, deststr, size_in_g):
         # Use O_DIRECT to avoid thrashing the system buffer cache
@@ -384,53 +378,56 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
         self._extend_container(share, device_name, new_size)
         self._execute('resize2fs', device_name, run_as_root=True)

-    def revert_to_snapshot(self, context, snapshot, access_rules,
-                           share_server=None):
+    def revert_to_snapshot(self, context, snapshot, share_access_rules,
+                           snapshot_access_rules, share_server=None):
         share = snapshot['share']
         # Temporarily remove all access rules
+        self._get_helper(share).update_access(self.share_server,
+                                              snapshot['name'], [], [], [])
         self._get_helper(share).update_access(self.share_server,
                                               share['name'], [], [], [])
-        # Unmount the filesystem
-        self._remove_export(context, snapshot)
-        # First we merge the snapshot LV and the share LV
-        # This won't actually do anything until the LV is reactivated
+        # Unmount the snapshot filesystem
+        self._unmount_device(snapshot)
+        # Unmount the share filesystem
+        self._unmount_device(share)
+        # Merge the snapshot LV back into the share, reverting it
         snap_lv_name = "%s/%s" % (self.configuration.lvm_share_volume_group,
                                   snapshot['name'])
         self._execute('lvconvert', '--merge', snap_lv_name, run_as_root=True)
-        # Unmount the share so we can deactivate it
-        self._unmount_device(share)
         # Deactivate the share LV
         share_lv_name = "%s/%s" % (self.configuration.lvm_share_volume_group,
                                    share['name'])
         self._execute('lvchange', '-an', share_lv_name, run_as_root=True)
         # Reactivate the share LV. This will trigger the merge and delete the
         # snapshot.
         self._execute('lvchange', '-ay', share_lv_name, run_as_root=True)

         # Now recreate the snapshot that was destroyed by the merge
         self._create_snapshot(context, snapshot)
         # At this point we can mount the share again
         device_name = self._get_local_path(share)
         self._mount_device(share, device_name)
+        # Also remount the snapshot
+        device_name = self._get_local_path(snapshot)
+        self._mount_device(snapshot, device_name)
         # Lastly we add all the access rules back
         self._get_helper(share).update_access(self.share_server,
-                                              share['name'], access_rules,
+                                              share['name'],
+                                              share_access_rules,
                                               [], [])
+        snapshot_access_rules, __, __ = utils.change_rules_to_readonly(
+            snapshot_access_rules, [], [])
+        self._get_helper(share).update_access(self.share_server,
+                                              snapshot['name'],
+                                              snapshot_access_rules,
+                                              [], [])

     def create_snapshot(self, context, snapshot, share_server=None):
         self._create_snapshot(context, snapshot)

-        helper = self._get_helper(snapshot['share'])
-        exports = helper.create_exports(self.share_server, snapshot['name'])
-
         device_name = self._get_local_path(snapshot)
         self._mount_device(snapshot, device_name)

+        helper = self._get_helper(snapshot['share'])
+        exports = helper.create_exports(self.share_server, snapshot['name'])
+
         return {'export_locations': exports}

     def delete_snapshot(self, context, snapshot, share_server=None):
-        self._remove_export(context, snapshot)
+        self._unmount_device(snapshot)

         super(LVMShareDriver, self).delete_snapshot(context, snapshot,
                                                     share_server)
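The synchronous behavior hinges on an LVM detail: lvconvert --merge against
an active origin is deferred until the origin LV is next activated, so the
driver deactivates and reactivates the share LV to force the merge to take
effect before proceeding, and then recreates the snapshot the merge consumed.
A runnable sketch of that sequence (assumes root, the LVM2 tools, and a
volume group and LVs named as in the tests below):

    import subprocess

    def run(*cmd):
        subprocess.check_call(('sudo',) + cmd)

    # Schedule the merge; while the origin LV is active, LVM defers it.
    run('lvconvert', '--merge', 'fakevg/fakesnapshotname')
    # Deactivate and reactivate the origin: the deferred merge takes effect
    # here, which is what makes the revert synchronous for manila.
    run('lvchange', '-an', 'fakevg/fakename')
    run('lvchange', '-ay', 'fakevg/fakename')
    # The merge deletes the snapshot LV, so recreate it afterwards.
    run('lvcreate', '-L', '1G', '--name', 'fakesnapshotname',
        '--snapshot', 'fakevg/fakename')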
@@ -55,7 +55,8 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
     def create_snapshot(self, context, snapshot, **kwargs):
         return self.library.create_snapshot(context, snapshot, **kwargs)

-    def revert_to_snapshot(self, context, snapshot, access_rules, **kwargs):
+    def revert_to_snapshot(self, context, snapshot, share_access_rules,
+                           snapshot_access_rules, **kwargs):
         return self.library.revert_to_snapshot(context, snapshot, **kwargs)

     def delete_share(self, context, share, **kwargs):
@@ -170,7 +171,8 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):

     def revert_to_replicated_snapshot(self, context, active_replica,
                                       replica_list, active_replica_snapshot,
-                                      replica_snapshots, access_rules,
+                                      replica_snapshots, share_access_rules,
+                                      snapshot_access_rules,
                                       share_server=None):
         raise NotImplementedError()
@@ -55,7 +55,8 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
     def create_snapshot(self, context, snapshot, **kwargs):
         return self.library.create_snapshot(context, snapshot, **kwargs)

-    def revert_to_snapshot(self, context, snapshot, access_rules, **kwargs):
+    def revert_to_snapshot(self, context, snapshot, share_access_rules,
+                           snapshot_access_rules, **kwargs):
         return self.library.revert_to_snapshot(context, snapshot, **kwargs)

     def delete_share(self, context, share, **kwargs):
@@ -185,7 +186,8 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):

     def revert_to_replicated_snapshot(self, context, active_replica,
                                       replica_list, active_replica_snapshot,
-                                      replica_snapshots, access_rules,
+                                      replica_snapshots, share_access_rules,
+                                      snapshot_access_rules,
                                       **kwargs):
         return self.library.revert_to_replicated_snapshot(
             context, active_replica, replica_list, active_replica_snapshot,
@@ -2597,20 +2597,24 @@ class ShareManager(manager.SchedulerDependentManager):
         share = snapshot['share']
         share_id = share['id']
         share_instance_id = snapshot.instance.share_instance_id
-        access_rules = self.access_helper.get_share_instance_access_rules(
-            context, filters={'state': constants.STATUS_ACTIVE},
-            share_instance_id=share_instance_id)
+        share_access_rules = (
+            self.access_helper.get_share_instance_access_rules(
+                context, filters={'state': constants.STATUS_ACTIVE},
+                share_instance_id=share_instance_id))
+        snapshot_access_rules = (
+            self.snapshot_access_helper.get_snapshot_instance_access_rules(
+                context, snapshot.instance['id']))

         if share.get('has_replicas'):
             self._revert_to_replicated_snapshot(
-                context, share, snapshot, reservations, access_rules,
-                share_id=share_id)
+                context, share, snapshot, reservations, share_access_rules,
+                snapshot_access_rules, share_id=share_id)
         else:
             self._revert_to_snapshot(context, share, snapshot, reservations,
-                                     access_rules)
+                                     share_access_rules, snapshot_access_rules)

     def _revert_to_snapshot(self, context, share, snapshot, reservations,
-                            access_rules):
+                            share_access_rules, snapshot_access_rules):

         share_server = self._get_share_server(context, share)
         share_id = share['id']
@@ -2629,7 +2633,8 @@ class ShareManager(manager.SchedulerDependentManager):
         try:
             self.driver.revert_to_snapshot(context,
                                            snapshot_instance_dict,
-                                           access_rules,
+                                           share_access_rules,
+                                           snapshot_access_rules,
                                            share_server=share_server)
         except Exception as excep:
             with excutils.save_and_reraise_exception():
@@ -2975,8 +2980,8 @@ class ShareManager(manager.SchedulerDependentManager):

     @locked_share_replica_operation
     def _revert_to_replicated_snapshot(self, context, share, snapshot,
-                                       reservations, access_rules,
-                                       share_id=None):
+                                       reservations, share_access_rules,
+                                       snapshot_access_rules, share_id=None):

         share_server = self._get_share_server(context, share)
         snapshot_id = snapshot['id']
@@ -3013,7 +3018,8 @@ class ShareManager(manager.SchedulerDependentManager):
         try:
             self.driver.revert_to_replicated_snapshot(
                 context, active_replica, replica_list, active_replica_snapshot,
-                replica_snapshots, access_rules, share_server=share_server)
+                replica_snapshots, share_access_rules,
+                snapshot_access_rules, share_server=share_server)
         except Exception:
             with excutils.save_and_reraise_exception():
@@ -166,3 +166,8 @@ class ShareSnapshotInstanceAccess(object):
             for rule in rules:
                 self.db.share_snapshot_instance_access_delete(
                     context, rule['access_id'], snapshot_instance_id)
+
+    def get_snapshot_instance_access_rules(self, context,
+                                           snapshot_instance_id):
+        return self.db.share_snapshot_access_get_all_for_snapshot_instance(
+            context, snapshot_instance_id)
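The manager consumes this new helper symmetrically with the existing
share-side lookup; condensed from the ShareManager hunk above:

    share_access_rules = (
        self.access_helper.get_share_instance_access_rules(
            context, filters={'state': constants.STATUS_ACTIVE},
            share_instance_id=share_instance_id))
    snapshot_access_rules = (
        self.snapshot_access_helper.get_snapshot_instance_access_rules(
            context, snapshot.instance['id']))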
@@ -320,8 +320,8 @@ class DummyDriver(driver.ShareDriver):
         """Removes the specified snapshot from Manila management."""

     @slow_me_down
-    def revert_to_snapshot(self, context, snapshot, access_rules,
-                           share_server=None):
+    def revert_to_snapshot(self, context, snapshot, share_access_rules,
+                           snapshot_access_rules, share_server=None):
         """Reverts a share (in place) to the specified snapshot."""

     @slow_me_down
@@ -496,7 +496,8 @@ class DummyDriver(driver.ShareDriver):
     @slow_me_down
     def revert_to_replicated_snapshot(self, context, active_replica,
                                       replica_list, active_replica_snapshot,
-                                      replica_snapshots, access_rules,
+                                      replica_snapshots, share_access_rules,
+                                      snapshot_access_rules,
                                       share_server=None):
         """Reverts a replicated share (in place) to the specified snapshot."""
@@ -1135,7 +1135,7 @@ class HitachiHNASTestCase(test.TestCase):
         self.mock_object(ssh.HNASSSHBackend, 'tree_clone',
                          mock.Mock(side_effect=exc))

-        self._driver.revert_to_snapshot('context', snap, None)
+        self._driver.revert_to_snapshot('context', snap, None, None)

         driver.HitachiHNASDriver._check_fs_mounted.assert_called_once_with()
         ssh.HNASSSHBackend.tree_delete.assert_called_once_with(
@@ -18,6 +18,7 @@ import os

 import ddt
 import mock
+from oslo_concurrency import processutils
 from oslo_config import cfg
 from oslo_utils import timeutils

@@ -286,21 +287,7 @@ class LVMShareDriverTestCase(test.TestCase):
                          self._driver.get_share_stats(refresh=True))
         self._driver._update_share_stats.assert_called_once_with()

-    def test_remove_export(self):
-        mount_path = self._get_mount_path(self.share)
-        self._os.path.exists.return_value = True
-
-        self._driver._remove_export(self._context, self.share)
-
-        expected_exec = [
-            "umount -f %s" % (mount_path,),
-        ]
-
-        self._os.path.exists.assert_called_with(mount_path)
-        self._os.rmdir.assert_called_with(mount_path)
-        self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
-
-    def test_remove_export_is_busy_error(self):
+    def test__unmount_device_is_busy_error(self):
         def exec_runner(*ignore_args, **ignore_kwargs):
             raise exception.ProcessExecutionError(stderr='device is busy')
         self._os.path.exists.return_value = True
@@ -311,37 +298,33 @@ class LVMShareDriverTestCase(test.TestCase):
         fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)])

         self.assertRaises(exception.ShareBusyException,
-                          self._driver._remove_export, self._context,
+                          self._driver._unmount_device,
                           self.share)
         self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())

-    def test_remove_export_error(self):
+    def test__unmount_device_error(self):
         def exec_runner(*ignore_args, **ignore_kwargs):
             raise exception.ProcessExecutionError(stderr='fake error')

         mount_path = self._get_mount_path(self.share)
-        expected_exec = [
-            "umount -f %s" % (mount_path),
-        ]
-        fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)])
         self._os.path.exists.return_value = True
-        self._driver._remove_export(self._context, self.share)
-        self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
-
-    def test_remove_export_rmdir_error(self):
-        mount_path = self._get_mount_path(self.share)
-        self._os.path.exists.return_value = True
-        self.mock_object(self._os, 'rmdir', mock.Mock(side_effect=OSError))
-
-        self._driver._remove_export(self._context, self.share)
-
-        expected_exec = [
-            "umount -f %s" % (mount_path,),
-        ]
-
+        cmd = "umount -f %s" % (mount_path)
+        fake_utils.fake_execute_set_repliers([(cmd, exec_runner)])
+        self.assertRaises(processutils.ProcessExecutionError,
+                          self._driver._unmount_device,
+                          self.share)
+        self._os.path.exists.assert_called_with(mount_path)
+
+    def test__unmount_device_rmdir_error(self):
+        def exec_runner(*ignore_args, **ignore_kwargs):
+            raise exception.ProcessExecutionError(stderr='fake error')
+        mount_path = self._get_mount_path(self.share)
+        self._os.path.exists.return_value = True
+        cmd = "rmdir %s" % (mount_path)
+        fake_utils.fake_execute_set_repliers([(cmd, exec_runner)])
+        self.assertRaises(processutils.ProcessExecutionError,
+                          self._driver._unmount_device,
+                          self.share)
+        self._os.path.exists.assert_called_with(mount_path)
-        self._os.rmdir.assert_called_with(mount_path)
-        self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())

     def test_create_snapshot(self):
         self._driver.create_snapshot(self._context, self.snapshot,
@@ -375,8 +358,10 @@ class LVMShareDriverTestCase(test.TestCase):
         self._driver._delete_share(self._context, self.share)

     def test_delete_snapshot(self):
+        mount_path = self._get_mount_path(self.snapshot)
         expected_exec = [
-            'umount -f ' + self._get_mount_path(self.snapshot),
+            'umount -f %s' % mount_path,
+            'rmdir %s' % mount_path,
             'lvremove -f fakevg/fakesnapshotname',
         ]
         self._driver.delete_snapshot(self._context, self.snapshot,
@@ -482,14 +467,16 @@ class LVMShareDriverTestCase(test.TestCase):
     def _get_mount_path(self, share):
         return os.path.join(CONF.lvm_share_export_root, share['name'])

-    def test_unmount_device(self):
+    def test__unmount_device(self):
         mount_path = self._get_mount_path(self.share)
+        self._os.path.exists.return_value = True
         self.mock_object(self._driver, '_execute')
         self._driver._unmount_device(self.share)
-        self._driver._execute.assert_any_call('umount', mount_path,
+        self._driver._execute.assert_any_call('umount', '-f', mount_path,
                                               run_as_root=True)
         self._driver._execute.assert_any_call('rmdir', mount_path,
                                               run_as_root=True)
+        self._os.path.exists.assert_called_with(mount_path)

     def test_extend_share(self):
         local_path = self._driver._get_local_path(self.share)
@@ -579,18 +566,17 @@ class LVMShareDriverTestCase(test.TestCase):
         mock_update_access = self.mock_object(self._helper_nfs,
                                               'update_access')
         self._driver.revert_to_snapshot(self._context, self.snapshot,
-                                        [], self.share_server)
+                                        [], [], self.share_server)
         snap_lv = "%s/fakesnapshotname" % (CONF.lvm_share_volume_group)
         share_lv = "%s/fakename" % (CONF.lvm_share_volume_group)
         share_mount_path = self._get_mount_path(self.snapshot['share'])
         snapshot_mount_path = self._get_mount_path(self.snapshot)
         expected_exec = [
-            ("lvconvert --merge %s" % snap_lv),
-            ("umount %s" % share_mount_path),
+            ('umount -f %s' % snapshot_mount_path),
+            ("rmdir %s" % snapshot_mount_path),
+            ("umount -f %s" % share_mount_path),
+            ("rmdir %s" % share_mount_path),
+            ("lvconvert --merge %s" % snap_lv),
             ("lvchange -an %s" % share_lv),
             ("lvchange -ay %s" % share_lv),
             ("lvcreate -L 1G --name fakesnapshotname --snapshot %s" %
                 share_lv),
             ('tune2fs -U random /dev/mapper/%s-fakesnapshotname' %
@@ -605,7 +591,7 @@ class LVMShareDriverTestCase(test.TestCase):
             ("chmod 777 %s" % snapshot_mount_path),
         ]
         self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
-        self.assertEqual(2, mock_update_access.call_count)
+        self.assertEqual(4, mock_update_access.call_count)

     def test_snapshot_update_access(self):
         access_rules = [{
@@ -5565,51 +5565,61 @@ class ShareManagerTestCase(test.TestCase):
         reservations = 'fake_reservations'
         share_id = 'fake_share_id'
         snapshot_id = 'fake_snapshot_id'
-        instance_id = 'fake_instance_id'
+        snapshot_instance_id = 'fake_snapshot_instance_id'
+        share_instance_id = 'fake_share_instance_id'
         share_instance = fakes.fake_share_instance(
-            id=instance_id, share_id=share_id)
+            id=share_instance_id, share_id=share_id)
         share = fakes.fake_share(
             id=share_id, instance=share_instance,
             project_id='fake_project', user_id='fake_user', size=2,
             has_replicas=has_replicas)
         snapshot_instance = fakes.fake_snapshot_instance(
-            share_id=instance_id, share=share, name='fake_snapshot',
-            share_instance=share_instance, share_instance_id=instance_id)
+            id=snapshot_instance_id, share_id=share_instance_id, share=share,
+            name='fake_snapshot', share_instance=share_instance,
+            share_instance_id=share_instance_id)
         snapshot = fakes.fake_snapshot(
             id=snapshot_id, share_id=share_id, share=share,
             instance=snapshot_instance, project_id='fake_project',
             user_id='fake_user', size=1)
-        access_rules = ['fake_access_rule']
+        share_access_rules = ['fake_share_access_rule']
+        snapshot_access_rules = ['fake_snapshot_access_rule']

         mock_share_snapshot_get = self.mock_object(
             self.share_manager.db, 'share_snapshot_get',
             mock.Mock(return_value=snapshot))
-        mock_access_get = self.mock_object(
+        mock_share_access_get = self.mock_object(
             self.share_manager.access_helper,
             'get_share_instance_access_rules',
-            mock.Mock(return_value=access_rules))
+            mock.Mock(return_value=share_access_rules))
+        mock_snapshot_access_get = self.mock_object(
+            self.share_manager.snapshot_access_helper,
+            'get_snapshot_instance_access_rules',
+            mock.Mock(return_value=snapshot_access_rules))
         mock_revert_to_snapshot = self.mock_object(
             self.share_manager, '_revert_to_snapshot')
         mock_revert_to_replicated_snapshot = self.mock_object(
             self.share_manager, '_revert_to_replicated_snapshot')

-        self.share_manager.revert_to_snapshot(
-            self.context, snapshot_id, reservations)
+        self.share_manager.revert_to_snapshot(self.context, snapshot_id,
+                                              reservations)

         mock_share_snapshot_get.assert_called_once_with(mock.ANY, snapshot_id)
-        mock_access_get.assert_called_once_with(
+        mock_share_access_get.assert_called_once_with(
             mock.ANY, filters={'state': constants.STATUS_ACTIVE},
-            share_instance_id=instance_id)
+            share_instance_id=share_instance_id)
+        mock_snapshot_access_get.assert_called_once_with(
+            mock.ANY, snapshot_instance_id)

         if not has_replicas:
             mock_revert_to_snapshot.assert_called_once_with(
-                mock.ANY, share, snapshot, reservations, access_rules)
+                mock.ANY, share, snapshot, reservations, share_access_rules,
+                snapshot_access_rules)
             self.assertFalse(mock_revert_to_replicated_snapshot.called)
         else:
             self.assertFalse(mock_revert_to_snapshot.called)
             mock_revert_to_replicated_snapshot.assert_called_once_with(
-                mock.ANY, share, snapshot, reservations, access_rules,
-                share_id=share_id)
+                mock.ANY, share, snapshot, reservations, share_access_rules,
+                snapshot_access_rules, share_id=share_id)

     @ddt.data(None, 'fake_reservations')
     def test__revert_to_snapshot(self, reservations):
@@ -5633,7 +5643,8 @@ class ShareManagerTestCase(test.TestCase):
             id='fake_snapshot_id', share_id=share_id, share=share,
             instance=snapshot_instance, project_id='fake_project',
             user_id='fake_user', size=1)
-        access_rules = []
+        share_access_rules = []
+        snapshot_access_rules = []

         self.mock_object(
             self.share_manager.db, 'share_snapshot_get',
@@ -5646,14 +5657,16 @@ class ShareManagerTestCase(test.TestCase):
         mock_share_snapshot_update = self.mock_object(
             self.share_manager.db, 'share_snapshot_update')

-        self.share_manager._revert_to_snapshot(
-            self.context, share, snapshot, reservations, access_rules)
+        self.share_manager._revert_to_snapshot(self.context, share, snapshot,
+                                               reservations,
+                                               share_access_rules,
+                                               snapshot_access_rules)

         mock_driver.revert_to_snapshot.assert_called_once_with(
             mock.ANY,
             self._get_snapshot_instance_dict(
                 snapshot_instance, share, snapshot=snapshot),
-            access_rules,
+            share_access_rules, snapshot_access_rules,
             share_server=None)

         self.assertFalse(mock_quotas_rollback.called)
@@ -5696,7 +5709,8 @@ class ShareManagerTestCase(test.TestCase):
             id='fake_snapshot_id', share_id=share_id, share=share,
             instance=snapshot_instance, project_id='fake_project',
             user_id='fake_user', size=1)
-        access_rules = []
+        share_access_rules = []
+        snapshot_access_rules = []

         self.mock_object(
             self.share_manager.db, 'share_snapshot_get',
@@ -5715,13 +5729,15 @@ class ShareManagerTestCase(test.TestCase):
                           share,
                           snapshot,
                           reservations,
-                          access_rules)
+                          share_access_rules,
+                          snapshot_access_rules)

         mock_driver.revert_to_snapshot.assert_called_once_with(
             mock.ANY,
             self._get_snapshot_instance_dict(
                 snapshot_instance, share, snapshot=snapshot),
-            access_rules,
+            share_access_rules,
+            snapshot_access_rules,
             share_server=None)

         self.assertFalse(mock_quotas_commit.called)
@@ -5915,7 +5931,8 @@ class ShareManagerTestCase(test.TestCase):
             id='rid2', share_id=share_id, host='secondary',
             replica_state=constants.REPLICA_STATE_IN_SYNC, as_primitive=False)
         replicas = [active_replica, replica]
-        access_rules = []
+        share_access_rules = []
+        snapshot_access_rules = []
         self.mock_object(
             db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
         self.mock_object(
@@ -5937,8 +5954,8 @@ class ShareManagerTestCase(test.TestCase):
             self.share_manager.db, 'share_snapshot_instance_update')

         self.share_manager._revert_to_replicated_snapshot(
-            self.context, share, snapshot, reservations, access_rules,
-            share_id=share_id)
+            self.context, share, snapshot, reservations, share_access_rules,
+            snapshot_access_rules, share_id=share_id)

         self.assertTrue(mock_driver.revert_to_replicated_snapshot.called)
         self.assertFalse(mock_quotas_rollback.called)
@@ -5981,7 +5998,8 @@ class ShareManagerTestCase(test.TestCase):
             replica_state=constants.REPLICA_STATE_IN_SYNC, as_primitive=False,
             share_type_id='fake_share_type_id')
         replicas = [active_replica, replica]
-        access_rules = []
+        share_access_rules = []
+        snapshot_access_rules = []
         self.mock_object(
             db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
         self.mock_object(
@@ -6010,7 +6028,8 @@ class ShareManagerTestCase(test.TestCase):
                           share,
                           snapshot,
                           reservations,
-                          access_rules,
+                          share_access_rules,
+                          snapshot_access_rules,
                           share_id=share_id)

         self.assertTrue(mock_driver.revert_to_replicated_snapshot.called)
@@ -0,0 +1,4 @@
+---
+fixes:
+  - Changed implementation of revert-to-snapshot in LVM driver to be
+    synchronous, preventing failure of subsequent operations.