Merge "glusterfs/vol layout: remove manila-created vols upon delete_share"
commit 5b2b6f33d7 (manila)
@@ -77,6 +77,7 @@ CONF.register_opts(glusterfs_volume_mapped_opts)
 # Currently we handle only #{size}.
 PATTERN_DICT = {'size': {'pattern': '(?P<size>\d+)', 'trans': int}}
 USER_MANILA_SHARE = 'user.manila-share'
 USER_CLONED_FROM = 'user.manila-cloned-from'
+UUID_RE = re.compile('\A[\da-f]{8}-([\da-f]{4}-){3}[\da-f]{12}\Z', re.I)
 
 
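As an aside, the anchored, case-insensitive UUID_RE only accepts a complete UUID, which is the kind of value this layout stores in user.manila-cloned-from for snapshot clones; anything else, including the empty-string default, falls through to the pool path in delete_share below. A minimal standalone check (the sample UUID is arbitrary):

    import re

    # Same pattern as the UUID_RE constant above: a full UUID, case-insensitive,
    # anchored at both ends so partial matches do not count.
    UUID_RE = re.compile(r'\A[\da-f]{8}-([\da-f]{4}-){3}[\da-f]{12}\Z', re.I)

    print(bool(UUID_RE.search('b2253b34-38a5-43b6-b87c-6b88b1d56516')))  # True
    print(bool(UUID_RE.search('Eeyore')))                                # False
    print(bool(UUID_RE.search('')))                                      # False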
@@ -424,8 +425,27 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
         volume back in the available list.
         """
         gmgr = self._share_manager(share)
+        clone_of = gmgr.get_gluster_vol_option(USER_CLONED_FROM) or ''
         try:
-            self._wipe_gluster_vol(gmgr)
+            if UUID_RE.search(clone_of):
+                # We take responsibility for the lifecycle
+                # management of those volumes which were
+                # created by us (as snapshot clones) ...
+                args = ('volume', 'delete', gmgr.volume)
+            else:
+                # ... for volumes that come from the pool, we return
+                # them to the pool (after some purification rituals)
+                self._wipe_gluster_vol(gmgr)
+                args = ('volume', 'set', gmgr.volume, USER_MANILA_SHARE,
+                        'NONE')
+            try:
+                gmgr.gluster_call(*args)
+            except exception.ProcessExecutionError as exc:
+                LOG.error(_LE("Gluster command failed: %s"), exc.stderr)
+                raise exception.GlusterfsException(
+                    _("gluster %(cmd)s failed on %(vol)s") %
+                    {'cmd': ' '.join(args), 'vol': gmgr.qualified})
+
             self._push_gluster_vol(gmgr.qualified)
         except exception.GlusterfsException:
             msg = (_LE("Error during delete_share request for "
@@ -434,15 +454,6 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
             raise
 
         self.private_storage.delete(share['id'])
-
-        args = ('volume', 'set', gmgr.volume, USER_MANILA_SHARE, 'NONE')
-        try:
-            gmgr.gluster_call(*args)
-        except exception.ProcessExecutionError:
-            raise exception.GlusterfsException(
-                _("gluster %(cmd)s failed on %(vol)s") %
-                {'cmd': ' '.join(args), 'vol': gmgr.qualified})
-
         # TODO(deepakcs): Disable quota.
 
     @staticmethod
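Taken together, the two hunks above change delete_share so that manila-created snapshot clones are destroyed while operator-provided pool volumes are wiped and returned. A condensed sketch of that decision flow, using hypothetical stand-in callables (gluster_cmd, wipe_volume, push_back) rather than the driver's own helpers:

    import re

    UUID_RE = re.compile(r'\A[\da-f]{8}-([\da-f]{4}-){3}[\da-f]{12}\Z', re.I)


    def delete_share_flow(volume, cloned_from, gluster_cmd, wipe_volume, push_back):
        # Volumes manila created itself carry a UUID in user.manila-cloned-from
        # and are deleted outright; pool volumes are wiped and untagged instead.
        if UUID_RE.search(cloned_from or ''):
            args = ('volume', 'delete', volume)
        else:
            wipe_volume(volume)
            args = ('volume', 'set', volume, 'user.manila-share', 'NONE')
        gluster_cmd(*args)
        # Mirrors _push_gluster_vol, which runs on both branches.
        push_back(volume)

In the driver itself, gmgr.gluster_call plays the role of gluster_cmd and the user.manila-cloned-from volume option supplies cloned_from.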
@@ -498,8 +509,11 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
             for args in args_tuple:
                 out, err = old_gmgr.gluster_call(*args)
         except exception.ProcessExecutionError as exc:
-            LOG.error(_LE("Error creating share from snapshot: %s"),
-                      exc.stderr)
+            LOG.error(_LE("Error creating share from snapshot "
+                          "%(snap)s of share %(share)s: %(err)s"),
+                      {'snap': snapshot['id'],
+                       'share': snapshot['share_instance']['id'],
+                       'err': exc.stderr})
             raise exception.GlusterfsException(_("gluster %s failed") %
                                                ' '.join(args))
 
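The reworked error message above carries both the snapshot id and the share instance id, passed as a dict of named parameters so interpolation is deferred to the logging machinery. A minimal illustration of the idiom with plain stdlib logging (the driver additionally wraps the message in oslo's _LE marker):

    import logging

    logging.basicConfig(level=logging.ERROR)
    LOG = logging.getLogger(__name__)

    details = {'snap': 'fake_snap_id', 'share': 'fake_instance_id', 'err': 'boom'}

    # A single mapping argument feeds every %(name)s placeholder, and the
    # formatting only happens if the record is actually emitted.
    LOG.error("Error creating share from snapshot %(snap)s of share "
              "%(share)s: %(err)s", details)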
@@ -511,27 +525,26 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
             {'share': share, 'manager': gmgr},
             {'share': snapshot['share_instance'], 'manager': old_gmgr})
 
-        try:
-            gmgr.gluster_call(
-                'volume', 'start', gmgr.volume)
-        except exception.ProcessExecutionError as exc:
-            msg = (_("Error starting gluster volume. "
-                     "Volume: %(volname)s, Error: %(error)s") %
-                   {'volname': gmgr.volume, 'error': exc.stderr})
-            LOG.error(msg)
-            raise exception.GlusterfsException(msg)
+        argseq = (('set',
+                   [USER_CLONED_FROM, snapshot['share_id']]),
+                  ('set', [USER_MANILA_SHARE, share['id']]),
+                  ('start', []))
+        for op, opargs in argseq:
+            args = ['volume', op, gmgr.volume] + opargs
+            try:
+                gmgr.gluster_call(*args)
+            except exception.ProcessExecutionError as exc:
+                LOG.error(_LE("Error creating share from snapshot "
+                              "%(snap)s of share %(share)s: %(err)s."),
+                          {'snap': snapshot['id'],
+                           'share': snapshot['share_instance']['id'],
+                           'err': exc.stderr})
+                raise exception.GlusterfsException(_("gluster %s failed.") %
+                                                   ' '.join(args))
 
         self.gluster_used_vols.add(gmgr.qualified)
         self.private_storage.update(share['id'], {'volume': gmgr.qualified})
-
-        args = ('volume', 'set', gmgr.volume, USER_MANILA_SHARE, share['id'])
-        try:
-            gmgr.gluster_call(*args)
-        except exception.ProcessExecutionError:
-            raise exception.GlusterfsException(
-                _("gluster %(cmd)s failed on %(vol)s") %
-                {'cmd': ' '.join(args), 'vol': gmgr.qualified})
 
         return export
 
     def create_snapshot(self, context, snapshot, share_server=None):
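The argseq loop added above replaces a hand-written try/except per command with one data-driven sequence: tag the clone with its parent share (user.manila-cloned-from), tag it as a manila share (user.manila-share), then start the volume, failing fast with the offending command in the exception text. A rough standalone sketch of the same pattern; run_volume_ops and fake_gluster_call are illustrative names, not the driver's API:

    def run_volume_ops(volume, parent_share_id, share_id, gluster_call):
        # gluster_call is expected to raise on failure, like gmgr.gluster_call.
        argseq = (('set', ['user.manila-cloned-from', parent_share_id]),
                  ('set', ['user.manila-share', share_id]),
                  ('start', []))
        for op, opargs in argseq:
            args = ['volume', op, volume] + opargs
            try:
                gluster_call(*args)
            except OSError:
                raise RuntimeError('gluster %s failed.' % ' '.join(args))


    def fake_gluster_call(*args):
        # Stand-in that just echoes the command instead of invoking the CLI.
        print('gluster', *args)


    run_volume_ops('manila-fake-share-id', 'fake-parent-share-id',
                   'fake-share-id', fake_gluster_call)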
@@ -611,18 +611,22 @@ class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
         self._layout._pop_gluster_vol.assert_called_once_with(
             share['size'])
 
-    def test_delete_share(self):
+    @ddt.data(None, '', 'Eeyore')
+    def test_delete_share(self, clone_of):
         self._layout._push_gluster_vol = mock.Mock()
         self._layout._wipe_gluster_vol = mock.Mock()
         gmgr = common.GlusterManager
         gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
         gmgr1.gluster_call = mock.Mock()
+        gmgr1.get_gluster_vol_option = mock.Mock(return_value=clone_of)
         self.mock_object(self._layout, '_glustermanager',
                          mock.Mock(return_value=gmgr1))
         self._layout.gluster_used_vols = set([self.glusterfs_target1])
 
         self._layout.delete_share(self._context, self.share1)
 
+        gmgr1.get_gluster_vol_option.assert_called_once_with(
+            'user.manila-cloned-from')
         self._layout._wipe_gluster_vol.assert_called_once_with(gmgr1)
         self._layout._push_gluster_vol.assert_called_once_with(
             self.glusterfs_target1)
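With @ddt.data the single test above now runs three times, with clone_of set to None, '' and 'Eeyore'; none of these look like a UUID, so every variant exercises the returned-to-pool path. A toy example of the mechanism, assuming the ddt package:

    import unittest

    import ddt


    @ddt.ddt
    class ExampleTestCase(unittest.TestCase):

        @ddt.data(None, '', 'Eeyore')
        def test_does_not_look_like_a_uuid(self, value):
            # One test method is generated per datum; 'value' receives it.
            self.assertNotRegex(value or '', '[0-9a-f]{8}-')


    if __name__ == '__main__':
        unittest.main()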
@@ -631,6 +635,29 @@ class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
         gmgr1.gluster_call.assert_called_once_with(
             'volume', 'set', 'gv1', 'user.manila-share', 'NONE')
 
+    def test_delete_share_clone(self):
+        self._layout._push_gluster_vol = mock.Mock()
+        self._layout._wipe_gluster_vol = mock.Mock()
+        gmgr = common.GlusterManager
+        gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
+        gmgr1.gluster_call = mock.Mock()
+        gmgr1.get_gluster_vol_option = mock.Mock(return_value=FAKE_UUID1)
+        self.mock_object(self._layout, '_glustermanager',
+                         mock.Mock(return_value=gmgr1))
+        self._layout.gluster_used_vols = set([self.glusterfs_target1])
+
+        self._layout.delete_share(self._context, self.share1)
+
+        gmgr1.get_gluster_vol_option.assert_called_once_with(
+            'user.manila-cloned-from')
+        self.assertFalse(self._layout._wipe_gluster_vol.called)
+        self._layout._push_gluster_vol.assert_called_once_with(
+            self.glusterfs_target1)
+        self._layout.private_storage.delete.assert_called_once_with(
+            self.share1['id'])
+        gmgr1.gluster_call.assert_called_once_with(
+            'volume', 'delete', 'gv1')
+
     @ddt.data({'trouble': exception.ProcessExecutionError,
                '_exception': exception.GlusterfsException},
               {'trouble': RuntimeError, '_exception': RuntimeError})
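FAKE_UUID1 is referenced by the new clone tests but defined (or imported) elsewhere in the test module; any well-formed UUID string satisfies UUID_RE and steers delete_share onto the clone branch. For instance (illustrative only):

    import uuid

    # A stand-in with the same shape as the test module's FAKE_UUID1 constant.
    FAKE_UUID1 = str(uuid.uuid4())
    print(FAKE_UUID1)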
@@ -641,6 +668,7 @@ class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
         gmgr = common.GlusterManager
         gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
         gmgr1.gluster_call = mock.Mock(side_effect=trouble)
+        gmgr1.get_gluster_vol_option = mock.Mock(return_value=None)
         self.mock_object(self._layout, '_glustermanager',
                          mock.Mock(return_value=gmgr1))
         self._layout.gluster_used_vols = set([self.glusterfs_target1])
@@ -648,14 +676,40 @@ class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
         self.assertRaises(_exception, self._layout.delete_share,
                           self._context, self.share1)
 
+        gmgr1.get_gluster_vol_option.assert_called_once_with(
+            'user.manila-cloned-from')
         self._layout._wipe_gluster_vol.assert_called_once_with(gmgr1)
-        self._layout._push_gluster_vol.assert_called_once_with(
-            self.glusterfs_target1)
-        self._layout.private_storage.delete.assert_called_once_with(
-            self.share1['id'])
+        self.assertFalse(self._layout._push_gluster_vol.called)
+        self.assertFalse(self._layout.private_storage.delete.called)
         gmgr1.gluster_call.assert_called_once_with(
             'volume', 'set', 'gv1', 'user.manila-share', 'NONE')
 
+    @ddt.data({'trouble': exception.ProcessExecutionError,
+               '_exception': exception.GlusterfsException},
+              {'trouble': RuntimeError, '_exception': RuntimeError})
+    @ddt.unpack
+    def test_delete_share_gluster_call_clone_error(self, trouble, _exception):
+        self._layout._push_gluster_vol = mock.Mock()
+        self._layout._wipe_gluster_vol = mock.Mock()
+        gmgr = common.GlusterManager
+        gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
+        gmgr1.gluster_call = mock.Mock(side_effect=trouble)
+        gmgr1.get_gluster_vol_option = mock.Mock(return_value=FAKE_UUID1)
+        self.mock_object(self._layout, '_glustermanager',
+                         mock.Mock(return_value=gmgr1))
+        self._layout.gluster_used_vols = set([self.glusterfs_target1])
+
+        self.assertRaises(_exception, self._layout.delete_share,
+                          self._context, self.share1)
+
+        gmgr1.get_gluster_vol_option.assert_called_once_with(
+            'user.manila-cloned-from')
+        self.assertFalse(self._layout._wipe_gluster_vol.called)
+        self.assertFalse(self._layout._push_gluster_vol.called)
+        self.assertFalse(self._layout.private_storage.delete.called)
+        gmgr1.gluster_call.assert_called_once_with(
+            'volume', 'delete', 'gv1')
+
     def test_delete_share_error(self):
         self._layout._wipe_gluster_vol = mock.Mock()
         self._layout._wipe_gluster_vol.side_effect = (
@@ -663,6 +717,7 @@ class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
         self._layout._push_gluster_vol = mock.Mock()
         gmgr = common.GlusterManager
         gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
+        gmgr1.get_gluster_vol_option = mock.Mock(return_value=None)
         self.mock_object(self._layout, '_glustermanager',
                          mock.Mock(return_value=gmgr1))
         self._layout.gluster_used_vols = set([self.glusterfs_target1])
@@ -843,7 +898,8 @@ class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
         share = new_share()
         snapshot = {
             'id': 'fake_snap_id',
-            'share_instance': new_share(export_location=glusterfs_target)
+            'share_instance': new_share(export_location=glusterfs_target),
+            'share_id': 'fake_share_id',
         }
         volume = ''.join(['manila-', share['id']])
         new_vol_addr = ':/'.join([glusterfs_server, volume])
@@ -855,7 +911,7 @@ class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
         self.mock_object(old_gmgr, 'gluster_call',
                          mock.Mock(side_effect=[('', ''), ('', '')]))
         self.mock_object(new_gmgr, 'gluster_call',
-                         mock.Mock(side_effect=[('', ''), ('', '')]))
+                         mock.Mock(side_effect=[('', ''), ('', ''), ('', '')]))
         self.mock_object(new_gmgr, 'get_gluster_vol_option',
                          mock.Mock())
         new_gmgr.get_gluster_vol_option.return_value = (
@@ -879,8 +935,11 @@ class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
             ('snapshot', 'clone', volume, 'fake_snap_id_xyz'))
         old_gmgr.gluster_call.assert_has_calls([mock.call(*a) for a in args])
         args = (('volume', 'start', volume),
-                ('volume', 'set', volume, 'user.manila-share', share['id']))
-        new_gmgr.gluster_call.assert_has_calls([mock.call(*a) for a in args])
+                ('volume', 'set', volume, 'user.manila-share', share['id']),
+                ('volume', 'set', volume, 'user.manila-cloned-from',
+                 snapshot['share_id']))
+        new_gmgr.gluster_call.assert_has_calls([mock.call(*a) for a in args],
+                                               any_order=True)
         self._layout._share_manager.assert_called_once_with(
             snapshot['share_instance'])
         self._layout._glustermanager.assert_called_once_with(
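Because the expected calls above can be issued in an order the test should not care about, the assertion switches to assert_has_calls(..., any_order=True), which only requires that each expected call occurred. A quick illustration with a bare mock:

    from unittest import mock

    m = mock.Mock()
    m('volume', 'start', 'gv1')
    m('volume', 'set', 'gv1', 'user.manila-share', 'fake_id')

    # Passes even though the expected calls are listed in the opposite order.
    m.assert_has_calls([mock.call('volume', 'set', 'gv1', 'user.manila-share',
                                  'fake_id'),
                        mock.call('volume', 'start', 'gv1')],
                       any_order=True)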
@@ -930,7 +989,8 @@ class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
 
         snapshot = {
             'id': 'fake_snap_id',
-            'share_instance': new_share(export_location=glusterfs_target)
+            'share_instance': new_share(export_location=glusterfs_target),
+            'share_id': 'fake_share_id',
         }
         self.assertRaises(_exception,
                           self._layout.create_share_from_snapshot,
@@ -942,8 +1002,7 @@ class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
                  'force', '--mode=script'),
                 ('snapshot', 'clone', volume, 'fake_snap_id_xyz'))
         old_gmgr.gluster_call.assert_has_calls([mock.call(*a) for a in args])
-        args = (('volume', 'start', volume),)
-        new_gmgr.gluster_call.assert_has_calls([mock.call(*a) for a in args])
+        self.assertTrue(new_gmgr.gluster_call.called)
         self._layout._share_manager.assert_called_once_with(
             snapshot['share_instance'])
         self._layout._glustermanager.assert_called_once_with(
@@ -952,72 +1011,6 @@ class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
             {'manager': new_gmgr, 'share': share},
             {'manager': old_gmgr, 'share': snapshot['share_instance']})
 
-    @ddt.data({'trouble': exception.ProcessExecutionError,
-               '_exception': exception.GlusterfsException},
-              {'trouble': RuntimeError, '_exception': RuntimeError})
-    @ddt.unpack
-    def test_create_share_from_snapshot_error_new_gmr_gluster_calls_2nd(
-            self, trouble, _exception):
-        glusterfs_target = 'root@host1:/gv1'
-        glusterfs_server = 'root@host1'
-        share = new_share()
-        volume = ''.join(['manila-', share['id']])
-        new_vol_addr = ':/'.join([glusterfs_server, volume])
-        gmgr = common.GlusterManager
-        old_gmgr = gmgr(glusterfs_target, self._execute, None, None)
-        new_gmgr = gmgr(new_vol_addr, self._execute, None, None)
-        self._layout.gluster_used_vols = set([glusterfs_target])
-        self._layout.glusterfs_versions = {glusterfs_server: ('3', '7')}
-        self.mock_object(old_gmgr, 'gluster_call',
-                         mock.Mock(side_effect=[('', ''), ('', '')]))
-
-        def _gluster_call_gen():
-            yield '', ''
-            raise trouble
-        self.mock_object(new_gmgr, 'gluster_call',
-                         mock.Mock(side_effect=_gluster_call_gen()))
-        self.mock_object(new_gmgr, 'get_gluster_vol_option',
-                         mock.Mock())
-        new_gmgr.get_gluster_vol_option.return_value = (
-            'glusterfs-server-1,client')
-        self.mock_object(self._layout, '_find_actual_backend_snapshot_name',
-                         mock.Mock(return_value='fake_snap_id_xyz'))
-        self.mock_object(self._layout, '_share_manager',
-                         mock.Mock(return_value=old_gmgr))
-        self.mock_object(self._layout, '_glustermanager',
-                         mock.Mock(return_value=new_gmgr))
-        self.mock_object(self.fake_driver, '_setup_via_manager',
-                         mock.Mock(return_value='host1:/gv1'))
-
-        snapshot = {
-            'id': 'fake_snap_id',
-            'share_instance': new_share(export_location=glusterfs_target)
-        }
-        self.assertRaises(_exception,
-                          self._layout.create_share_from_snapshot,
-                          self._context, share, snapshot)
-
-        (self._layout._find_actual_backend_snapshot_name.
-            assert_called_once_with(old_gmgr, snapshot))
-        args = (('snapshot', 'activate', 'fake_snap_id_xyz',
-                 'force', '--mode=script'),
-                ('snapshot', 'clone', volume, 'fake_snap_id_xyz'))
-        old_gmgr.gluster_call.assert_has_calls([mock.call(*a) for a in args])
-        args = (('volume', 'start', volume),)
-        new_gmgr.gluster_call.assert_has_calls([mock.call(*a) for a in args])
-        self._layout._share_manager.assert_called_once_with(
-            snapshot['share_instance'])
-        self._layout._glustermanager.assert_called_once_with(
-            gmgr.parse(new_vol_addr))
-        self._layout.driver._setup_via_manager.assert_called_once_with(
-            {'manager': new_gmgr, 'share': share},
-            {'manager': old_gmgr, 'share': snapshot['share_instance']})
-        self._layout.private_storage.update.assert_called_once_with(
-            share['id'], {'volume': new_vol_addr})
-        self.assertIn(
-            new_vol_addr,
-            self._layout.gluster_used_vols)
-
     @ddt.data({'trouble': exception.ProcessExecutionError,
                '_exception': exception.GlusterfsException},
               {'trouble': RuntimeError, '_exception': RuntimeError})