[ZFSOnLinux] Retry unmounting old datasets during manage
Add a retry loop to ensure the dataset being renamed is cleanly
unmounted before the rename operation.

Change-Id: Ie506f237010c415ee9f0d64abbefd5854f776a5f
Closes-Bug: #1785180
parent a3838009d3
commit d7c01efb44
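In outline, the fix swaps a single forced unmount followed by a fixed one-second
sleep for a polling loop: the driver re-reads the host's mount table and retries
the forced unmount until the dataset no longer appears, and aborts the manage
operation if it never disappears. A minimal standalone sketch of that pattern
(execute, zfs, attempts, and interval are illustrative stand-ins, not the
driver's actual helpers):

    import time


    class UnmountError(Exception):
        """Stand-in for the driver's ZFSonLinuxException."""


    def unmount_with_retry(execute, zfs, dataset, attempts=10, interval=1):
        # Poll the mount table, force-unmount, and re-check until the
        # dataset is gone, giving up after `attempts` tries.
        for _ in range(attempts):
            out, _err = execute("sudo", "mount")
            if "%s " % dataset not in out:
                return
            zfs("umount", "-f", dataset)
            time.sleep(interval)
        out, _err = execute("sudo", "mount")
        if "%s " % dataset in out:
            raise UnmountError("unable to unmount dataset %s" % dataset)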
@@ -724,15 +724,6 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
             "username": self.configuration.zfs_ssh_username,
             "host": self.service_ip,
         }
-        self.private_storage.update(
-            share["id"], {
-                "entity_type": "share",
-                "dataset_name": new_dataset_name,
-                "ssh_cmd": ssh_cmd,  # used in replication
-                "pool_name": actual_pool_name,  # used in replication
-                "used_options": " ".join(options),
-            }
-        )
 
         # Perform checks on requested dataset
         if actual_pool_name != scheduled_pool_name:
@@ -760,13 +751,25 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
                 "dataset": old_dataset_name,
                 "zpool": actual_pool_name})
 
-        # Rename dataset
-        out, err = self.execute("sudo", "mount")
-        if "%s " % old_dataset_name in out:
-            self.zfs_with_retry("umount", "-f", old_dataset_name)
-            time.sleep(1)
+        # Unmount the dataset before attempting to rename and mount
+        try:
+            self._unmount_share_with_retry(old_dataset_name)
+        except exception.ZFSonLinuxException:
+            msg = _("Unable to unmount share before renaming and re-mounting.")
+            raise exception.ZFSonLinuxException(message=msg)
+
+        # Rename the dataset and mount with new name
         self.zfs_with_retry("rename", old_dataset_name, new_dataset_name)
-        self.zfs("mount", new_dataset_name)
+
+        try:
+            self.zfs("mount", new_dataset_name)
+        except exception.ProcessExecutionError:
+            # Workaround for bug/1785180
+            out, err = self.zfs("mount")
+            mounted = any([new_dataset_name in mountedfs
+                           for mountedfs in out.splitlines()])
+            if not mounted:
+                raise
 
         # Apply options to dataset
         for option in options:
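The second try block above is the bug/1785180 workaround itself: the rename can
leave the dataset already mounted under its new name, so the follow-up
"zfs mount" fails even though the desired end state is reached. The failure is
swallowed only when the dataset really does appear in the "zfs mount" output,
which is a plain line-by-line membership test:

    # Illustrative "zfs mount" output; dataset names and mountpoints are
    # made up for the example.
    out = "\n".join([
        "foopool/some/other/dataset   /mnt/other",
        "foopool/path/to/new/dataset  /mnt/new",
    ])
    new_dataset_name = "foopool/path/to/new/dataset"

    # The same check the driver performs: any mount-table line that
    # names the dataset counts as mounted.
    mounted = any([new_dataset_name in mountedfs
                   for mountedfs in out.splitlines()])
    assert mounted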
@@ -776,6 +779,16 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
         export_locations = self._get_share_helper(
             share["share_proto"]).get_exports(new_dataset_name)
 
+        self.private_storage.update(
+            share["id"], {
+                "entity_type": "share",
+                "dataset_name": new_dataset_name,
+                "ssh_cmd": ssh_cmd,  # used in replication
+                "pool_name": actual_pool_name,  # used in replication
+                "used_options": " ".join(options),
+            }
+        )
+
         return {"size": share["size"], "export_locations": export_locations}
 
     def unmanage(self, share):
@@ -836,6 +849,17 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
         """Unmanage dataset snapshot."""
         self.private_storage.delete(snapshot_instance["snapshot_id"])
 
+    @utils.retry(exception.ZFSonLinuxException)
+    def _unmount_share_with_retry(self, share_name):
+        out, err = self.execute("sudo", "mount")
+        if "%s " % share_name not in out:
+            return
+        self.zfs_with_retry("umount", "-f", share_name)
+        out, err = self.execute("sudo", "mount")
+        if "%s " % share_name in out:
+            raise exception.ZFSonLinuxException(
+                _("Unable to unmount dataset %s"), share_name)
+
     def _get_replication_snapshot_prefix(self, replica):
         """Returns replica-based snapshot prefix."""
         replication_snapshot_prefix = "%s_%s" % (
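The helper delegates the looping to manila's utils.retry decorator, which
re-invokes the whole method each time it raises ZFSonLinuxException; the new
test below relies on ten attempts being the default. A rough stand-in for how
such a decorator behaves (the retries and interval defaults here are
assumptions, not manila's actual signature):

    import functools
    import time


    def retry(exc_type, interval=1, retries=10):
        # Re-invoke the wrapped function whenever it raises exc_type,
        # sleeping between attempts; the final failure propagates.
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                for attempt in range(1, retries + 1):
                    try:
                        return func(*args, **kwargs)
                    except exc_type:
                        if attempt == retries:
                            raise
                        time.sleep(interval)
            return wrapper
        return decorator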
@@ -1138,7 +1138,7 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
             zfs_driver.share_types,
             'get_extra_specs_from_share',
             mock.Mock(return_value={}))
-        mock_sleep = self.mock_object(zfs_driver.time, "sleep")
+        self.mock_object(zfs_driver.time, "sleep")
         mock__get_dataset_name = self.mock_object(
             self.driver, "_get_dataset_name",
             mock.Mock(return_value=new_dataset_name))
@@ -1148,11 +1148,17 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
             mock.Mock(return_value=("fake_out", "fake_error")))
         mock_zfs_with_retry = self.mock_object(self.driver, "zfs_with_retry")
 
-        mock_execute = self.mock_object(self.driver, "execute")
+        mock_execute_side_effects = [
+            ("%s " % old_dataset_name, "fake_err")
+            if mount_exists else ("foo", "bar")
+        ] * 3
         if mount_exists:
-            mock_execute.return_value = "%s " % old_dataset_name, "fake_err"
-        else:
-            mock_execute.return_value = ("foo", "bar")
+            # After three retries, assume the mount goes away
+            mock_execute_side_effects.append((("foo", "bar")))
+        mock_execute = self.mock_object(
+            self.driver, "execute",
+            mock.Mock(side_effect=iter(mock_execute_side_effects)))
 
         mock_parse_zfs_answer = self.mock_object(
             self.driver,
             "parse_zfs_answer",
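Switching the mocked execute from a fixed return_value to side_effect is what
makes the retry observable: with side_effect=iter(...), every call consumes the
next tuple from the list, so the test can present a mount that only disappears
on the fourth check. A self-contained illustration (names are invented for the
example):

    from unittest import mock

    # Three "still mounted" replies, then a clean mount table.
    fake_execute = mock.Mock(side_effect=iter([
        ("foo_dataset ", "fake_err"),
        ("foo_dataset ", "fake_err"),
        ("foo_dataset ", "fake_err"),
        ("foo", "bar"),
    ]))

    results = [fake_execute("sudo", "mount")[0] for _ in range(4)]
    assert results == ["foo_dataset ", "foo_dataset ", "foo_dataset ", "foo"]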
@@ -1175,9 +1181,11 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
         self.assertEqual(
             mock_helper.return_value.get_exports.return_value,
             result["export_locations"])
+        mock_execute.assert_called_with("sudo", "mount")
         if mount_exists:
-            mock_sleep.assert_called_once_with(1)
-            mock_execute.assert_called_once_with("sudo", "mount")
+            self.assertEqual(4, mock_execute.call_count)
+        else:
+            self.assertEqual(1, mock_execute.call_count)
         mock_parse_zfs_answer.assert_called_once_with(mock_zfs.return_value[0])
         if driver_options.get("size"):
             self.assertFalse(mock_get_zfs_option.called)
@@ -1259,6 +1267,62 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
         mock_parse_zfs_answer.assert_called_once_with(mock_zfs.return_value[0])
         mock_get_extra_specs_from_share.assert_called_once_with(share)
 
+    def test_manage_unmount_exception(self):
+        old_ds_name = "foopool/path/to/old/dataset/name"
+        new_ds_name = "foopool/path/to/new/dataset/name"
+        share = {
+            "id": "fake_share_instance_id",
+            "share_id": "fake_share_id",
+            "export_locations": [{"path": "1.1.1.1:/%s" % old_ds_name}],
+            "host": "foobackend@foohost#foopool",
+            "share_proto": "NFS",
+        }
+
+        mock_get_extra_specs_from_share = self.mock_object(
+            zfs_driver.share_types,
+            'get_extra_specs_from_share',
+            mock.Mock(return_value={}))
+        self.mock_object(zfs_driver.time, "sleep")
+        mock__get_dataset_name = self.mock_object(
+            self.driver, "_get_dataset_name",
+            mock.Mock(return_value=new_ds_name))
+        mock_helper = self.mock_object(self.driver, "_get_share_helper")
+        mock_zfs = self.mock_object(
+            self.driver, "zfs",
+            mock.Mock(return_value=("fake_out", "fake_error")))
+        mock_zfs_with_retry = self.mock_object(self.driver, "zfs_with_retry")
+
+        # 10 Retries, would mean 20 calls to check the mount still exists
+        mock_execute_side_effects = [("%s " % old_ds_name, "fake_err")] * 21
+        mock_execute = self.mock_object(
+            self.driver, "execute",
+            mock.Mock(side_effect=mock_execute_side_effects))
+
+        mock_parse_zfs_answer = self.mock_object(
+            self.driver,
+            "parse_zfs_answer",
+            mock.Mock(return_value=[
+                {"NAME": "some_other_dataset_1"},
+                {"NAME": old_ds_name},
+                {"NAME": "some_other_dataset_2"},
+            ]))
+        mock_get_zfs_option = self.mock_object(
+            self.driver, 'get_zfs_option', mock.Mock(return_value="4G"))
+
+        self.assertRaises(exception.ZFSonLinuxException,
+                          self.driver.manage_existing,
+                          share, {'size': 10})
+
+        self.assertFalse(mock_helper.return_value.get_exports.called)
+        mock_zfs_with_retry.assert_called_with("umount", "-f", old_ds_name)
+        mock_execute.assert_called_with("sudo", "mount")
+        self.assertEqual(10, mock_zfs_with_retry.call_count)
+        self.assertEqual(20, mock_execute.call_count)
+        mock_parse_zfs_answer.assert_called_once_with(mock_zfs.return_value[0])
+        self.assertFalse(mock_get_zfs_option.called)
+        mock__get_dataset_name.assert_called_once_with(share)
+        mock_get_extra_specs_from_share.assert_called_once_with(share)
+
     def test_unmanage(self):
         share = {'id': 'fake_share_id'}
         self.mock_object(self.driver.private_storage, 'delete')
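The call-count assertions follow from the helper's structure: each of the ten
retry attempts reads the mount table twice, once before and once after the
forced unmount, giving 20 execute calls and 10 zfs_with_retry calls; the
side-effect list carries a 21st entry purely as slack so the iterator cannot
run dry. As arithmetic (an informal restatement, not driver code):

    retries = 10                  # attempts assumed for utils.retry
    checks_per_attempt = 2        # mount table read before and after umount
    expected_execute_calls = retries * checks_per_attempt   # == 20
    side_effects = [("mounted ", "fake_err")] * (expected_execute_calls + 1)
    assert len(side_effects) == 21   # one spare entry as slack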
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    The ZFSOnLinux driver now retries unmounting zfs shares
+    to perform the manage operation. See `Launchpad bug 1785180
+    <https://bugs.launchpad.net/manila/+bug/1785180>`_ for details.