[Container driver] Adds share and share server migration
Implements the driver interface methods that enable a share or a share server to be migrated. Migration only works across backends that share the same volume group. Change-Id: I813801fd64d4d605c0a4fe06c7a130ad00865b6d Depends-On: Ic0751027d2c3f1ef7ab0f7836baff3070a230cfd Implements: bp container-share-server-migration Implements: bp container-share-migration
This commit is contained in:
parent
05fb54962a
commit
ea7e304e33
@ -304,14 +304,9 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
|
||||
def _delete_export_and_umount_storage(
|
||||
self, share, server_id, share_name, ignore_errors=False):
|
||||
|
||||
self._get_helper(share).delete_share(server_id, share_name,
|
||||
ignore_errors=ignore_errors)
|
||||
self._umount_storage(
|
||||
share, server_id, share_name, ignore_errors=ignore_errors)
|
||||
|
||||
self.container.execute(
|
||||
server_id,
|
||||
["umount", "/shares/%s" % share_name],
|
||||
ignore_errors=ignore_errors
|
||||
)
|
||||
# (aovchinnikov): bug 1621784 manifests itself here as well as in
|
||||
# storage helper. There is a chance that we won't be able to remove
|
||||
# this directory, despite the fact that it is not shared anymore and
|
||||
@ -323,11 +318,25 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
|
||||
ignore_errors=True
|
||||
)
|
||||
|
||||
def _umount_storage(
        self, share, server_id, share_name, ignore_errors=False):
    """Drop the share's export and unmount it inside the container.

    :param share: share instance whose protocol helper is used.
    :param server_id: name of the container hosting the share.
    :param share_name: share name; the mount point is /shares/<name>.
    :param ignore_errors: when True, failures are logged, not raised.
    """
    protocol_helper = self._get_helper(share)
    protocol_helper.delete_share(server_id, share_name,
                                 ignore_errors=ignore_errors)
    self.container.execute(
        server_id,
        ["umount", "/shares/%s" % share_name],
        ignore_errors=ignore_errors)
|
||||
|
||||
def _create_export_and_mount_storage(self, share, server_id, share_name):
    """Create the share's mount point in the container, mount and export it.

    Returns whatever ``_mount_storage`` returns (the export locations).
    """
    mount_point = "/shares/%s" % share_name
    self.container.execute(
        server_id, ["mkdir", "-m", "750", mount_point])
    return self._mount_storage(share, server_id, share_name)
|
||||
|
||||
def _mount_storage(self, share, server_id, share_name):
|
||||
lv_device = self.storage._get_lv_device(share_name)
|
||||
self.container.execute(
|
||||
server_id,
|
||||
@ -382,3 +391,161 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
|
||||
if not self.container.container_exists(name):
|
||||
return self._get_container_name(name)
|
||||
return name
|
||||
|
||||
def migration_check_compatibility(self, context, source_share,
                                  destination_share, share_server=None,
                                  destination_share_server=None):
    """Ask the storage helper whether this share migration is possible."""
    server_kwargs = {
        'share_server': share_server,
        'destination_share_server': destination_share_server,
    }
    return self.storage.migration_check_compatibility(
        context, source_share, destination_share, **server_kwargs)
|
||||
|
||||
def migration_start(self, context, source_share, destination_share,
                    source_snapshots, snapshot_mappings,
                    share_server=None, destination_share_server=None):
    """Delegate the first phase of the share migration to the storage."""
    self.storage.migration_start(
        context, source_share, destination_share, source_snapshots,
        snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)
|
||||
|
||||
def migration_continue(self, context, source_share, destination_share,
                       source_snapshots, snapshot_mappings,
                       share_server=None, destination_share_server=None):
    """Report whether the storage helper finished the first phase."""
    return self.storage.migration_continue(
        context, source_share, destination_share, source_snapshots,
        snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)
|
||||
|
||||
def migration_get_progress(self, context, source_share,
                           destination_share, source_snapshots,
                           snapshot_mappings, share_server=None,
                           destination_share_server=None):
    """Fetch detailed migration progress from the storage helper."""
    return self.storage.migration_get_progress(
        context, source_share, destination_share, source_snapshots,
        snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)
|
||||
|
||||
def migration_cancel(self, context, source_share, destination_share,
                     source_snapshots, snapshot_mappings,
                     share_server=None, destination_share_server=None):
    """Ask the storage helper to abort an in-flight share migration."""
    self.storage.migration_cancel(
        context, source_share, destination_share, source_snapshots,
        snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)
|
||||
|
||||
def migration_complete(self, context, source_share, destination_share,
                       source_snapshots, snapshot_mappings,
                       share_server=None, destination_share_server=None):
    """Finish a share migration: detach from source, attach to destination.

    :return: dict with the share's new 'export_locations'.
    """
    # Drop the share from the source container first.
    src_container = self._get_container_name(share_server["id"])
    self._umount_storage(
        source_share, src_container, source_share.share_id)

    # The storage helper releases the source side of the migration.
    self.storage.migration_complete(
        context, source_share, destination_share,
        source_snapshots, snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)

    # Mount the volume inside the destination container and pick up
    # the fresh export locations.
    dest_container = self._get_container_name(
        destination_share_server["id"])
    new_export_location = self._mount_storage(
        destination_share, dest_container, destination_share.share_id)

    LOG.info("Volume move operation for share %(shr)s was completed "
             "successfully. Share has been moved from %(src)s to "
             "%(dest)s.",
             {'shr': source_share['id'],
              'src': source_share['host'],
              'dest': destination_share['host']})

    return {
        'export_locations': new_export_location,
    }
|
||||
|
||||
def share_server_migration_check_compatibility(
        self, context, share_server, dest_host, old_share_network,
        new_share_network, shares_request_spec):
    """Is called to check migration compatibility for a share server."""
    storage = self.storage
    return storage.share_server_migration_check_compatibility(
        context, share_server, dest_host, old_share_network,
        new_share_network, shares_request_spec)
|
||||
|
||||
def share_server_migration_start(self, context, src_share_server,
                                 dest_share_server, shares, snapshots):
    """Is called to perform 1st phase of migration of a share server."""
    LOG.debug("Migration of share server with ID '%s' has been started.",
              src_share_server["id"])
    self.storage.share_server_migration_start(
        context, src_share_server, dest_share_server, shares, snapshots)
|
||||
|
||||
def share_server_migration_continue(self, context, src_share_server,
                                    dest_share_server, shares, snapshots):
    """Report whether the first server-migration phase has finished."""
    storage = self.storage
    return storage.share_server_migration_continue(
        context, src_share_server, dest_share_server, shares, snapshots)
|
||||
|
||||
def share_server_migration_cancel(self, context, src_share_server,
                                  dest_share_server, shares, snapshots):
    """Is called to cancel a share server migration."""
    self.storage.share_server_migration_cancel(
        context, src_share_server, dest_share_server, shares, snapshots)
    LOG.debug(
        "Migration of share server with ID '%s' has been canceled.",
        src_share_server["id"])
|
||||
|
||||
def share_server_migration_get_progress(self, context, src_share_server,
                                        dest_share_server, shares,
                                        snapshots):
    """Is called to get share server migration progress."""
    storage = self.storage
    return storage.share_server_migration_get_progress(
        context, src_share_server, dest_share_server, shares, snapshots)
|
||||
|
||||
def share_server_migration_complete(self, context, source_share_server,
                                    dest_share_server, shares, snapshots,
                                    new_network_allocations):
    """Conclude a share server migration.

    Unmounts every share from the source container, lets the storage
    helper finish the migration, then remounts the shares inside the
    destination container.

    :param context: the request context.
    :param source_share_server: share server being migrated away from.
    :param dest_share_server: share server receiving the shares.
    :param shares: list of share instances being moved.
    :param snapshots: list of snapshot instances being moved.
    :param new_network_allocations: network allocations of the
        destination share server.
    :return: dict with 'share_updates' mapping each share id to its new
        export locations and pool name.
    """
    # Removes the source shares reference from the source container.
    source_server_id = self._get_container_name(source_share_server["id"])
    for source_share in shares:
        self._umount_storage(
            source_share, source_server_id, source_share.share_id)

    # storage removes source share
    self.storage.share_server_migration_complete(
        context, source_share_server, dest_share_server, shares, snapshots,
        new_network_allocations)

    # Enables access to each share on the destination container.
    destination_server_id = self._get_container_name(
        dest_share_server["id"])
    shares_updates = {}
    for destination_share in shares:
        share_id = destination_share.share_id
        new_export_location = self._mount_storage(
            destination_share, destination_server_id, share_id)

        shares_updates[destination_share['id']] = {
            'export_locations': new_export_location,
            'pool_name': self.storage.get_share_pool_name(share_id),
        }

    msg = ("Volumes move operation from server %(server)s were completed "
           "successfully. Share server has been moved from %(src)s to "
           "%(dest)s.")
    # Bug fix: the key must be 'server' to match the '%(server)s'
    # placeholder above; the original used 'serv', which would raise
    # KeyError when the log record was formatted.
    msg_args = {
        'server': source_share_server['id'],
        'src': source_share_server['host'],
        'dest': dest_share_server['host'],
    }
    LOG.info(msg, msg_args)

    return {
        'share_updates': shares_updates,
    }
|
||||
|
@ -22,7 +22,7 @@ from oslo_log import log
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.share import driver
|
||||
|
||||
from manila.share import utils as share_utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
@ -140,3 +140,183 @@ class LVMHelper(driver.ExecuteMixin):
|
||||
LOG.debug("Found size %(size)s for LVM device "
|
||||
"%(lvm)s.", {'size': size[0], 'lvm': share_name})
|
||||
return size[0]
|
||||
|
||||
def migration_check_compatibility(self, context, source_share,
                                  destination_share, share_server=None,
                                  destination_share_server=None):
    """Checks compatibility between self.host and destination host.

    Source and destination must live in the same volume group (the
    pool portion of the host string).
    """
    source_host = source_share['host']
    destination_host = destination_share['host']
    source_vg = share_utils.extract_host(source_host, level='pool')
    destination_vg = share_utils.extract_host(
        destination_host, level='pool')

    compatible = destination_vg == source_vg
    if not compatible:
        # NOTE(review): LOG.exception is kept for behavior parity with
        # the unit tests, although no exception is active here.
        LOG.exception(
            ("Cannot migrate share %(shr)s between "
             "%(src)s and %(dest)s, they must be in the same volume "
             "group."),
            {'shr': source_share['id'],
             'src': source_share['host'],
             'dest': destination_host})

    return {
        'compatible': compatible,
        'writable': True,
        'nondisruptive': False,
        'preserve_metadata': True,
        'preserve_snapshots': False,
    }
|
||||
|
||||
def migration_start(self, context, source_share, destination_share,
                    source_snapshots, snapshot_mappings,
                    share_server=None, destination_share_server=None):
    """Starts the migration of the share from one host to another."""
    # NOTE(felipe_rodrigues): Since they are in the same volume group,
    # there is no need to copy the data between the volumes.
    return None
|
||||
|
||||
def migration_continue(self, context, source_share, destination_share,
                       source_snapshots, snapshot_mappings,
                       share_server=None, destination_share_server=None):
    """Check the progress of the migration.

    Nothing is copied, so the first phase is always already done.
    """
    return True
|
||||
|
||||
def migration_get_progress(self, context, source_share,
                           destination_share, source_snapshots,
                           snapshot_mappings, share_server=None,
                           destination_share_server=None):
    """Return detailed progress of the migration in progress.

    Always reports completion, since no data copy takes place.
    """
    return {'total_progress': 100}
|
||||
|
||||
def migration_cancel(self, context, source_share, destination_share,
                     source_snapshots, snapshot_mappings,
                     share_server=None, destination_share_server=None):
    """Abort an ongoing migration.

    No data copy is in flight within a single volume group, so there
    is nothing to cancel.
    """
    return None
|
||||
|
||||
def migration_complete(self, context, source_share, destination_share,
                       source_snapshots, snapshot_mappings,
                       share_server=None, destination_share_server=None):
    """Completes by removing the source local volume.

    Within one volume group the destination reuses the source lv, so
    no removal is necessary.
    """
    return None
|
||||
|
||||
def share_server_migration_check_compatibility(
        self, context, share_server, dest_host, old_share_network,
        new_share_network, shares_request_spec):
    """Is called to check migration compatibility for a share server."""
    not_compatible = {
        'compatible': False,
        'writable': None,
        'nondisruptive': None,
        'preserve_snapshots': None,
        'migration_cancel': None,
        'migration_get_progress': None
    }

    # A server cannot be migrated onto its own backend.
    same_backend = (
        share_utils.extract_host(dest_host, level='backend_name') ==
        share_utils.extract_host(share_server['host'],
                                 level='backend_name'))
    if same_backend:
        LOG.error(_("Cannot perform server migration %(server)s within the "
                    "same backend. Please choose a destination host different "
                    "from the source."),
                  {'server': share_server['id']})
        return not_compatible

    # The container backend has only one pool, so the volume group is
    # read from the first share instance in the request spec.
    first_share = shares_request_spec['shares_req_spec'][0]
    source_host = first_share['share_instance_properties']['host']
    source_vg = share_utils.extract_host(source_host, level='pool')
    dest_vg = share_utils.extract_host(dest_host, level='pool')
    if dest_vg and dest_vg != source_vg:
        LOG.error("Cannot migrate share server %(server)s between %(src)s "
                  "and %(dest)s. They must be in the same volume group.",
                  {'server': share_server['id'],
                   'src': source_host,
                   'dest': dest_host})
        return not_compatible

    # NOTE(felipe_rodrigues): it is not required to check the capacity,
    # because it is migrating in the same volume group.
    return {
        'compatible': True,
        'writable': True,
        'nondisruptive': False,
        'preserve_snapshots': False,
        'migration_cancel': True,
        'migration_get_progress': True
    }
|
||||
|
||||
def share_server_migration_start(self, context, src_share_server,
                                 dest_share_server, shares, snapshots):
    """Is called to perform 1st phase of migration of a share server."""
    # NOTE(felipe_rodrigues): Since they are in the same volume group,
    # there is no need to copy the data between the volumes.
    return None
|
||||
|
||||
def share_server_migration_continue(self, context, src_share_server,
                                    dest_share_server, shares, snapshots):
    """Check the progress of the migration.

    Nothing is copied, so the first phase is always already done.
    """
    return True
|
||||
|
||||
def share_server_migration_complete(self, context, source_share_server,
                                    dest_share_server, shares, snapshots,
                                    new_network_allocations):
    """Completes by removing the source local volume.

    Within one volume group the destination reuses the source lv, so
    no removal is necessary.
    """
    return None
|
||||
|
||||
def share_server_migration_cancel(self, context, src_share_server,
                                  dest_share_server, shares, snapshots):
    """Abort an ongoing migration.

    No data copy is in flight within a single volume group, so there
    is nothing to cancel.
    """
    return None
|
||||
|
||||
def share_server_migration_get_progress(self, context, src_share_server,
                                        dest_share_server, shares,
                                        snapshots):
    """Return detailed progress of the server migration in progress.

    Always reports completion, since no data copy takes place.
    """
    return {'total_progress': 100}
|
||||
|
||||
def get_share_pool_name(self, share_id):
    """Return the pool name where the share is allocated.

    The container backend exposes a single pool: the configured
    volume group. ``share_id`` is accepted for interface symmetry.
    """
    return self.configuration.container_volume_group
|
||||
|
@ -74,6 +74,7 @@ def fake_share(**kwargs):
|
||||
'name': 'fakename',
|
||||
'size': 1,
|
||||
'share_proto': 'NFS',
|
||||
'host': 'host@backend#vg',
|
||||
'export_location': '127.0.0.1:/mnt/nfs/volume-00002',
|
||||
}
|
||||
share.update(kwargs)
|
||||
|
@ -522,3 +522,89 @@ class ContainerShareDriverTestCase(test.TestCase):
|
||||
mock_container_exists.assert_called_once_with(
|
||||
fake_name
|
||||
)
|
||||
|
||||
def test_migration_complete(self):
    """migration_complete umounts the source and mounts the destination."""
    share_server = {'id': 'fakeid'}
    fake_container_name = 'manila_fake_container'
    new_export_location = 'new_export_location'

    mock_storage_complete = self.mock_object(
        self._driver.storage, 'migration_complete')
    mock_get_container_name = self.mock_object(
        self._driver, '_get_container_name',
        mock.Mock(return_value=fake_container_name))
    mock_mount = self.mock_object(
        self._driver, '_mount_storage',
        mock.Mock(return_value=new_export_location))
    mock_umount = self.mock_object(self._driver, '_umount_storage')

    result = self._driver.migration_complete(
        self._context, self.share, self.share, None,
        None, share_server, share_server)
    self.assertEqual({'export_locations': new_export_location}, result)

    mock_storage_complete.assert_called_once_with(
        self._context, self.share, self.share, None, None,
        destination_share_server=share_server, share_server=share_server)
    mock_mount.assert_called_once_with(
        self.share, fake_container_name, self.share.share_id)
    mock_umount.assert_called_once_with(
        self.share, fake_container_name, self.share.share_id)
    mock_get_container_name.assert_called_with(share_server['id'])
|
||||
|
||||
def test_share_server_migration_complete(self):
    """Server migration_complete remounts every share on the destination."""
    source_server = {'id': 'source_fake_id', 'host': 'host@back1'}
    dest_server = {'id': 'dest_fake_id', 'host': 'host@back2'}
    fake_container_name = 'manila_fake_container'
    new_export_location = 'new_export_location'
    fake_pool_name = 'fake_vg'
    shares_list = [self.share, self.share]

    mock_get_container_name = self.mock_object(
        self._driver, '_get_container_name',
        mock.Mock(return_value=fake_container_name))
    mock_umount = self.mock_object(self._driver, '_umount_storage')
    mock_storage_complete = self.mock_object(
        self._driver.storage, 'share_server_migration_complete')
    mock_mount = self.mock_object(
        self._driver, '_mount_storage',
        mock.Mock(return_value=new_export_location))
    mock_get_pool = self.mock_object(
        self._driver.storage, 'get_share_pool_name',
        mock.Mock(return_value=fake_pool_name))

    expected_result = {
        'share_updates': {
            fake_share['id']: {
                'export_locations': new_export_location,
                'pool_name': fake_pool_name,
            }
            for fake_share in shares_list
        },
    }
    self.assertDictMatch(
        expected_result,
        self._driver.share_server_migration_complete(
            self._context, source_server, dest_server,
            shares_list, None, None))
    mock_storage_complete.assert_called_once_with(
        self._context, source_server, dest_server, shares_list, None, None)

    # assert shares
    for fake_share in shares_list:
        mock_get_pool.assert_any_call(fake_share['share_id'])
        mock_umount.assert_any_call(
            fake_share, fake_container_name, fake_share.share_id)
        mock_mount.assert_any_call(
            fake_share, fake_container_name, fake_share.share_id)

    mock_get_container_name.assert_any_call(source_server['id'])
    mock_get_container_name.assert_any_call(dest_server['id'])
|
||||
|
@ -23,6 +23,7 @@ from manila import exception
|
||||
from manila.share import configuration
|
||||
from manila.share.drivers.container import storage_helper
|
||||
from manila import test
|
||||
from manila.tests import fake_share as base_fake_share
|
||||
from manila.tests.share.drivers.container.fakes import fake_share
|
||||
|
||||
|
||||
@ -36,6 +37,7 @@ class LVMHelperTestCase(test.TestCase):
|
||||
self.fake_conf = configuration.Configuration(None)
|
||||
self.fake_conf.container_volume_mount_path = "/tmp/shares"
|
||||
self.LVMHelper = storage_helper.LVMHelper(configuration=self.fake_conf)
|
||||
self.context = mock.Mock()
|
||||
|
||||
def fake_exec_sync(self, *args, **kwargs):
|
||||
kwargs['execute_arguments'].append(args)
|
||||
@ -207,3 +209,123 @@ class LVMHelperTestCase(test.TestCase):
|
||||
)
|
||||
mock_get_lv_device.assert_called_once_with(share_name)
|
||||
self.assertEqual(result, 1)
|
||||
|
||||
@ddt.data({'source_host': 'host@back1#vg1', 'dest_host': 'host@back2#vg2',
           'compatible': False},
          {'source_host': 'host@back1#vg1', 'dest_host': 'host@back2#vg1',
           'compatible': True},
          {'source_host': 'host@back1#vg1', 'dest_host': 'host@back1#vg1',
           'compatible': True})
@ddt.unpack
def test_migration_check_compatibility(
        self, source_host, dest_host, compatible):
    """Shares are compatible only when both hosts use the same vg."""
    mock_exception_log = self.mock_object(storage_helper.LOG, 'exception')
    src_instance = base_fake_share.fake_share_instance(host=source_host)
    dest_instance = base_fake_share.fake_share_instance(host=dest_host)

    result = self.LVMHelper.migration_check_compatibility(
        self.context, src_instance, dest_instance, share_server=None,
        destination_share_server=None)

    self.assertDictMatch({
        'compatible': compatible,
        'writable': True,
        'nondisruptive': False,
        'preserve_metadata': True,
        'preserve_snapshots': False,
    }, result)
    if not compatible:
        mock_exception_log.assert_called_once()
|
||||
|
||||
def test_migration_continue(self):
    """The first migration phase is reported finished immediately."""
    self.assertTrue(self.LVMHelper.migration_continue(
        self.context, None, None, None, None, share_server=None,
        destination_share_server=None))
|
||||
|
||||
def test_migration_get_progress(self):
    """Progress is always reported as 100%."""
    self.assertDictMatch(
        {'total_progress': 100},
        self.LVMHelper.migration_get_progress(
            self.context, None, None, None, None, share_server=None,
            destination_share_server=None))
|
||||
|
||||
@ddt.data({'source_host': 'host@back1', 'dest_host': 'host@back1',
           'shares_specs': {}},
          {'source_host': 'host@back1', 'dest_host': 'host@back2#vg1',
           'shares_specs': {'shares_req_spec': [
               {'share_instance_properties': {'host': 'host@back1#vg2'}}
           ]}})
@ddt.unpack
def test_share_server_migration_check_compatibility_false(
        self, source_host, dest_host, shares_specs):
    """Same backend or a different vg rejects the server migration."""
    mock_error_log = self.mock_object(storage_helper.LOG, 'error')
    source_server = {'id': 'fake_id', 'host': source_host}

    result = self.LVMHelper.share_server_migration_check_compatibility(
        self.context, source_server, dest_host, None, None, shares_specs)

    self.assertDictMatch({
        'compatible': False,
        'writable': None,
        'nondisruptive': None,
        'preserve_snapshots': None,
        'migration_cancel': None,
        'migration_get_progress': None,
    }, result)
    mock_error_log.assert_called_once()
|
||||
|
||||
@ddt.data({'source_host': 'host@back1', 'dest_host': 'host@back2#vg1',
           'shares_specs': {'shares_req_spec': [
              {'share_instance_properties': {'host': 'host@back1#vg1'}}
           ]}},
          {'source_host': 'host@back1', 'dest_host': 'host@back2',
           'shares_specs': {'shares_req_spec': [
              {'share_instance_properties': {'host': 'host@back1#vg1'}}
           ]}})
@ddt.unpack
def test_share_server_migration_check_compatibility_true(
        self, source_host, dest_host, shares_specs):
    """Different backend with a matching (or unset) vg is compatible."""
    source_server = {'id': 'fake_id', 'host': source_host}

    result = self.LVMHelper.share_server_migration_check_compatibility(
        self.context, source_server, dest_host, None, None, shares_specs)

    self.assertDictMatch({
        'compatible': True,
        'writable': True,
        'nondisruptive': False,
        'preserve_snapshots': False,
        'migration_cancel': True,
        'migration_get_progress': True,
    }, result)
|
||||
|
||||
def test_share_server_migration_continue(self):
    """The first server-migration phase is reported finished immediately."""
    self.assertTrue(self.LVMHelper.share_server_migration_continue(
        self.context, None, None, None, None))
|
||||
|
||||
def test_share_server_migration_get_progess(self):
    """Server migration progress is always reported as 100%."""
    # NOTE(review): the method name has a typo ('progess'); renaming it
    # would change the externally visible test id, so it is kept.
    self.assertDictMatch(
        {'total_progress': 100},
        self.LVMHelper.share_server_migration_get_progress(
            self.context, None, None, None, None))
|
||||
|
||||
def test_get_share_pool_name(self):
    """The pool name is the configured container volume group."""
    expected_vg = 'fake_vg'
    self.LVMHelper.configuration.container_volume_group = expected_vg

    self.assertEqual(
        self.LVMHelper.get_share_pool_name('fake_share_id'), expected_vg)
|
||||
|
@ -0,0 +1,7 @@
|
||||
---
|
||||
features:
|
||||
- |
|
||||
The container driver now supports driver assisted share migration and
|
||||
share server migration across share networks, and across backends that
|
||||
share the same underlying volume group (configuration option:
|
||||
container_volume_group).
|
Loading…
Reference in New Issue
Block a user