Merge "Unity: Add replication support"
commit 4935f604ab
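For reviewers who want to exercise the change locally: replication is wired through the standard Cinder replication_device option. A minimal sketch of a backend section in cinder.conf, assuming a single secondary Unity; the backend name, addresses and credentials below are illustrative and not taken from this change:

    [unity-backend]
    volume_driver = cinder.volume.drivers.dell_emc.unity.driver.UnityDriver
    san_ip = 1.1.1.1
    san_login = user1
    san_password = password1
    replication_device = backend_id:secondary_unity,san_ip:2.2.2.2,max_time_out_of_sync:60

The keys mirror what the new ReplicationDevice validates in the tests below: backend_id and san_ip are required, san_login/san_password fall back to the primary backend's credentials, and max_time_out_of_sync defaults to 60.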
@@ -18,35 +18,39 @@ class StoropsException(Exception):
     message = 'Storops Error.'
 
 
-class UnityLunNameInUseError(StoropsException):
+class UnityException(StoropsException):
     pass
 
 
-class UnityResourceNotFoundError(StoropsException):
+class UnityLunNameInUseError(UnityException):
     pass
 
 
-class UnitySnapNameInUseError(StoropsException):
+class UnityResourceNotFoundError(UnityException):
     pass
 
 
-class UnityDeleteAttachedSnapError(StoropsException):
+class UnitySnapNameInUseError(UnityException):
     pass
 
 
-class UnityResourceAlreadyAttachedError(StoropsException):
+class UnityDeleteAttachedSnapError(UnityException):
     pass
 
 
-class UnityPolicyNameInUseError(StoropsException):
+class UnityResourceAlreadyAttachedError(UnityException):
     pass
 
 
-class UnityNothingToModifyError(StoropsException):
+class UnityPolicyNameInUseError(UnityException):
     pass
 
 
-class UnityThinCloneLimitExceededError(StoropsException):
+class UnityNothingToModifyError(UnityException):
+    pass
+
+
+class UnityThinCloneLimitExceededError(UnityException):
     pass
 
 
@@ -82,15 +86,23 @@ class AdapterSetupError(Exception):
     pass
 
 
+class ReplicationManagerSetupError(Exception):
+    pass
+
+
 class HostDeleteIsCalled(Exception):
     pass
 
 
-class UnityThinCloneNotAllowedError(StoropsException):
+class UnityThinCloneNotAllowedError(UnityException):
     pass
 
 
-class SystemAPINotSupported(StoropsException):
+class SystemAPINotSupported(UnityException):
+    pass
+
+
+class UnityDeleteLunInReplicationError(UnityException):
     pass
 
 
@@ -26,6 +26,8 @@ from cinder.tests.unit.volume.drivers.dell_emc.unity \
     import fake_exception as ex
 from cinder.tests.unit.volume.drivers.dell_emc.unity import test_client
 from cinder.volume.drivers.dell_emc.unity import adapter
+from cinder.volume.drivers.dell_emc.unity import client
+from cinder.volume.drivers.dell_emc.unity import replication
 
 
 ########################
@@ -61,6 +63,8 @@ class MockConnector(object):
 class MockDriver(object):
     def __init__(self):
         self.configuration = mock.Mock(volume_dd_blocksize='1M')
+        self.replication_manager = MockReplicationManager()
+        self.protocol = 'iSCSI'
 
     @staticmethod
     def _connect_device(conn):
@@ -68,6 +72,27 @@ class MockDriver(object):
                 'device': {'path': 'dev'},
                 'conn': {'data': {}}}
 
+    def get_version(self):
+        return '1.0.0'
+
+
+class MockReplicationManager(object):
+    def __init__(self):
+        self.is_replication_configured = False
+        self.replication_devices = {}
+        self.active_backend_id = None
+        self.is_service_failed_over = None
+        self.default_device = None
+        self.active_adapter = None
+
+    def failover_service(self, backend_id):
+        if backend_id == 'default':
+            self.is_service_failed_over = False
+        elif backend_id == 'secondary_unity':
+            self.is_service_failed_over = True
+        else:
+            raise exception.VolumeBackendAPIException()
+
 
 class MockClient(object):
     def __init__(self):
@@ -232,6 +257,40 @@ class MockClient(object):
         if dest_pool_id == 'pool_3':
             return False
 
+    def get_remote_system(self, name=None):
+        if name == 'not-found-remote-system':
+            return None
+
+        return test_client.MockResource(_id='RS_1')
+
+    def get_replication_session(self, name=None):
+        if name == 'not-found-rep-session':
+            raise client.ClientReplicationError()
+
+        rep_session = test_client.MockResource(_id='rep_session_id_1')
+        rep_session.name = name
+        rep_session.src_resource_id = 'sv_1'
+        rep_session.dst_resource_id = 'sv_99'
+        return rep_session
+
+    def create_replication(self, src_lun, max_time_out_of_sync,
+                           dst_pool_id, remote_system):
+        if (src_lun.get_id() == 'sv_1' and max_time_out_of_sync == 60
+                and dst_pool_id == 'pool_1'
+                and remote_system.get_id() == 'RS_1'):
+            rep_session = test_client.MockResource(_id='rep_session_id_1')
+            rep_session.name = 'rep_session_name_1'
+            return rep_session
+        return None
+
+    def failover_replication(self, rep_session):
+        if rep_session.name != 'rep_session_name_1':
+            raise client.ClientReplicationError()
+
+    def failback_replication(self, rep_session):
+        if rep_session.name != 'rep_session_name_1':
+            raise client.ClientReplicationError()
+
 
 class MockLookupService(object):
     @staticmethod
@@ -253,6 +312,32 @@ class MockOSResource(mock.Mock):
             self.name = kwargs['name']
 
 
+def mock_replication_device(device_conf=None, serial_number=None,
+                            max_time_out_of_sync=None,
+                            destination_pool_id=None):
+    if device_conf is None:
+        device_conf = {
+            'backend_id': 'secondary_unity',
+            'san_ip': '2.2.2.2'
+        }
+
+    if serial_number is None:
+        serial_number = 'SECONDARY_UNITY_SN'
+
+    if max_time_out_of_sync is None:
+        max_time_out_of_sync = 60
+
+    if destination_pool_id is None:
+        destination_pool_id = 'pool_1'
+
+    rep_device = replication.ReplicationDevice(device_conf, MockDriver())
+    rep_device._adapter = mock_adapter(adapter.CommonAdapter)
+    rep_device._adapter._serial_number = serial_number
+    rep_device.max_time_out_of_sync = max_time_out_of_sync
+    rep_device._dst_pool = test_client.MockResource(_id=destination_pool_id)
+    return rep_device
+
+
 def mock_adapter(driver_clz):
     ret = driver_clz()
     ret._client = MockClient()
@@ -460,6 +545,8 @@ class CommonAdapterTest(test.TestCase):
         self.assertTrue(stats['thin_provisioning_support'])
         self.assertTrue(stats['compression_support'])
         self.assertTrue(stats['consistent_group_snapshot_enabled'])
+        self.assertFalse(stats['replication_enabled'])
+        self.assertEqual(0, len(stats['replication_targets']))
 
     def test_update_volume_stats(self):
         stats = self.adapter.update_volume_stats()
@@ -468,8 +555,26 @@ class CommonAdapterTest(test.TestCase):
         self.assertTrue(stats['thin_provisioning_support'])
         self.assertTrue(stats['thick_provisioning_support'])
         self.assertTrue(stats['consistent_group_snapshot_enabled'])
+        self.assertFalse(stats['replication_enabled'])
+        self.assertEqual(0, len(stats['replication_targets']))
         self.assertEqual(1, len(stats['pools']))
 
+    def test_get_replication_stats(self):
+        self.adapter.replication_manager.is_replication_configured = True
+        self.adapter.replication_manager.replication_devices = {
+            'secondary_unity': None
+        }
+
+        stats = self.adapter.update_volume_stats()
+        self.assertTrue(stats['replication_enabled'])
+        self.assertEqual(['secondary_unity'], stats['replication_targets'])
+
+        self.assertEqual(1, len(stats['pools']))
+        pool_stats = stats['pools'][0]
+        self.assertTrue(pool_stats['replication_enabled'])
+        self.assertEqual(['secondary_unity'],
+                         pool_stats['replication_targets'])
+
     def test_serial_number(self):
         self.assertEqual('CLIENT_SERIAL', self.adapter.serial_number)
@@ -1132,6 +1237,162 @@ class CommonAdapterTest(test.TestCase):
         mocked_delete.assert_called_once_with(cg_snap)
         self.assertEqual((None, None), ret)
 
+    def test_setup_replications(self):
+        secondary_device = mock_replication_device()
+
+        self.adapter.replication_manager.is_replication_configured = True
+        self.adapter.replication_manager.replication_devices = {
+            'secondary_unity': secondary_device
+        }
+        model_update = self.adapter.setup_replications(
+            test_client.MockResource(_id='sv_1'), {})
+
+        self.assertIn('replication_status', model_update)
+        self.assertEqual('enabled', model_update['replication_status'])
+
+        self.assertIn('replication_driver_data', model_update)
+        self.assertEqual('{"secondary_unity": "rep_session_name_1"}',
+                         model_update['replication_driver_data'])
+
+    def test_setup_replications_not_configured_replication(self):
+        model_update = self.adapter.setup_replications(
+            test_client.MockResource(_id='sv_1'), {})
+        self.assertEqual(0, len(model_update))
+
+    def test_setup_replications_raise(self):
+        secondary_device = mock_replication_device(
+            serial_number='not-found-remote-system')
+
+        self.adapter.replication_manager.is_replication_configured = True
+        self.adapter.replication_manager.replication_devices = {
+            'secondary_unity': secondary_device
+        }
+
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.adapter.setup_replications,
+                          test_client.MockResource(_id='sv_1'),
+                          {})
+
+    @ddt.data({'failover_to': 'secondary_unity'},
+              {'failover_to': None})
+    @ddt.unpack
+    def test_failover(self, failover_to):
+        secondary_id = 'secondary_unity'
+        secondary_device = mock_replication_device()
+        self.adapter.replication_manager.is_replication_configured = True
+        self.adapter.replication_manager.replication_devices = {
+            secondary_id: secondary_device
+        }
+
+        volume = MockOSResource(
+            id='volume-id-1',
+            name='volume-name-1',
+            replication_driver_data='{"secondary_unity":"rep_session_name_1"}')
+        model_update = self.adapter.failover([volume],
+                                             secondary_id=failover_to)
+        self.assertEqual(3, len(model_update))
+        active_backend_id, volumes_update, groups_update = model_update
+        self.assertEqual(secondary_id, active_backend_id)
+        self.assertEqual([], groups_update)
+
+        self.assertEqual(1, len(volumes_update))
+        model_update = volumes_update[0]
+        self.assertIn('volume_id', model_update)
+        self.assertEqual('volume-id-1', model_update['volume_id'])
+        self.assertIn('updates', model_update)
+        self.assertEqual(
+            {'provider_id': 'sv_99',
+             'provider_location':
+                 'id^sv_99|system^SECONDARY_UNITY_SN|type^lun|version^None'},
+            model_update['updates'])
+        self.assertTrue(
+            self.adapter.replication_manager.is_service_failed_over)
+
+    def test_failover_raise(self):
+        secondary_id = 'secondary_unity'
+        secondary_device = mock_replication_device()
+        self.adapter.replication_manager.is_replication_configured = True
+        self.adapter.replication_manager.replication_devices = {
+            secondary_id: secondary_device
+        }
+
+        vol1 = MockOSResource(
+            id='volume-id-1',
+            name='volume-name-1',
+            replication_driver_data='{"secondary_unity":"rep_session_name_1"}')
+        vol2 = MockOSResource(
+            id='volume-id-2',
+            name='volume-name-2',
+            replication_driver_data='{"secondary_unity":"rep_session_name_2"}')
+        model_update = self.adapter.failover([vol1, vol2],
+                                             secondary_id=secondary_id)
+        active_backend_id, volumes_update, groups_update = model_update
+        self.assertEqual(secondary_id, active_backend_id)
+        self.assertEqual([], groups_update)
+
+        self.assertEqual(2, len(volumes_update))
+        m = volumes_update[0]
+        self.assertIn('volume_id', m)
+        self.assertEqual('volume-id-1', m['volume_id'])
+        self.assertIn('updates', m)
+        self.assertEqual(
+            {'provider_id': 'sv_99',
+             'provider_location':
+                 'id^sv_99|system^SECONDARY_UNITY_SN|type^lun|version^None'},
+            m['updates'])
+
+        m = volumes_update[1]
+        self.assertIn('volume_id', m)
+        self.assertEqual('volume-id-2', m['volume_id'])
+        self.assertIn('updates', m)
+        self.assertEqual({'replication_status': 'failover-error'},
+                         m['updates'])
+
+        self.assertTrue(
+            self.adapter.replication_manager.is_service_failed_over)
+
+    def test_failover_failback(self):
+        secondary_id = 'secondary_unity'
+        secondary_device = mock_replication_device()
+        self.adapter.replication_manager.is_replication_configured = True
+        self.adapter.replication_manager.replication_devices = {
+            secondary_id: secondary_device
+        }
+        default_device = mock_replication_device(
+            device_conf={
+                'backend_id': 'default',
+                'san_ip': '10.10.10.10'
+            }, serial_number='PRIMARY_UNITY_SN'
+        )
+        self.adapter.replication_manager.default_device = default_device
+        self.adapter.replication_manager.active_adapter = (
+            self.adapter.replication_manager.replication_devices[
+                secondary_id].adapter)
+        self.adapter.replication_manager.active_backend_id = secondary_id
+
+        volume = MockOSResource(
+            id='volume-id-1',
+            name='volume-name-1',
+            replication_driver_data='{"secondary_unity":"rep_session_name_1"}')
+        model_update = self.adapter.failover([volume],
+                                             secondary_id='default')
+        active_backend_id, volumes_update, groups_update = model_update
+        self.assertEqual('default', active_backend_id)
+        self.assertEqual([], groups_update)
+
+        self.assertEqual(1, len(volumes_update))
+        model_update = volumes_update[0]
+        self.assertIn('volume_id', model_update)
+        self.assertEqual('volume-id-1', model_update['volume_id'])
+        self.assertIn('updates', model_update)
+        self.assertEqual(
+            {'provider_id': 'sv_1',
+             'provider_location':
+                 'id^sv_1|system^PRIMARY_UNITY_SN|type^lun|version^None'},
+            model_update['updates'])
+        self.assertFalse(
+            self.adapter.replication_manager.is_service_failed_over)
+
 
 class FCAdapterTest(test.TestCase):
     def setUp(self):
@@ -63,7 +63,7 @@ class MockResource(object):
     def get_id(self):
         return self._id
 
-    def delete(self):
+    def delete(self, force_snap_delete=None):
         if self.get_id() in ['snap_2']:
             raise ex.SnapDeleteIsCalled()
         elif self.get_id() == 'not_found':
@@ -72,6 +72,11 @@ class MockResource(object):
             raise ex.UnityDeleteAttachedSnapError()
         elif self.name == 'empty_host':
             raise ex.HostDeleteIsCalled()
+        elif self.get_id() == 'lun_in_replication':
+            if not force_snap_delete:
+                raise ex.UnityDeleteLunInReplicationError()
+        elif self.get_id() == 'lun_rep_session_1':
+            raise ex.UnityResourceNotFoundError()
 
     @property
     def pool(self):
@@ -207,6 +212,21 @@ class MockResource(object):
             return False
         return True
 
+    def replicate_with_dst_resource_provisioning(self, max_time_out_of_sync,
+                                                 dst_pool_id,
+                                                 remote_system=None,
+                                                 dst_lun_name=None):
+        return {'max_time_out_of_sync': max_time_out_of_sync,
+                'dst_pool_id': dst_pool_id,
+                'remote_system': remote_system,
+                'dst_lun_name': dst_lun_name}
+
+    def failover(self, sync=None):
+        return {'sync': sync}
+
+    def failback(self, force_full_copy=None):
+        return {'force_full_copy': force_full_copy}
+
 
 class MockResourceList(object):
     def __init__(self, names=None, ids=None):
@@ -327,6 +347,28 @@ class MockSystem(object):
     def get_io_limit_policy(name):
         return MockResource(name=name)
 
+    def get_remote_system(self, name=None):
+        if name == 'not-exist':
+            raise ex.UnityResourceNotFoundError()
+        else:
+            return {'name': name}
+
+    def get_replication_session(self, name=None,
+                                src_resource_id=None, dst_resource_id=None):
+        if name == 'not-exist':
+            raise ex.UnityResourceNotFoundError()
+        elif src_resource_id == 'lun_in_replication':
+            return [MockResource(name='rep_session')]
+        elif src_resource_id == 'lun_not_in_replication':
+            raise ex.UnityResourceNotFoundError()
+        elif src_resource_id == 'lun_in_multiple_replications':
+            return [MockResource(_id='lun_rep_session_1'),
+                    MockResource(_id='lun_rep_session_2')]
+        else:
+            return {'name': name,
+                    'src_resource_id': src_resource_id,
+                    'dst_resource_id': dst_resource_id}
+
 
 @mock.patch.object(client, 'storops', new='True')
 def get_client():
@@ -404,6 +446,15 @@ class ClientTest(unittest.TestCase):
         except ex.StoropsException:
             self.fail('not found error should be dealt with silently.')
 
+    def test_delete_lun_in_replication(self):
+        self.client.delete_lun('lun_in_replication')
+
+    @ddt.data({'lun_id': 'lun_not_in_replication'},
+              {'lun_id': 'lun_in_multiple_replications'})
+    @ddt.unpack
+    def test_delete_lun_replications(self, lun_id):
+        self.client.delete_lun_replications(lun_id)
+
     def test_get_lun_with_id(self):
         lun = self.client.get_lun('lun4')
         self.assertEqual('lun4', lun.get_id())
@@ -748,3 +799,61 @@ class ClientTest(unittest.TestCase):
         ret = self.client.filter_snaps_in_cg_snap('snap_cg_1')
         mocked_get.assert_called_once_with(snap_group='snap_cg_1')
         self.assertEqual(snaps, ret)
+
+    def test_create_replication(self):
+        remote_system = MockResource(_id='RS_1')
+        lun = MockResource(_id='sv_1')
+        called = self.client.create_replication(lun, 60, 'pool_1',
+                                                remote_system)
+        self.assertEqual(called['max_time_out_of_sync'], 60)
+        self.assertEqual(called['dst_pool_id'], 'pool_1')
+        self.assertIs(called['remote_system'], remote_system)
+
+    def test_get_remote_system(self):
+        called = self.client.get_remote_system(name='remote-unity')
+        self.assertEqual(called['name'], 'remote-unity')
+
+    def test_get_remote_system_not_exist(self):
+        called = self.client.get_remote_system(name='not-exist')
+        self.assertIsNone(called)
+
+    def test_get_replication_session(self):
+        called = self.client.get_replication_session(name='rep-name')
+        self.assertEqual(called['name'], 'rep-name')
+
+    def test_get_replication_session_not_exist(self):
+        self.assertRaises(client.ClientReplicationError,
+                          self.client.get_replication_session,
+                          name='not-exist')
+
+    def test_failover_replication(self):
+        rep_session = MockResource(_id='rep_id_1')
+        called = self.client.failover_replication(rep_session)
+        self.assertEqual(called['sync'], False)
+
+    def test_failover_replication_raise(self):
+        rep_session = MockResource(_id='rep_id_1')
+
+        def mock_failover(sync=None):
+            raise ex.UnityResourceNotFoundError()
+
+        rep_session.failover = mock_failover
+        self.assertRaises(client.ClientReplicationError,
+                          self.client.failover_replication,
+                          rep_session)
+
+    def test_failback_replication(self):
+        rep_session = MockResource(_id='rep_id_1')
+        called = self.client.failback_replication(rep_session)
+        self.assertEqual(called['force_full_copy'], True)
+
+    def test_failback_replication_raise(self):
+        rep_session = MockResource(_id='rep_id_1')
+
+        def mock_failback(force_full_copy=None):
+            raise ex.UnityResourceNotFoundError()
+
+        rep_session.failback = mock_failback
+        self.assertRaises(client.ClientReplicationError,
+                          self.client.failback_replication,
+                          rep_session)
@@ -32,7 +32,11 @@ from cinder.volume.drivers.dell_emc.unity import driver
 ########################
 
 
 class MockAdapter(object):
+    def __init__(self):
+        self.is_setup = False
+
     def do_setup(self, driver_object, configuration):
+        self.is_setup = True
         raise ex.AdapterSetupError()
 
     @staticmethod
@@ -135,6 +139,20 @@ class MockAdapter(object):
     def delete_group_snapshot(group_snapshot):
         return group_snapshot
 
+    def failover(self, volumes, secondary_id=None, groups=None):
+        return {'volumes': volumes,
+                'secondary_id': secondary_id,
+                'groups': groups}
+
+
+class MockReplicationManager(object):
+    def __init__(self):
+        self.active_adapter = MockAdapter()
+
+    def do_setup(self, d):
+        if isinstance(d, driver.UnityDriver):
+            raise ex.ReplicationManagerSetupError()
+
 
 ########################
 #
@@ -189,7 +207,7 @@ class UnityDriverTest(unittest.TestCase):
     def setUp(self):
         self.config = conf.Configuration(None)
         self.driver = driver.UnityDriver(configuration=self.config)
-        self.driver.adapter = MockAdapter()
+        self.driver.replication_manager = MockReplicationManager()
 
     def test_default_initialize(self):
         config = conf.Configuration(None)
@@ -208,6 +226,13 @@ class UnityDriverTest(unittest.TestCase):
         self.assertEqual(1, config.ssh_min_pool_conn)
         self.assertEqual(5, config.ssh_max_pool_conn)
         self.assertEqual('iSCSI', iscsi_driver.protocol)
+        self.assertIsNone(iscsi_driver.active_backend_id)
+
+    def test_initialize_with_active_backend_id(self):
+        config = conf.Configuration(None)
+        iscsi_driver = driver.UnityDriver(configuration=config,
+                                          active_backend_id='secondary_unity')
+        self.assertEqual('secondary_unity', iscsi_driver.active_backend_id)
 
     def test_fc_initialize(self):
         config = conf.Configuration(None)
@@ -219,7 +244,7 @@ class UnityDriverTest(unittest.TestCase):
         def f():
             self.driver.do_setup(None)
 
-        self.assertRaises(ex.AdapterSetupError, f)
+        self.assertRaises(ex.ReplicationManagerSetupError, f)
 
     def test_create_volume(self):
         volume = self.get_volume()
@@ -422,3 +447,12 @@ class UnityDriverTest(unittest.TestCase):
         ret = self.driver.delete_group_snapshot(self.get_context(), cg_snap,
                                                 None)
         self.assertEqual(ret, cg_snap)
+
+    def test_failover_host(self):
+        volume = self.get_volume()
+        called = self.driver.failover_host(None, [volume],
+                                           secondary_id='secondary_unity',
+                                           groups=None)
+        self.assertListEqual(called['volumes'], [volume])
+        self.assertEqual('secondary_unity', called['secondary_id'])
+        self.assertIsNone(called['groups'])
@@ -0,0 +1,362 @@
+# Copyright (c) 2016 - 2019 Dell Inc. or its subsidiaries.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+import ddt
+from mock import mock
+
+from cinder import exception
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.dell_emc.unity import adapter as unity_adapter
+from cinder.volume.drivers.dell_emc.unity import driver
+from cinder.volume.drivers.dell_emc.unity import replication
+from cinder.volume.drivers.san.san import san_opts
+
+
+@ddt.ddt
+class UnityReplicationTest(unittest.TestCase):
+    @ddt.data({'version': '1.0.0', 'protocol': 'FC',
+               'expected': unity_adapter.FCAdapter},
+              {'version': '2.0.0', 'protocol': 'iSCSI',
+               'expected': unity_adapter.ISCSIAdapter})
+    @ddt.unpack
+    def test_init_adapter(self, version, protocol, expected):
+        a = replication.init_adapter(version, protocol)
+        self.assertIsInstance(a, expected)
+        self.assertEqual(version, a.version)
+
+
+@ddt.ddt
+class UnityReplicationDeviceTest(unittest.TestCase):
+    def setUp(self):
+        self.config = conf.Configuration(san_opts,
+                                         config_group='unity-backend')
+        self.config.san_ip = '1.1.1.1'
+        self.config.san_login = 'user1'
+        self.config.san_password = 'password1'
+        self.driver = driver.UnityDriver(configuration=self.config)
+
+        conf_dict = {'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2'}
+        self.mock_adapter = mock.MagicMock(is_setup=False)
+
+        def mock_do_setup(*args):
+            self.mock_adapter.is_setup = True
+
+        self.mock_adapter.do_setup = mock.MagicMock(side_effect=mock_do_setup)
+        with mock.patch('cinder.volume.drivers.dell_emc.unity.'
+                        'replication.init_adapter',
+                        return_value=self.mock_adapter):
+            self.replication_device = replication.ReplicationDevice(
+                conf_dict, self.driver)
+
+    @ddt.data(
+        {
+            'conf_dict': {
+                'backend_id': 'secondary_unity',
+                'san_ip': '2.2.2.2'
+            },
+            'expected': [
+                'secondary_unity', '2.2.2.2', 'user1', 'password1', 60
+            ]
+        },
+        {
+            'conf_dict': {
+                'backend_id': 'secondary_unity',
+                'san_ip': '2.2.2.2',
+                'san_login': 'user2',
+                'san_password': 'password2',
+                'max_time_out_of_sync': 180
+            },
+            'expected': [
+                'secondary_unity', '2.2.2.2', 'user2', 'password2', 180
+            ]
+        },
+    )
+    @ddt.unpack
+    def test_init(self, conf_dict, expected):
+        self.driver.configuration.replication_device = conf_dict
+        device = replication.ReplicationDevice(conf_dict, self.driver)
+
+        self.assertListEqual(
+            [device.backend_id, device.san_ip, device.san_login,
+             device.san_password, device.max_time_out_of_sync],
+            expected)
+        self.assertIs(self.driver, device.driver)
+
+    @ddt.data(
+        {
+            'conf_dict': {'san_ip': '2.2.2.2'},
+        },
+        {
+            'conf_dict': {'backend_id': ' ', 'san_ip': '2.2.2.2'},
+        },
+        {
+            'conf_dict': {'backend_id': 'secondary_unity'},
+        },
+        {
+            'conf_dict': {'backend_id': 'secondary_unity', 'san_ip': ' '},
+        },
+        {
+            'conf_dict': {
+                'backend_id': 'secondary_unity',
+                'san_ip': '2.2.2.2',
+                'san_login': 'user2',
+                'san_password': 'password2',
+                'max_time_out_of_sync': 'NOT_A_NUMBER'
+            },
+        },
+    )
+    @ddt.unpack
+    def test_init_raise(self, conf_dict):
+        self.driver.configuration.replication_device = conf_dict
+        self.assertRaisesRegexp(exception.InvalidConfigurationValue,
+                                'Value .* is not valid for configuration '
+                                'option "unity-backend.replication_device"',
+                                replication.ReplicationDevice,
+                                conf_dict, self.driver)
+
+    @ddt.data(
+        {
+            'conf_dict': {
+                'backend_id': 'secondary_unity',
+                'san_ip': '2.2.2.2'
+            },
+            'expected': [
+                '2.2.2.2', 'user1', 'password1'
+            ]
+        },
+        {
+            'conf_dict': {
+                'backend_id': 'secondary_unity',
+                'san_ip': '2.2.2.2',
+                'san_login': 'user2',
+                'san_password': 'password2',
+                'max_time_out_of_sync': 180
+            },
+            'expected': [
+                '2.2.2.2', 'user2', 'password2'
+            ]
+        },
+    )
+    @ddt.unpack
+    def test_device_conf(self, conf_dict, expected):
+        self.driver.configuration.replication_device = conf_dict
+        device = replication.ReplicationDevice(conf_dict, self.driver)
+
+        c = device.device_conf
+        self.assertListEqual([c.san_ip, c.san_login, c.san_password],
+                             expected)
+
+    def test_setup_adapter(self):
+        self.replication_device.setup_adapter()
+
+        # Not call adapter.do_setup after initial setup done.
+        self.replication_device.setup_adapter()
+
+        self.mock_adapter.do_setup.assert_called_once()
+
+    def test_setup_adapter_fail(self):
+        def f(*args):
+            raise exception.VolumeBackendAPIException('adapter setup failed')
+
+        self.mock_adapter.do_setup = mock.MagicMock(side_effect=f)
+
+        with self.assertRaises(exception.VolumeBackendAPIException):
+            self.replication_device.setup_adapter()
+
+    def test_adapter(self):
+        self.assertIs(self.mock_adapter, self.replication_device.adapter)
+        self.mock_adapter.do_setup.assert_called_once()
+
+    def test_destination_pool(self):
+        self.mock_adapter.storage_pools_map = {'pool-1': 'pool-1'}
+        self.assertEqual('pool-1', self.replication_device.destination_pool)
+
+
+@ddt.ddt
+class UnityReplicationManagerTest(unittest.TestCase):
+    def setUp(self):
+        self.config = conf.Configuration(san_opts,
+                                         config_group='unity-backend')
+        self.config.san_ip = '1.1.1.1'
+        self.config.san_login = 'user1'
+        self.config.san_password = 'password1'
+        self.config.replication_device = [
+            {'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2'}
+        ]
+        self.driver = driver.UnityDriver(configuration=self.config)
+
+        self.replication_manager = replication.ReplicationManager()
+
+    @mock.patch('cinder.volume.drivers.dell_emc.unity.'
+                'replication.ReplicationDevice.setup_adapter')
+    def test_do_setup(self, mock_setup_adapter):
+        self.replication_manager.do_setup(self.driver)
+        calls = [mock.call(), mock.call()]
+
+        default_device = self.replication_manager.default_device
+        self.assertEqual('1.1.1.1', default_device.san_ip)
+        self.assertEqual('user1', default_device.san_login)
+        self.assertEqual('password1', default_device.san_password)
+
+        devices = self.replication_manager.replication_devices
+        self.assertEqual(1, len(devices))
+        self.assertIn('secondary_unity', devices)
+        rep_device = devices['secondary_unity']
+        self.assertEqual('2.2.2.2', rep_device.san_ip)
+        self.assertEqual('user1', rep_device.san_login)
+        self.assertEqual('password1', rep_device.san_password)
+
+        self.assertTrue(self.replication_manager.is_replication_configured)
+
+        self.assertTrue(
+            self.replication_manager.active_backend_id is None
+            or self.replication_manager.active_backend_id == 'default')
+
+        self.assertFalse(self.replication_manager.is_service_failed_over)
+
+        active_adapter = self.replication_manager.active_adapter
+        calls.append(mock.call())
+        self.assertIs(default_device.adapter, active_adapter)
+        calls.append(mock.call())
+        mock_setup_adapter.assert_has_calls(calls)
+
+    @mock.patch('cinder.volume.drivers.dell_emc.unity.'
+                'replication.ReplicationDevice.setup_adapter')
+    def test_do_setup_replication_not_configured(self, mock_setup_adapter):
+        self.driver.configuration.replication_device = None
+
+        self.replication_manager.do_setup(self.driver)
+        calls = [mock.call()]
+
+        default_device = self.replication_manager.default_device
+        self.assertEqual('1.1.1.1', default_device.san_ip)
+        self.assertEqual('user1', default_device.san_login)
+        self.assertEqual('password1', default_device.san_password)
+
+        devices = self.replication_manager.replication_devices
+        self.assertEqual(0, len(devices))
+
+        self.assertFalse(self.replication_manager.is_replication_configured)
+
+        self.assertTrue(
+            self.replication_manager.active_backend_id is None
+            or self.replication_manager.active_backend_id == 'default')
+
+        self.assertFalse(self.replication_manager.is_service_failed_over)
+
+        active_adapter = self.replication_manager.active_adapter
+        calls.append(mock.call())
+        self.assertIs(default_device.adapter, active_adapter)
+        calls.append(mock.call())
+
+        mock_setup_adapter.assert_has_calls(calls)
+
+    @mock.patch('cinder.volume.drivers.dell_emc.unity.'
+                'replication.ReplicationDevice.setup_adapter')
+    def test_do_setup_failed_over(self, mock_setup_adapter):
+        self.driver = driver.UnityDriver(configuration=self.config,
+                                         active_backend_id='secondary_unity')
+
+        self.replication_manager.do_setup(self.driver)
+        calls = [mock.call()]
+
+        default_device = self.replication_manager.default_device
+        self.assertEqual('1.1.1.1', default_device.san_ip)
+        self.assertEqual('user1', default_device.san_login)
+        self.assertEqual('password1', default_device.san_password)
+
+        devices = self.replication_manager.replication_devices
+        self.assertEqual(1, len(devices))
+        self.assertIn('secondary_unity', devices)
+        rep_device = devices['secondary_unity']
+        self.assertEqual('2.2.2.2', rep_device.san_ip)
+        self.assertEqual('user1', rep_device.san_login)
+        self.assertEqual('password1', rep_device.san_password)
+
+        self.assertTrue(self.replication_manager.is_replication_configured)
+
+        self.assertEqual('secondary_unity',
+                         self.replication_manager.active_backend_id)
+
+        self.assertTrue(self.replication_manager.is_service_failed_over)
+
+        active_adapter = self.replication_manager.active_adapter
+        calls.append(mock.call())
+        self.assertIs(rep_device.adapter, active_adapter)
+        calls.append(mock.call())
+
+        mock_setup_adapter.assert_has_calls(calls)
+
+    @ddt.data(
+        {
+            'rep_device': [{
+                'backend_id': 'default', 'san_ip': '2.2.2.2'
+            }]
+        },
+        {
+            'rep_device': [{
+                'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2'
+            }, {
+                'backend_id': 'default', 'san_ip': '3.3.3.3'
+            }]
+        },
+        {
+            'rep_device': [{
+                'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2'
+            }, {
+                'backend_id': 'third_unity', 'san_ip': '3.3.3.3'
+            }]
+        },
+    )
+    @ddt.unpack
+    @mock.patch('cinder.volume.drivers.dell_emc.unity.'
+                'replication.ReplicationDevice.setup_adapter')
+    def test_do_setup_raise_invalid_rep_device(self, mock_setup_adapter,
+                                               rep_device):
+        self.driver.configuration.replication_device = rep_device
+
+        self.assertRaises(exception.InvalidConfigurationValue,
+                          self.replication_manager.do_setup,
+                          self.driver)
+
+    @mock.patch('cinder.volume.drivers.dell_emc.unity.'
+                'replication.ReplicationDevice.setup_adapter')
+    def test_do_setup_raise_invalid_active_backend_id(self,
+                                                      mock_setup_adapter):
+        self.driver = driver.UnityDriver(configuration=self.config,
+                                         active_backend_id='third_unity')
+
+        self.assertRaises(exception.InvalidConfigurationValue,
+                          self.replication_manager.do_setup,
+                          self.driver)
+
+    @mock.patch('cinder.volume.drivers.dell_emc.unity.'
+                'replication.ReplicationDevice.setup_adapter')
+    def test_failover_service(self, mock_setup_adapter):
+
+        self.assertIsNone(self.replication_manager.active_backend_id)
+
+        self.replication_manager.do_setup(self.driver)
+        self.replication_manager.active_adapter
+
+        self.assertEqual('default',
+                         self.replication_manager.active_backend_id)
+
+        self.replication_manager.failover_service('secondary_unity')
+        self.assertEqual('secondary_unity',
+                         self.replication_manager.active_backend_id)
@@ -61,6 +61,7 @@ class VolumeParams(object):
         self._is_thick = None
         self._is_compressed = None
         self._is_in_cg = None
+        self._is_replication_enabled = None
 
     @property
     def volume_id(self):
@@ -149,6 +150,13 @@ class VolumeParams(object):
             return self._volume.group_id
         return None
 
+    @property
+    def is_replication_enabled(self):
+        if self._is_replication_enabled is None:
+            value = utils.get_extra_spec(self._volume, 'replication_enabled')
+            self._is_replication_enabled = value == '<is> True'
+        return self._is_replication_enabled
+
     def __eq__(self, other):
         return (self.volume_id == other.volume_id and
                 self.name == other.name and
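The is_replication_enabled flag added above is read from the volume type's extra specs, so replication is opted into per volume type in the usual Cinder way. A hedged example of the operator-side commands (the type name is illustrative, not part of this change):

    cinder type-create unity-replicated
    cinder type-key unity-replicated set replication_enabled='<is> True'
    cinder create --volume-type unity-replicated --name vol-1 10

Only the exact string '<is> True' turns the flag on, matching the comparison in the property.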
@@ -157,7 +165,8 @@ class VolumeParams(object):
                 self.is_thick == other.is_thick and
                 self.is_compressed == other.is_compressed and
                 self.is_in_cg == other.is_in_cg and
-                self.cg_id == other.cg_id)
+                self.cg_id == other.cg_id and
+                self.is_replication_enabled == other.is_replication_enabled)
 
 
 class CommonAdapter(object):
@@ -166,6 +175,7 @@ class CommonAdapter(object):
     driver_volume_type = 'unknown'
 
     def __init__(self, version=None):
+        self.is_setup = False
         self.version = version
         self.driver = None
         self.config = None
@@ -185,10 +195,17 @@ class CommonAdapter(object):
         self.allowed_ports = None
         self.remove_empty_host = False
         self.to_lock_host = False
+        self.replication_manager = None
 
     def do_setup(self, driver, conf):
+        """Sets up the attributes of adapter.
+
+        :param driver: the unity driver.
+        :param conf: the driver configurations.
+        """
         self.driver = driver
         self.config = self.normalize_config(conf)
+        self.replication_manager = driver.replication_manager
         self.configured_pool_names = self.config.unity_storage_pool_names
         self.reserved_percentage = self.config.reserved_percentage
         self.max_over_subscription_ratio = (
|
|||||||
persist_path = os.path.join(cfg.CONF.state_path, 'unity', folder_name)
|
persist_path = os.path.join(cfg.CONF.state_path, 'unity', folder_name)
|
||||||
storops.TCHelper.set_up(persist_path)
|
storops.TCHelper.set_up(persist_path)
|
||||||
|
|
||||||
|
self.is_setup = True
|
||||||
|
|
||||||
def normalize_config(self, config):
|
def normalize_config(self, config):
|
||||||
config.unity_storage_pool_names = utils.remove_empty(
|
config.unity_storage_pool_names = utils.remove_empty(
|
||||||
'%s.unity_storage_pool_names' % config.config_group,
|
'%s.unity_storage_pool_names' % config.config_group,
|
||||||
@@ -298,15 +317,39 @@ class CommonAdapter(object):
         valid_names = utils.validate_pool_names(names, array_pools.name)
         return {p.name: p for p in array_pools if p.name in valid_names}
 
-    def makeup_model(self, lun, is_snap_lun=False):
+    def makeup_model(self, lun_id, is_snap_lun=False):
         lun_type = 'snap_lun' if is_snap_lun else 'lun'
-        location = self._build_provider_location(lun_id=lun.get_id(),
+        location = self._build_provider_location(lun_id=lun_id,
                                                  lun_type=lun_type)
         return {
             'provider_location': location,
-            'provider_id': lun.get_id()
+            'provider_id': lun_id
         }
 
+    def setup_replications(self, lun, model_update):
+        if not self.replication_manager.is_replication_configured:
+            LOG.debug('Replication device not configured, '
+                      'skip setting up replication for lun %s',
+                      lun.name)
+            return model_update
+
+        rep_data = {}
+        rep_devices = self.replication_manager.replication_devices
+        for backend_id, dst in rep_devices.items():
+            remote_serial_number = dst.adapter.serial_number
+            LOG.debug('Setting up replication to remote system %s',
+                      remote_serial_number)
+            remote_system = self.client.get_remote_system(remote_serial_number)
+            if remote_system is None:
+                raise exception.VolumeBackendAPIException(
+                    data=_('Setup replication to remote system %s failed.'
+                           'Cannot find it.') % remote_serial_number)
+            rep_session = self.client.create_replication(
+                lun, dst.max_time_out_of_sync,
+                dst.destination_pool.get_id(), remote_system)
+            rep_data[backend_id] = rep_session.name
+        return utils.enable_replication_status(model_update, rep_data)
+
     def create_volume(self, volume):
         """Creates a volume.
 
@@ -321,13 +364,15 @@ class CommonAdapter(object):
             'io_limit_policy': params.io_limit_policy,
             'is_thick': params.is_thick,
             'is_compressed': params.is_compressed,
-            'cg_id': params.cg_id
+            'cg_id': params.cg_id,
+            'is_replication_enabled': params.is_replication_enabled
         }
 
         LOG.info('Create Volume: %(name)s, size: %(size)s, description: '
                  '%(description)s, pool: %(pool)s, io limit policy: '
                  '%(io_limit_policy)s, thick: %(is_thick)s, '
-                 'compressed: %(is_compressed)s, cg_group: %(cg_id)s.',
+                 'compressed: %(is_compressed)s, cg_group: %(cg_id)s, '
+                 'replication_enabled: %(is_replication_enabled)s.',
                  log_params)
 
         lun = self.client.create_lun(
|
|||||||
io_limit_policy=params.io_limit_policy,
|
io_limit_policy=params.io_limit_policy,
|
||||||
is_thin=False if params.is_thick else None,
|
is_thin=False if params.is_thick else None,
|
||||||
is_compressed=params.is_compressed)
|
is_compressed=params.is_compressed)
|
||||||
|
|
||||||
if params.cg_id:
|
if params.cg_id:
|
||||||
LOG.debug('Adding lun %(lun)s to cg %(cg)s.',
|
LOG.debug('Adding lun %(lun)s to cg %(cg)s.',
|
||||||
{'lun': lun.get_id(), 'cg': params.cg_id})
|
{'lun': lun.get_id(), 'cg': params.cg_id})
|
||||||
self.client.update_cg(params.cg_id, [lun.get_id()], ())
|
self.client.update_cg(params.cg_id, [lun.get_id()], ())
|
||||||
|
|
||||||
return self.makeup_model(lun)
|
model_update = self.makeup_model(lun.get_id())
|
||||||
|
|
||||||
|
if params.is_replication_enabled:
|
||||||
|
model_update = self.setup_replications(lun, model_update)
|
||||||
|
return model_update
|
||||||
|
|
||||||
def delete_volume(self, volume):
|
def delete_volume(self, volume):
|
||||||
lun_id = self.get_lun_id(volume)
|
lun_id = self.get_lun_id(volume)
|
||||||
@@ -474,6 +524,10 @@ class CommonAdapter(object):
             'volume_backend_name': self.volume_backend_name,
             'storage_protocol': self.protocol,
             'pools': self.get_pools_stats(),
+            'replication_enabled':
+                self.replication_manager.is_replication_configured,
+            'replication_targets':
+                list(self.replication_manager.replication_devices),
         }
 
     def get_pools_stats(self):
@@ -499,7 +553,11 @@ class CommonAdapter(object):
             'compression_support': pool.is_all_flash,
             'max_over_subscription_ratio': (
                 self.max_over_subscription_ratio),
-            'multiattach': True
+            'multiattach': True,
+            'replication_enabled':
+                self.replication_manager.is_replication_configured,
+            'replication_targets':
+                list(self.replication_manager.replication_devices),
         }
 
     def get_lun_id(self, volume):
@@ -737,9 +795,13 @@ class CommonAdapter(object):
 
     def create_volume_from_snapshot(self, volume, snapshot):
         snap = self.client.get_snap(snapshot.name)
-        return self.makeup_model(
-            self._thin_clone(VolumeParams(self, volume), snap),
-            is_snap_lun=True)
+        params = VolumeParams(self, volume)
+        lun = self._thin_clone(params, snap)
+        model_update = self.makeup_model(lun.get_id(), is_snap_lun=True)
+
+        if params.is_replication_enabled:
+            model_update = self.setup_replications(lun, model_update)
+        return model_update
 
     def create_cloned_volume(self, volume, src_vref):
         """Creates cloned volume.
@@ -777,10 +839,15 @@ class CommonAdapter(object):
                       '%(name)s is attached: %(attach)s.',
                       {'name': src_vref.name,
                        'attach': src_vref.volume_attachment})
-            return self.makeup_model(lun)
+            model_update = self.makeup_model(lun.get_id())
         else:
             lun = self._thin_clone(vol_params, src_snap, src_lun=src_lun)
-            return self.makeup_model(lun, is_snap_lun=True)
+            model_update = self.makeup_model(lun.get_id(),
+                                             is_snap_lun=True)
+
+        if vol_params.is_replication_enabled:
+            model_update = self.setup_replications(lun, model_update)
+        return model_update
 
     def get_pool_name(self, volume):
         return self.client.get_pool_name(volume.name)
@@ -925,6 +992,75 @@ class CommonAdapter(object):
             self.client.delete_snap(cg_snap)
         return None, None

+    @cinder_utils.trace
+    def failover(self, volumes, secondary_id=None, groups=None):
+        # TODO(ryan) support group failover after group bp merges
+        # https://review.openstack.org/#/c/574119/
+
+        if secondary_id is None:
+            LOG.debug('No secondary specified when failover. '
+                      'Randomly choose a secondary')
+            secondary_id = random.choice(
+                list(self.replication_manager.replication_devices))
+            LOG.debug('Chose %s as secondary', secondary_id)
+
+        is_failback = secondary_id == 'default'
+
+        def _failover_or_back(volume):
+            LOG.debug('Failing over volume: %(vol)s to secondary id: '
+                      '%(sec_id)s', vol=volume.name, sec_id=secondary_id)
+            model_update = {
+                'volume_id': volume.id,
+                'updates': {}
+            }
+
+            if not volume.replication_driver_data:
+                LOG.error('Empty replication_driver_data of volume: %s, '
+                          'replication session name should be in it.',
+                          volume.name)
+                return utils.error_replication_status(model_update)
+            rep_data = utils.load_replication_data(
+                volume.replication_driver_data)
+
+            if is_failback:
+                # Failback executed on secondary backend which is currently
+                # active.
+                _adapter = self.replication_manager.default_device.adapter
+                _client = self.replication_manager.active_adapter.client
+                rep_name = rep_data[self.replication_manager.active_backend_id]
+            else:
+                # Failover executed on secondary backend because primary could
+                # die.
+                _adapter = self.replication_manager.replication_devices[
+                    secondary_id].adapter
+                _client = _adapter.client
+                rep_name = rep_data[secondary_id]
+
+            try:
+                rep_session = _client.get_replication_session(name=rep_name)
+
+                if is_failback:
+                    _client.failback_replication(rep_session)
+                    new_model = _adapter.makeup_model(
+                        rep_session.src_resource_id)
+                else:
+                    _client.failover_replication(rep_session)
+                    new_model = _adapter.makeup_model(
+                        rep_session.dst_resource_id)
+
+                model_update['updates'].update(new_model)
+                self.replication_manager.failover_service(secondary_id)
+                return model_update
+            except client.ClientReplicationError as ex:
+                LOG.error('Failover failed, volume: %(vol)s, secondary id: '
+                          '%(sec_id)s, error: %(err)s',
+                          vol=volume.name, sec_id=secondary_id, err=ex)
+                return utils.error_replication_status(model_update)
+
+        return (secondary_id,
+                [_failover_or_back(volume) for volume in volumes],
+                [])
+
+
 class ISCSIAdapter(CommonAdapter):
     protocol = PROTOCOL_ISCSI
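`failover()` returns the three-tuple that Cinder expects from the `failover_host()` driver API: the new active backend id, per-volume model updates, and per-group updates (always empty here, since group failover is still a TODO). A sketch of a successful failover of a single volume, with illustrative values:

    # Illustrative return value of failover(volumes, secondary_id='unity-secondary'):
    (
        'unity-secondary',          # new active backend id
        [
            {
                'volume_id': volume.id,
                'updates': {
                    # whatever the secondary adapter's makeup_model() returns,
                    # e.g. provider_location pointing at the target LUN of
                    # the replication session
                },
            },
        ],
        [],                         # group updates (group failover not supported yet)
    )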
@@ -104,10 +104,47 @@ class UnityClient(object):
         """
         try:
             lun = self.system.get_lun(_id=lun_id)
-            lun.delete()
         except storops_ex.UnityResourceNotFoundError:
-            LOG.debug("LUN %s doesn't exist. Deletion is not needed.",
+            LOG.debug("Cannot get LUN %s from unity. Do nothing.", lun_id)
+            return
+
+        def _delete_lun_if_exist(force_snap_delete=False):
+            """Deletes LUN, skip if it doesn't exist."""
+            try:
+                lun.delete(force_snap_delete=force_snap_delete)
+            except storops_ex.UnityResourceNotFoundError:
+                LOG.debug("LUN %s doesn't exist. Deletion is not needed.",
+                          lun_id)
+
+        try:
+            _delete_lun_if_exist()
+        except storops_ex.UnityDeleteLunInReplicationError:
+            LOG.info("LUN %s is participating in replication sessions. "
+                     "Delete replication sessions first.",
+                     lun_id)
+            self.delete_lun_replications(lun_id)
+
+            # Deleting the LUN immediately after deleting the replication
+            # sessions could fail if force_snap_delete is not passed in.
+            _delete_lun_if_exist(force_snap_delete=True)
+
+    def delete_lun_replications(self, lun_id):
+        LOG.debug("Deleting all the replication sessions which are from "
+                  "lun %s", lun_id)
+        try:
+            rep_sessions = self.system.get_replication_session(
+                src_resource_id=lun_id)
+        except storops_ex.UnityResourceNotFoundError:
+            LOG.debug("No replication session found from lun %s. Do nothing.",
                       lun_id)
+        else:
+            for session in rep_sessions:
+                try:
+                    session.delete()
+                except storops_ex.UnityResourceNotFoundError:
+                    LOG.debug("Replication session %s doesn't exist. "
+                              "Skip the deletion.", session.get_id())

     def get_lun(self, lun_id=None, name=None):
         """Gets LUN on the Unity system.
@@ -388,3 +425,86 @@ class UnityClient(object):

     def filter_snaps_in_cg_snap(self, cg_snap_id):
         return self.system.get_snap(snap_group=cg_snap_id).list
+
+    @staticmethod
+    def create_replication(src_lun, max_time_out_of_sync,
+                           dst_pool_id, remote_system):
+        """Creates a new lun on remote system and sets up replication to it."""
+        return src_lun.replicate_with_dst_resource_provisioning(
+            max_time_out_of_sync, dst_pool_id, remote_system=remote_system,
+            dst_lun_name=src_lun.name)
+
+    def get_remote_system(self, name=None):
+        """Gets remote system on the Unity system.
+
+        :param name: remote system name.
+        :return: remote system.
+        """
+        try:
+            return self.system.get_remote_system(name=name)
+        except storops_ex.UnityResourceNotFoundError:
+            LOG.warning("Remote system with name %s not found. Return None.",
+                        name)
+            return None
+
+    def get_replication_session(self, name=None,
+                                src_resource_id=None, dst_resource_id=None):
+        """Gets replication session via its name.
+
+        :param name: replication session name.
+        :param src_resource_id: replication session's src_resource_id.
+        :param dst_resource_id: replication session's dst_resource_id.
+        :return: replication session.
+        """
+        try:
+            return self.system.get_replication_session(
+                name=name, src_resource_id=src_resource_id,
+                dst_resource_id=dst_resource_id)
+        except storops_ex.UnityResourceNotFoundError:
+            raise ClientReplicationError(
+                'Replication session with name {name} not found.'.format(
+                    name=name))
+
+    def failover_replication(self, rep_session):
+        """Fails over a replication session.
+
+        :param rep_session: replication session to fail over.
+        """
+        name = rep_session.name
+        LOG.debug('Failing over replication: %s', name)
+        try:
+            # In OpenStack, only failover triggered from the secondary
+            # backend is supported, because the primary could be down. So
+            # `sync=False` is required here, which means it won't sync from
+            # primary to secondary before failover.
+            return rep_session.failover(sync=False)
+        except storops_ex.UnityException as ex:
+            raise ClientReplicationError(
+                'Failover of replication: {name} failed, '
+                'error: {err}'.format(name=name, err=ex)
+            )
+        LOG.debug('Replication: %s failed over', name)
+
+    def failback_replication(self, rep_session):
+        """Fails back a replication session.
+
+        :param rep_session: replication session to fail back.
+        """
+        name = rep_session.name
+        LOG.debug('Failing back replication: %s', name)
+        try:
+            # If the replication was failed over before the initial copy was
+            # done, the following failback will fail without
+            # `force_full_copy` because the primary and secondary data have
+            # no common base.
+            # `force_full_copy=True` has no effect if the initial copy is done.
+            return rep_session.failback(force_full_copy=True)
+        except storops_ex.UnityException as ex:
+            raise ClientReplicationError(
+                'Failback of replication: {name} failed, '
+                'error: {err}'.format(name=name, err=ex)
+            )
+        LOG.debug('Replication: %s failed back', name)
+
+
+class ClientReplicationError(exception.CinderException):
+    pass
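These client helpers are building blocks that the adapter drives when a replicated volume is created. A rough sketch of the expected call sequence, not the committed `setup_replications()` body; `primary_client`, `src_lun`, `device` and `remote_system_name` are placeholders here, with `device` assumed to be a `ReplicationDevice` from the new replication.py and the remote system assumed to be already registered on the primary array under a name the driver can look up:

    # Hypothetical wiring of the new client helpers (illustration only).
    remote_system = primary_client.get_remote_system(name=remote_system_name)
    if remote_system is not None:
        rep_session = primary_client.create_replication(
            src_lun,
            device.max_time_out_of_sync,        # minutes, 0 means sync replication
            device.destination_pool.get_id(),   # a pool on the secondary system
            remote_system)
        # The session name is what later ends up in replication_driver_data
        # and is looked up again by get_replication_session(name=...).
        rep_name = rep_session.name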
@@ -24,6 +24,7 @@ from cinder import interface
 from cinder.volume import configuration
 from cinder.volume import driver
 from cinder.volume.drivers.dell_emc.unity import adapter
+from cinder.volume.drivers.dell_emc.unity import replication
 from cinder.volume.drivers.san.san import san_opts
 from cinder.volume import volume_utils
 from cinder.zonemanager import utils as zm_utils
@@ -80,9 +81,10 @@ class UnityDriver(driver.ManageableVD,
         4.2.0 - Support compressed volume
         5.0.0 - Support storage assisted volume migration
         6.0.0 - Support generic group and consistent group
+        6.1.0 - Support volume replication
     """

-    VERSION = '06.00.00'
+    VERSION = '06.01.00'
     VENDOR = 'Dell EMC'
     # ThirdPartySystems wiki page
     CI_WIKI_NAME = "EMC_UNITY_CI"
@@ -91,20 +93,26 @@ class UnityDriver(driver.ManageableVD,
         super(UnityDriver, self).__init__(*args, **kwargs)
         self.configuration.append_config_values(UNITY_OPTS)
         self.configuration.append_config_values(san_opts)
+
+        # active_backend_id is not None if the service is failed over.
+        self.active_backend_id = kwargs.get('active_backend_id')
+        self.replication_manager = replication.ReplicationManager()
         protocol = self.configuration.storage_protocol
         if protocol.lower() == adapter.PROTOCOL_FC.lower():
             self.protocol = adapter.PROTOCOL_FC
-            self.adapter = adapter.FCAdapter(self.VERSION)
         else:
             self.protocol = adapter.PROTOCOL_ISCSI
-            self.adapter = adapter.ISCSIAdapter(self.VERSION)

     @staticmethod
     def get_driver_options():
         return UNITY_OPTS

     def do_setup(self, context):
-        self.adapter.do_setup(self, self.configuration)
+        self.replication_manager.do_setup(self)
+
+    @property
+    def adapter(self):
+        return self.replication_manager.active_adapter

     def check_for_setup_error(self):
         pass
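With `adapter` turned into a property, the driver always resolves it through the `ReplicationManager`, so the same attribute transparently points at the secondary backend once a failover has happened. A hedged illustration of that behaviour (setup details omitted, backend id made up):

    # Illustration only: the adapter property after the changes above.
    driver.do_setup(context)
    driver.adapter            # ReplicationManager.active_adapter -> default device

    driver.replication_manager.failover_service('unity-secondary')
    driver.adapter            # now the adapter of the 'unity-secondary' device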
@@ -316,3 +324,8 @@ class UnityDriver(driver.ManageableVD,
     def delete_group_snapshot(self, context, group_snapshot, snapshots):
         """Deletes a snapshot of consistency group."""
         return self.adapter.delete_group_snapshot(group_snapshot)
+
+    def failover_host(self, context, volumes, secondary_id=None, groups=None):
+        """Fails over volumes to the secondary backend."""
+        return self.adapter.failover(volumes,
+                                     secondary_id=secondary_id, groups=groups)
cinder/volume/drivers/dell_emc/unity/replication.py (new file, 214 lines)
@@ -0,0 +1,214 @@
# Copyright (c) 2016 - 2019 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import random

from oslo_log import log as logging
from oslo_utils import excutils

from cinder import exception
from cinder.volume.drivers.dell_emc.unity import adapter as unity_adapter

LOG = logging.getLogger(__name__)


class ReplicationDevice(object):
    def __init__(self, conf_dict, driver):
        """Constructs a replication device from driver configuration.

        :param conf_dict: the conf of one replication device entry. It's a
            dict with content like
            `{backend_id: vendor-id-1, key-1: val-1, ...}`
        :param driver: the backend driver.
        """
        driver_conf = driver.configuration

        self.backend_id = conf_dict.get('backend_id')
        self.san_ip = conf_dict.get('san_ip', None)
        if (self.backend_id is None or not self.backend_id.strip()
                or self.san_ip is None or not self.san_ip.strip()):
            LOG.error('No backend_id or san_ip in %(conf)s of '
                      '%(group)s.replication_device',
                      conf=conf_dict, group=driver_conf.config_group)
            raise exception.InvalidConfigurationValue(
                option='%s.replication_device' % driver_conf.config_group,
                value=driver_conf.replication_device)

        # Use the driver settings if not configured in replication_device.
        self.san_login = conf_dict.get('san_login', driver_conf.san_login)
        self.san_password = conf_dict.get('san_password',
                                          driver_conf.san_password)

        # Max time (in minute) out of sync is a setting for replication.
        # It means maximum time to wait before syncing the source and
        # destination. `0` means it is a sync replication. Default is `60`.
        try:
            self.max_time_out_of_sync = int(
                conf_dict.get('max_time_out_of_sync', 60))
        except ValueError:
            LOG.error('max_time_out_of_sync is not a number, %(conf)s of '
                      '%(group)s.replication_device',
                      conf=conf_dict, group=driver_conf.config_group)
            raise exception.InvalidConfigurationValue(
                option='%s.replication_device' % driver_conf.config_group,
                value=driver_conf.replication_device)
        if self.max_time_out_of_sync < 0:
            LOG.error('max_time_out_of_sync should be greater than 0, '
                      '%(conf)s of %(group)s.replication_device',
                      conf=conf_dict, group=driver_conf.config_group)
            raise exception.InvalidConfigurationValue(
                option='%s.replication_device' % driver_conf.config_group,
                value=driver_conf.replication_device)

        self.driver = driver
        self._adapter = init_adapter(driver.get_version(), driver.protocol)
        self._dst_pool = None
        self._serial_number = None

    @property
    def device_conf(self):
        conf = self.driver.configuration
        conf.san_ip = self.san_ip
        conf.san_login = self.san_login
        conf.san_password = self.san_password
        return conf

    def setup_adapter(self):
        if not self._adapter.is_setup:
            try:
                self._adapter.do_setup(self.driver, self.device_conf)
            except exception.CinderException:
                with excutils.save_and_reraise_exception():
                    LOG.error('replication_device configured but its adapter '
                              'setup failed: %s', self.backend_id)

    @property
    def adapter(self):
        self.setup_adapter()
        return self._adapter

    @property
    def destination_pool(self):
        if self._dst_pool is None:
            LOG.debug('getting destination pool for replication device: %s',
                      self.backend_id)
            pools_dict = self.adapter.storage_pools_map
            pool_name = random.choice(list(pools_dict))
            LOG.debug('got destination pool for replication device: %s, '
                      'pool: %s', self.backend_id, pool_name)
            self._dst_pool = pools_dict[pool_name]

        return self._dst_pool


def init_adapter(version, protocol):
    if protocol == unity_adapter.PROTOCOL_FC:
        return unity_adapter.FCAdapter(version)
    return unity_adapter.ISCSIAdapter(version)


DEFAULT_ADAPTER_NAME = 'default'


class ReplicationManager(object):
    def __init__(self):
        self.is_replication_configured = False
        self.default_conf = None
        self.default_device = None
        self.replication_devices = None
        self.active_backend_id = None

    def do_setup(self, driver):
        self.default_conf = driver.configuration

        self.replication_devices = self.parse_rep_device(driver)
        if DEFAULT_ADAPTER_NAME in self.replication_devices:
            LOG.error('backend_id cannot be `default`')
            raise exception.InvalidConfigurationValue(
                option=('%s.replication_device'
                        % self.default_conf.config_group),
                value=self.default_conf.replication_device)

        # Only support one replication device currently.
        if len(self.replication_devices) > 1:
            LOG.error('At most one replication_device is supported')
            raise exception.InvalidConfigurationValue(
                option=('%s.replication_device'
                        % self.default_conf.config_group),
                value=self.default_conf.replication_device)

        self.is_replication_configured = len(self.replication_devices) >= 1

        self.active_backend_id = driver.active_backend_id
        if self.active_backend_id:
            if self.active_backend_id not in self.replication_devices:
                LOG.error('Service starts under failed-over status, '
                          'active_backend_id: %s is not empty, but not in '
                          'replication_device.', self.active_backend_id)
                raise exception.InvalidConfigurationValue(
                    option=('%s.replication_device'
                            % self.default_conf.config_group),
                    value=self.default_conf.replication_device)
        else:
            self.active_backend_id = DEFAULT_ADAPTER_NAME

        default_device_conf = {
            'backend_id': DEFAULT_ADAPTER_NAME,
            'san_ip': driver.configuration.san_ip
        }
        self.default_device = ReplicationDevice(default_device_conf, driver)
        if not self.is_service_failed_over:
            # If service doesn't fail over, setup the adapter.
            # Otherwise, the primary backend could be down, adapter setup could
            # fail.
            self.default_device.setup_adapter()

        if self.is_replication_configured:
            # If replication_device is configured, consider the replication is
            # enabled and check the same configuration is valid for secondary
            # backend or not.
            self.setup_rep_adapters()

    @property
    def is_service_failed_over(self):
        return (self.active_backend_id is not None
                and self.active_backend_id != DEFAULT_ADAPTER_NAME)

    def setup_rep_adapters(self):
        for backend_id, rep_device in self.replication_devices.items():
            rep_device.setup_adapter()

    @property
    def active_adapter(self):
        if self.is_service_failed_over:
            return self.replication_devices[self.active_backend_id].adapter
        else:
            self.active_backend_id = DEFAULT_ADAPTER_NAME
            return self.default_device.adapter

    @staticmethod
    def parse_rep_device(driver):
        driver_conf = driver.configuration
        rep_devices = {}
        if not driver_conf.replication_device:
            return rep_devices

        for device_conf in driver_conf.replication_device:
            rep_device = ReplicationDevice(device_conf, driver)
            rep_devices[rep_device.backend_id] = rep_device
        return rep_devices

    def failover_service(self, backend_id):
        self.active_backend_id = backend_id
@@ -18,6 +18,8 @@ from __future__ import division
 import contextlib
 from distutils import version
 import functools
+import json
+
 from oslo_log import log as logging
 from oslo_utils import fnmatch
 from oslo_utils import units
@@ -348,3 +350,45 @@ def is_multiattach_to_host(volume_attachment, host_name):
                   if a.attach_status == fields.VolumeAttachStatus.ATTACHED and
                   a.attached_host == host_name]
     return len(attachment) > 1
+
+
+def load_replication_data(rep_data_str):
+    # rep_data_str is string dumped from a dict like:
+    # {
+    #     'default': 'rep_session_name_failed_over',
+    #     'backend_id_1': 'rep_session_name_1',
+    #     'backend_id_2': 'rep_session_name_2'
+    # }
+    return json.loads(rep_data_str)
+
+
+def dump_replication_data(model_update, rep_data):
+    # rep_data is a dict like:
+    # {
+    #     'backend_id_1': 'rep_session_name_1',
+    #     'backend_id_2': 'rep_session_name_2'
+    # }
+    model_update['replication_driver_data'] = json.dumps(rep_data)
+    return model_update
+
+
+def enable_replication_status(model_update, rep_data):
+    model_update['replication_status'] = fields.ReplicationStatus.ENABLED
+    return dump_replication_data(model_update, rep_data)
+
+
+def error_replication_status(model_update):
+    # model_update is a dict like:
+    # {
+    #     'volume_id': volume.id,
+    #     'updates': {
+    #         'provider_id': new_provider_id,
+    #         'provider_location': new_provider_location,
+    #         'replication_status': fields.ReplicationStatus.FAILOVER_ERROR,
+    #         ...
+    #     }
+    # }
+    model_update['updates']['replication_status'] = (
+        fields.ReplicationStatus.FAILOVER_ERROR
+    )
+    return model_update
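A quick round trip of these helpers, with illustrative values, showing how the replication session names travel through `replication_driver_data` as a JSON string:

    # Illustrative round trip (backend id and session name are made up).
    rep_data = {'unity-secondary': 'rep_session_name_1'}

    model_update = enable_replication_status({}, rep_data)
    # -> {'replication_status': 'enabled',
    #     'replication_driver_data': '{"unity-secondary": "rep_session_name_1"}'}

    load_replication_data(model_update['replication_driver_data'])
    # -> {'unity-secondary': 'rep_session_name_1'}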
@@ -15,7 +15,7 @@ Prerequisites
 +===================+=================+
 | Unity OE          | 4.1.X or newer  |
 +-------------------+-----------------+
-| storops           | 0.5.10 or newer |
+| storops           | 1.1.0 or newer  |
 +-------------------+-----------------+


@@ -41,6 +41,7 @@ Supported operations
 - Clone a consistent group.
 - Create a consistent group from a snapshot.
 - Attach a volume to multiple servers simultaneously (multiattach).
+- Volume replications.

 Driver configuration
 ~~~~~~~~~~~~~~~~~~~~
@@ -411,6 +412,63 @@ snapshots, the volume type extra specs would also have the following entry:
 Refer to :doc:`/admin/blockstorage-groups`
 for command lines detail.

+Volume replications
+~~~~~~~~~~~~~~~~~~~
+
+To enable volume replication, follow the steps below:
+
+1. On Unisphere, configure the remote system and interfaces for replication.
+
+   The steps differ depending on the type of replication - sync or async.
+   Refer to the `Unity Replication White Paper
+   <https://www.emc.com/collateral/white-papers/h15088-dell-emc-unity-replication-technologies.pdf>`_
+   for more details.
+
+2. Add `replication_device` to the storage backend settings in `cinder.conf`,
+   then restart the Cinder Volume service.
+
+   Example of `cinder.conf` for volume replication:
+
+   .. code-block:: ini
+
+      [unity-primary]
+      san_ip = xxx.xxx.xxx.xxx
+      ...
+      replication_device = backend_id:unity-secondary,san_ip:yyy.yyy.yyy.yyy,san_password:****,max_time_out_of_sync:60
+
+   - Only one `replication_device` can be configured for each primary backend.
+   - Keys `backend_id`, `san_ip`, `san_password`, and `max_time_out_of_sync`
+     are supported in `replication_device`, while `backend_id` and `san_ip`
+     are required.
+   - `san_password` defaults to the primary backend's password if it is
+     omitted.
+   - `max_time_out_of_sync` is the maximum time in minutes that replications
+     may be out of sync. It must be greater than or equal to `0`. `0` means
+     sync replications of volumes will be created. Note that remote systems
+     for sync replication need to be created on Unity first. `60` is used if
+     it is omitted.
+
+#. Create a volume type with the property `replication_enabled='<is> True'`.
+
+   .. code-block:: console
+
+      $ openstack volume type create --property replication_enabled='<is> True' type-replication
+
+#. Any volume with the volume type of step #3 will fail over to the secondary
+   backend after `failover_host` is executed.
+
+   .. code-block:: console
+
+      $ cinder failover-host --backend_id unity-secondary stein@unity-primary
+
+#. Later, the volumes can be failed back.
+
+   .. code-block:: console
+
+      $ cinder failover-host --backend_id default stein@unity-primary
+
+.. note:: The volume can be deleted even when it is participating in a
+   replication. The replication session will be deleted from Unity before the
+   LUN is deleted.
+
 Troubleshooting
 ~~~~~~~~~~~~~~~
@@ -478,7 +478,7 @@ driver.datera=missing
 driver.dell_emc_powermax=complete
 driver.dell_emc_ps=missing
 driver.dell_emc_sc=complete
-driver.dell_emc_unity=missing
+driver.dell_emc_unity=complete
 driver.dell_emc_vmax_af=complete
 driver.dell_emc_vmax_3=complete
 driver.dell_emc_vnx=complete
@@ -31,7 +31,7 @@ rados # LGPLv2.1
 rbd # LGPLv2.1

 # Dell EMC VNX and Unity
-storops>=0.5.10 # Apache-2.0
+storops>=1.1.0 # Apache-2.0

 # INFINIDAT
 infinisdk # BSD-3
@@ -0,0 +1,4 @@
---
features:
  - |
    Dell EMC Unity Driver: Added volume replication support.