PowerMax Driver - Support of Multiple Replication

Add support for multiple replication devices in the PowerMax
driver for Cinder to allow the use of multiple replication
modes concurrently.

Change-Id: I8fcd5bb0209ede5099a9f2f5c23b7da5b59a819a
Implements: blueprint powermax-multiple-replication-devices
Authored by odonos12 on 2020-02-25 09:05:41 +00:00, committed by Helen Walsh
parent 1870d2ac0c
commit a4c13863c4
18 changed files with 1758 additions and 555 deletions
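
For context, a minimal sketch of what this change enables, mirroring the test fixtures added below (rep_dev_1/2/3, multi_rep_device, rep_backend_id_async, utils.REPLICATION_DEVICE_BACKEND_ID). This is not taken verbatim from the driver documentation: the import paths assume the standard Cinder source tree, and the pool_name value is copied from the test data. A backend can now carry one replication target per RDF mode, and a volume type opts into a specific target by naming its backend id in the extra specs:

    # Sketch only: illustrates the intent of the fixtures added in this patch.
    from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
        powermax_data as tpd)
    from cinder.volume.drivers.dell_emc.powermax import utils

    data = tpd.PowerMaxData()

    # One replication_device entry per mode; multi_rep_device bundles the
    # sync, async and metro entries so all three modes can be used at once.
    replication_device = data.multi_rep_device

    # A volume type selects one of those targets by backend id, alongside the
    # usual replication_enabled flag.
    async_rep_vol_type_specs = {
        'pool_name': 'Diamond+DSS+SRP_1+000197800123',
        'replication_enabled': '<is> True',
        utils.REPLICATION_DEVICE_BACKEND_ID: data.rep_backend_id_async,
    }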

View File

@ -38,7 +38,6 @@ class PowerMaxData(object):
array_herc = '000197900123'
array_model = 'PowerMax_8000'
srp = 'SRP_1'
srp2 = 'SRP_2'
slo = 'Diamond'
slo_diamond = 'Diamond'
slo_silver = 'Silver'
@ -71,15 +70,22 @@ class PowerMaxData(object):
device_id2 = '00002'
device_id3 = '00003'
device_id4 = '00004'
rdf_group_name = '23_24_007'
rdf_group_no = '70'
rdf_group_name_1 = '23_24_007'
rdf_group_name_2 = '23_24_008'
rdf_group_name_3 = '23_24_009'
rdf_group_name_4 = '23_24_010'
rdf_group_no_1 = '70'
rdf_group_no_2 = '71'
rdf_group_no_3 = '72'
rdf_group_no_4 = '73'
u4v_version = '91'
storagegroup_name_source = 'Grp_source_sg'
storagegroup_name_target = 'Grp_target_sg'
group_snapshot_name = 'Grp_snapshot'
target_group_name = 'Grp_target'
storagegroup_name_with_id = 'GrpId_group_name'
rdf_managed_async_grp = 'OS-%s-Asynchronous-rdf-sg' % rdf_group_name
rdf_managed_async_grp = 'OS-%s-Asynchronous-rdf-sg' % rdf_group_name_1
default_sg_re_managed_list = [default_sg_re_enabled, rdf_managed_async_grp]
volume_id = '2b06255d-f5f0-4520-a953-b029196add6a'
no_slo_sg_name = 'OS-HostX-No_SLO-OS-fibre-PG'
temp_snapvx = 'temp-00001-snapshot_for_clone'
@ -266,6 +272,87 @@ class PowerMaxData(object):
test_host = {'capabilities': location_info,
'host': fake_host}
# replication
rep_backend_id_sync = 'rep_backend_id_sync'
rep_backend_id_async = 'rep_backend_id_async'
rep_backend_id_metro = 'rep_backend_id_metro'
rep_backend_id_sync_2 = 'rep_backend_id_sync_2'
rep_dev_1 = {
utils.BACKEND_ID: rep_backend_id_sync,
'target_device_id': remote_array,
'remote_port_group': port_group_name_f,
'remote_pool': srp,
'rdf_group_label': rdf_group_name_1,
'mode': utils.REP_SYNC,
'allow_extend': True}
rep_dev_2 = {
utils.BACKEND_ID: rep_backend_id_async,
'target_device_id': remote_array,
'remote_port_group': port_group_name_f,
'remote_pool': srp,
'rdf_group_label': rdf_group_name_2,
'mode': utils.REP_ASYNC,
'allow_extend': True}
rep_dev_3 = {
utils.BACKEND_ID: rep_backend_id_metro,
'target_device_id': remote_array,
'remote_port_group': port_group_name_f,
'remote_pool': srp,
'rdf_group_label': rdf_group_name_3,
'mode': utils.REP_METRO,
'allow_extend': True}
sync_rep_device = [rep_dev_1]
async_rep_device = [rep_dev_2]
metro_rep_device = [rep_dev_3]
multi_rep_device = [rep_dev_1, rep_dev_2, rep_dev_3]
rep_config_sync = {
utils.BACKEND_ID: rep_backend_id_sync,
'array': remote_array,
'portgroup': port_group_name_f,
'srp': srp,
'rdf_group_label': rdf_group_name_1,
'mode': utils.REP_SYNC,
'allow_extend': True,
'sync_interval': 3,
'sync_retries': 200}
rep_config_async = {
utils.BACKEND_ID: rep_backend_id_async,
'array': remote_array,
'portgroup': port_group_name_f,
'srp': srp,
'rdf_group_label': rdf_group_name_2,
'mode': utils.REP_ASYNC,
'allow_extend': True,
'sync_interval': 3,
'sync_retries': 200}
rep_config_metro = {
utils.BACKEND_ID: rep_backend_id_metro,
'array': remote_array,
'portgroup': port_group_name_f,
'srp': srp,
'rdf_group_label': rdf_group_name_3,
'mode': utils.REP_METRO,
'allow_extend': True,
'sync_interval': 3,
'sync_retries': 200}
rep_config_sync_2 = {
utils.BACKEND_ID: rep_backend_id_sync_2,
'array': remote_array,
'portgroup': port_group_name_f,
'srp': srp,
'rdf_group_label': rdf_group_name_1,
'mode': utils.REP_SYNC,
'allow_extend': True,
'sync_interval': 3,
'sync_retries': 200}
sync_rep_config_list = [rep_config_sync]
async_rep_config_list = [rep_config_async]
metro_rep_config_list = [rep_config_metro]
multi_rep_config_list = [rep_config_sync, rep_config_async,
rep_config_metro, rep_config_sync_2]
# extra-specs
vol_type_extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123'}
vol_type_extra_specs_compr_disabled = {
@ -274,6 +361,18 @@ class PowerMaxData(object):
vol_type_extra_specs_rep_enabled = {
'pool_name': u'Diamond+DSS+SRP_1+000197800123',
'replication_enabled': '<is> True'}
vol_type_extra_specs_rep_enabled_backend_id_sync = {
'pool_name': u'Diamond+DSS+SRP_1+000197800123',
'replication_enabled': '<is> True',
utils.REPLICATION_DEVICE_BACKEND_ID: rep_backend_id_sync}
vol_type_extra_specs_rep_enabled_backend_id_sync_2 = {
'pool_name': u'Diamond+DSS+SRP_1+000197800123',
'replication_enabled': '<is> True',
utils.REPLICATION_DEVICE_BACKEND_ID: rep_backend_id_sync_2}
vol_type_extra_specs_rep_enabled_backend_id_async = {
'pool_name': u'Diamond+DSS+SRP_1+000197800123',
'replication_enabled': '<is> True',
utils.REPLICATION_DEVICE_BACKEND_ID: rep_backend_id_async}
extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123',
'slo': slo,
'workload': workload,
@ -302,21 +401,23 @@ class PowerMaxData(object):
rep_extra_specs['array'] = remote_array
rep_extra_specs['interval'] = 1
rep_extra_specs['retries'] = 1
rep_extra_specs['srp'] = srp2
rep_extra_specs['srp'] = srp
rep_extra_specs['rep_mode'] = 'Synchronous'
rep_extra_specs['sync_interval'] = 3
rep_extra_specs['sync_retries'] = 200
rep_extra_specs['rdf_group_label'] = rdf_group_name
rep_extra_specs['rdf_group_no'] = rdf_group_no
rep_extra_specs['rdf_group_label'] = rdf_group_name_1
rep_extra_specs['rdf_group_no'] = rdf_group_no_1
rep_extra_specs2 = deepcopy(rep_extra_specs)
rep_extra_specs2[utils.PORTGROUPNAME] = port_group_name_f
rep_extra_specs3 = deepcopy(rep_extra_specs)
rep_extra_specs3['slo'] = slo
rep_extra_specs3['workload'] = workload
rep_extra_specs4 = deepcopy(rep_extra_specs3)
rep_extra_specs4['rdf_group_label'] = rdf_group_name
rep_extra_specs4['rdf_group_label'] = rdf_group_name_1
rep_extra_specs5 = deepcopy(rep_extra_specs2)
rep_extra_specs5['target_array_model'] = 'VMAX250F'
rep_extra_specs5['sync_interval'] = 3
rep_extra_specs5['sync_retries'] = 200
rep_extra_specs6 = deepcopy(rep_extra_specs3)
rep_extra_specs6['target_array_model'] = 'PMAX2000'
@ -328,6 +429,9 @@ class PowerMaxData(object):
rep_extra_specs_legacy = deepcopy(rep_extra_specs_ode)
rep_extra_specs_legacy['mode'] = 'Synchronous'
rep_extra_specs_rep_config = deepcopy(rep_extra_specs6)
rep_extra_specs_rep_config[utils.REP_CONFIG] = rep_config_sync
extra_specs_tags = deepcopy(extra_specs)
extra_specs_tags.update({utils.STORAGE_GROUP_TAGS: sg_tags})
@ -335,8 +439,8 @@ class PowerMaxData(object):
rep_extra_specs_mgmt['srp'] = srp
rep_extra_specs_mgmt['mgmt_sg_name'] = rdf_managed_async_grp
rep_extra_specs_mgmt['sg_name'] = default_sg_no_slo_re_enabled
rep_extra_specs_mgmt['rdf_group_no'] = rdf_group_no
rep_extra_specs_mgmt['rdf_group_label'] = rdf_group_name
rep_extra_specs_mgmt['rdf_group_no'] = rdf_group_no_1
rep_extra_specs_mgmt['rdf_group_label'] = rdf_group_name_1
rep_extra_specs_mgmt['target_array_model'] = array_model
rep_extra_specs_mgmt['slo'] = 'Diamond'
rep_extra_specs_mgmt['workload'] = 'NONE'
@ -350,8 +454,8 @@ class PowerMaxData(object):
rep_config = {
'array': remote_array, 'srp': srp, 'portgroup': port_group_name_i,
'rdf_group_no': rdf_group_no, 'sync_retries': 200,
'sync_interval': 1, 'rdf_group_label': rdf_group_name,
'rdf_group_no': rdf_group_no_1, 'sync_retries': 200,
'sync_interval': 1, 'rdf_group_label': rdf_group_name_1,
'allow_extend': True, 'mode': utils.REP_METRO}
ex_specs_rep_config = deepcopy(rep_extra_specs_metro)
@ -593,12 +697,12 @@ class PowerMaxData(object):
sg_rdf_details = [{'storageGroupName': test_vol_grp_name,
'symmetrixId': array,
'modes': ['Synchronous'],
'rdfGroupNumber': rdf_group_no,
'rdfGroupNumber': rdf_group_no_1,
'states': ['Synchronized']},
{'storageGroupName': test_fo_vol_group,
'symmetrixId': array,
'modes': ['Synchronous'],
'rdfGroupNumber': rdf_group_no,
'rdfGroupNumber': rdf_group_no_1,
'states': ['Failed Over']}]
sg_rdf_group_details = {
@ -812,19 +916,25 @@ class PowerMaxData(object):
{'name': 'another-target',
'percentageCopied': 90}]
rdf_group_list = {'rdfGroupID': [{'rdfgNumber': rdf_group_no,
'label': rdf_group_name}]}
rdf_group_list = {'rdfGroupID': [{'rdfgNumber': rdf_group_no_1,
'label': rdf_group_name_1},
{'rdfgNumber': rdf_group_no_2,
'label': rdf_group_name_2},
{'rdfgNumber': rdf_group_no_3,
'label': rdf_group_name_3},
{'rdfgNumber': rdf_group_no_4,
'label': rdf_group_name_4}]}
rdf_group_details = {'modes': ['Synchronous'],
'remoteSymmetrix': remote_array,
'label': rdf_group_name,
'label': rdf_group_name_1,
'type': 'Dynamic',
'numDevices': 1,
'remoteRdfgNumber': rdf_group_no,
'rdfgNumber': rdf_group_no}
rdf_group_vol_details = {'remoteRdfGroupNumber': rdf_group_no,
'remoteRdfgNumber': rdf_group_no_1,
'rdfgNumber': rdf_group_no_1}
rdf_group_vol_details = {'remoteRdfGroupNumber': rdf_group_no_1,
'localSymmetrixId': array,
'volumeConfig': 'RDF1+TDEV',
'localRdfGroupNumber': rdf_group_no,
'localRdfGroupNumber': rdf_group_no_1,
'localVolumeName': device_id,
'rdfpairState': 'Synchronized',
'remoteVolumeName': device_id2,
@ -834,8 +944,8 @@ class PowerMaxData(object):
'remoteSymmetrixId': remote_array}
rdf_group_vol_details_not_synced = {
'remoteRdfGroupNumber': rdf_group_no, 'localSymmetrixId': array,
'volumeConfig': 'RDF1+TDEV', 'localRdfGroupNumber': rdf_group_no,
'remoteRdfGroupNumber': rdf_group_no_1, 'localSymmetrixId': array,
'volumeConfig': 'RDF1+TDEV', 'localRdfGroupNumber': rdf_group_no_1,
'localVolumeName': device_id, 'rdfpairState': 'syncinprog',
'remoteVolumeName': device_id2, 'localVolumeState': 'Ready',
'rdfMode': 'Synchronous', 'remoteVolumeState': 'Write Disabled',
@ -1096,6 +1206,8 @@ class PowerMaxData(object):
'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False},
'timeFinderInfo': {'snapVXTgt': False, 'snapVXSrc': False}}]
volume_create_info_dict = {utils.ARRAY: array, utils.DEVICE_ID: device_id}
volume_info_dict = {
'volume_id': volume_id,
'service_level': 'Diamond',
@ -1327,8 +1439,8 @@ class PowerMaxData(object):
'device_id': device_id,
'local_array': array, 'remote_array': remote_array,
'target_device_id': device_id2, 'target_name': 'test_vol',
'rdf_group_no': rdf_group_no, 'rep_mode': 'Metro',
'replication_status': 'Enabled', 'rdf_group_label': rdf_group_name,
'rdf_group_no': rdf_group_no_1, 'rep_mode': 'Metro',
'replication_status': 'Enabled', 'rdf_group_label': rdf_group_name_1,
'target_array_model': array_model,
'rdf_mgmt_grp': rdf_managed_async_grp}

View File

@ -200,7 +200,7 @@ class FakeRequestsSession(object):
elif 'rdf_group' in url:
if self.data.device_id in url:
return_object = self.data.rdf_group_vol_details
elif self.data.rdf_group_no in url:
elif self.data.rdf_group_no_1 in url:
return_object = self.data.rdf_group_details
else:
return_object = self.data.rdf_group_list
@ -283,7 +283,7 @@ class FakeConfiguration(object):
self.config_group = volume_backend_name
self.san_is_local = False
if replication_device:
self.replication_device = [replication_device]
self.replication_device = replication_device
for key, value in kwargs.items():
if key == 'san_login':
self.san_login = value
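
Because FakeConfiguration now stores the replication_device argument as passed, instead of wrapping a single dict in a list, the fixtures can hand it either one entry or the full multi-mode list. A hedged sketch of how a test might build a multi-mode configuration with it; 'MultiRepTests' is an illustrative backend name and the import paths are assumptions based on the usual Cinder test layout:

    # Sketch only (not part of the patch): configure the fake backend with all
    # three replication modes at once, mirroring the single-mode setups used in
    # the test cases below.
    from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
        powermax_data as tpd, powermax_fake_objects as tpfo)

    configuration = tpfo.FakeConfiguration(
        None, 'MultiRepTests', interval=1, retries=1,
        san_ip='1.1.1.1', san_login='smc',
        vmax_array=tpd.PowerMaxData.array, vmax_srp='SRP_1',
        san_password='smc', san_api_port=8443,
        vmax_port_groups=[tpd.PowerMaxData.port_group_name_f],
        replication_device=tpd.PowerMaxData.multi_rep_device)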

View File

@ -44,12 +44,14 @@ class PowerMaxCommonTest(test.TestCase):
super(PowerMaxCommonTest, self).setUp()
self.mock_object(volume_utils, 'get_max_over_subscription_ratio',
return_value=1.0)
replication_device = self.data.sync_rep_device
configuration = tpfo.FakeConfiguration(
emc_file=None, volume_backend_name='CommonTests', interval=1,
retries=1, san_ip='1.1.1.1', san_login='smc',
vmax_array=self.data.array, vmax_srp='SRP_1', san_password='smc',
san_api_port=8443, vmax_port_groups=[self.data.port_group_name_f],
powermax_port_group_name_template='portGroupName')
powermax_port_group_name_template='portGroupName',
replication_device=replication_device)
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
driver = fc.PowerMaxFCDriver(configuration=configuration)
@ -205,11 +207,11 @@ class PowerMaxCommonTest(test.TestCase):
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata',
return_value='')
return_value=tpd.PowerMaxData.volume_metadata)
def test_create_volume_qos(self, mck_meta):
ref_model_update = (
{'provider_location': six.text_type(self.data.provider_location),
'metadata': ''})
'metadata': self.data.volume_metadata})
extra_specs = deepcopy(self.data.extra_specs_intervals_set)
extra_specs['qos'] = {
'total_iops_sec': '4000', 'DistributionType': 'Always'}
@ -240,6 +242,84 @@ class PowerMaxCommonTest(test.TestCase):
ast.literal_eval(ref_model_update['provider_location']),
ast.literal_eval(model_update['provider_location']))
@mock.patch.object(common.PowerMaxCommon, 'gather_replication_updates',
return_value=(tpd.PowerMaxData.replication_update,
tpd.PowerMaxData.rep_info_dict))
@mock.patch.object(common.PowerMaxCommon, 'srdf_protect_storage_group')
@mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg',
return_value=tpd.PowerMaxData.volume_create_info_dict)
@mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details',
return_value=(True, tpd.PowerMaxData.rep_extra_specs5,
tpd.PowerMaxData.rep_info_dict, True))
def test_create_replication_enabled_volume_first_volume(
self, mck_prep, mck_create, mck_protect, mck_updates):
array = self.data.array
volume = self.data.test_volume
volume_name = volume.name
volume_size = volume.size
rep_extra_specs = self.data.rep_extra_specs
rep_extra_specs5 = self.data.rep_extra_specs5
storagegroup_name = self.data.storagegroup_name_f
rep_info_dict = self.data.rep_info_dict
rep_vol = deepcopy(self.data.volume_create_info_dict)
rep_vol.update({'device_uuid': volume_name,
'storage_group': storagegroup_name,
'size': volume_size})
vol, update, info = self.common._create_replication_enabled_volume(
array, volume, volume_name, volume_size, rep_extra_specs,
storagegroup_name, rep_extra_specs['rep_mode'])
mck_prep.assert_called_once_with(self.data.rep_extra_specs)
mck_create.assert_called_once_with(
array, volume_name, storagegroup_name, volume_size,
rep_extra_specs, rep_info_dict)
mck_protect.assert_called_once_with(
rep_extra_specs, rep_extra_specs5, rep_vol)
rep_vol.update({'remote_device_id': self.data.device_id2})
mck_updates.assert_called_once_with(
rep_extra_specs, rep_extra_specs5, rep_vol)
self.assertEqual(self.data.volume_create_info_dict, vol)
self.assertEqual(self.data.replication_update, update)
self.assertEqual(self.data.rep_info_dict, info)
@mock.patch.object(common.PowerMaxCommon, 'gather_replication_updates',
return_value=(tpd.PowerMaxData.replication_update,
tpd.PowerMaxData.rep_info_dict))
@mock.patch.object(common.PowerMaxCommon, 'srdf_protect_storage_group')
@mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg',
return_value=tpd.PowerMaxData.volume_create_info_dict)
@mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details',
side_effect=((False, '', '', True),
('', tpd.PowerMaxData.rep_extra_specs5,
tpd.PowerMaxData.rep_info_dict, '')))
def test_create_replication_enabled_volume_not_first_volume(
self, mck_prepare, mck_create, mck_protect, mck_updates):
array = self.data.array
volume = self.data.test_volume
volume_name = volume.name
volume_size = volume.size
rep_extra_specs = self.data.rep_extra_specs
rep_extra_specs5 = self.data.rep_extra_specs5
storagegroup_name = self.data.storagegroup_name_f
rep_info_dict = self.data.rep_info_dict
rep_vol = deepcopy(self.data.volume_create_info_dict)
rep_vol.update({'device_uuid': volume_name,
'storage_group': storagegroup_name,
'size': volume_size})
vol, update, info = self.common._create_replication_enabled_volume(
array, volume, volume_name, volume_size, rep_extra_specs,
storagegroup_name, rep_extra_specs['rep_mode'])
self.assertEqual(2, mck_prepare.call_count)
mck_create.assert_called_once_with(
array, volume_name, storagegroup_name, volume_size,
rep_extra_specs, rep_info_dict)
mck_protect.assert_not_called()
rep_vol.update({'remote_device_id': self.data.device_id2})
mck_updates.assert_called_once_with(
rep_extra_specs, rep_extra_specs5, rep_vol)
self.assertEqual(self.data.volume_create_info_dict, vol)
self.assertEqual(self.data.replication_update, update)
self.assertEqual(self.data.rep_info_dict, info)
@mock.patch.object(common.PowerMaxCommon, '_clone_check')
@mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata',
return_value='')
@ -823,6 +903,7 @@ class PowerMaxCommonTest(test.TestCase):
ref_mv_dict = self.data.masking_view_dict
self.common.next_gen = False
self.common.powermax_port_group_name_template = 'portGroupName'
extra_specs.pop(utils.IS_RE, None)
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
self.assertEqual(ref_mv_dict, masking_view_dict)
@ -1027,6 +1108,7 @@ class PowerMaxCommonTest(test.TestCase):
mock_delete.assert_not_called()
def test_create_volume_success(self):
volume = self.data.test_volume
volume_name = '1'
volume_size = self.data.test_volume.size
extra_specs = self.data.extra_specs
@ -1035,11 +1117,12 @@ class PowerMaxCommonTest(test.TestCase):
return_value=self.data.volume_details[0]):
volume_dict, rep_update, rep_info_dict = (
self.common._create_volume(
volume_name, volume_size, extra_specs))
volume, volume_name, volume_size, extra_specs))
self.assertEqual(ref_response,
(volume_dict, rep_update, rep_info_dict))
def test_create_volume_success_next_gen(self):
volume = self.data.test_volume
volume_name = '1'
volume_size = self.data.test_volume.size
extra_specs = self.data.extra_specs
@ -1053,38 +1136,86 @@ class PowerMaxCommonTest(test.TestCase):
self.masking,
'get_or_create_default_storage_group') as mock_get:
self.common._create_volume(
volume_name, volume_size, extra_specs)
volume, volume_name, volume_size, extra_specs)
mock_get.assert_called_once_with(
extra_specs['array'], extra_specs[utils.SRP],
extra_specs[utils.SLO], 'NONE', extra_specs, True,
False, None)
def test_create_volume_failed(self):
@mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg',
side_effect=exception.VolumeBackendAPIException(''))
@mock.patch.object(common.PowerMaxCommon,
'_cleanup_volume_create_post_failure')
@mock.patch.object(rest.PowerMaxRest, 'delete_storage_group')
def test_create_volume_failed(self, mck_del, mck_cleanup, mck_create):
volume = self.data.test_volume
volume_name = self.data.test_volume.name
volume_size = self.data.test_volume.size
extra_specs = self.data.extra_specs
dev1 = self.data.device_id
dev2 = self.data.device_id2
with mock.patch.object(
self.masking, 'get_or_create_default_storage_group',
return_value=self.data.failed_resource):
with mock.patch.object(
self.rest, 'delete_storage_group') as mock_delete:
# path 1: not last vol in sg
with mock.patch.object(
self.rest, 'get_num_vols_in_sg', return_value=2):
self.rest, 'get_volumes_in_storage_group',
side_effect=[[dev1], [dev1, dev2]]):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_volume,
volume_name, volume_size, extra_specs)
mock_delete.assert_not_called()
# path 2: last vol in sg, delete sg
with mock.patch.object(self.rest, 'get_num_vols_in_sg',
return_value=0):
volume, volume_name, volume_size,
extra_specs)
mck_cleanup.assert_called_once_with(
volume, volume_name, extra_specs, [dev2])
# path 2: no new volumes created
with mock.patch.object(
self.rest, 'get_volumes_in_storage_group',
side_effect=[[], []]):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_volume,
volume_name, volume_size, extra_specs)
mock_delete.assert_called_once_with(
self.data.array, self.data.failed_resource)
volume, volume_name, volume_size,
extra_specs)
mck_del.assert_called_once()
@mock.patch.object(common.PowerMaxCommon, '_delete_from_srp')
@mock.patch.object(common.PowerMaxCommon, 'cleanup_rdf_device_pair')
@mock.patch.object(
rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=('', '', [
{utils.RDF_GROUP_NO: tpd.PowerMaxData.rdf_group_no_1}]))
def test_cleanup_volume_create_post_failure_rdf_enabled(
self, mck_in, mck_clean, mck_del):
array = self.data.array
volume = self.data.test_volume
volume_name = self.data.test_volume.name
extra_specs = deepcopy(self.data.extra_specs_rep_enabled)
extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
devices = [self.data.device_id]
self.common._cleanup_volume_create_post_failure(
volume, volume_name, extra_specs, devices)
mck_in.assert_called_once_with(array, self.data.device_id)
mck_clean.assert_called_once_with(
array, self.data.rdf_group_no_1, self.data.device_id, extra_specs)
mck_del.assert_called_once_with(
array, self.data.device_id, volume_name, extra_specs)
@mock.patch.object(common.PowerMaxCommon, '_delete_from_srp')
@mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members')
@mock.patch.object(
rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=('', '', ''))
def test_cleanup_volume_create_post_failure_rdf_disabled(
self, mck_in, mck_remove, mck_del):
array = self.data.array
volume = self.data.test_volume
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
devices = [self.data.device_id]
self.common._cleanup_volume_create_post_failure(
volume, volume_name, extra_specs, devices)
mck_in.assert_called_once_with(array, self.data.device_id)
mck_remove.assert_called_once_with(
array, volume, self.data.device_id, volume_name, extra_specs,
False)
mck_del.assert_called_once_with(
array, self.data.device_id, volume_name, extra_specs)
def test_create_volume_incorrect_slo(self):
volume = self.data.test_volume
volume_name = self.data.test_volume.name
volume_size = self.data.test_volume.size
extra_specs = {'slo': 'Diamondz',
@ -1094,7 +1225,7 @@ class PowerMaxCommonTest(test.TestCase):
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._create_volume,
volume_name, volume_size, extra_specs)
volume, volume_name, volume_size, extra_specs)
def test_set_vmax_extra_specs(self):
srp_record = self.common.get_attributes_from_cinder_config()
@ -1236,8 +1367,7 @@ class PowerMaxCommonTest(test.TestCase):
self.assertEqual([], metro_wwns)
# Is metro volume
with mock.patch.object(common.PowerMaxCommon, '_initial_setup',
return_value=self.data.rep_extra_specs_ode):
self.common.rep_config = self.data.rep_config
return_value=self.data.ex_specs_rep_config):
__, metro_wwns = self.common.get_target_wwns_from_masking_view(
self.data.test_volume, self.data.connector)
self.assertEqual([self.data.wwnn1], metro_wwns)
@ -2416,7 +2546,7 @@ class PowerMaxCommonTest(test.TestCase):
array = self.data.array
device_id = self.data.device_id
new_size = self.data.test_volume.size + 1
extra_specs = deepcopy(self.data.extra_specs)
extra_specs = self.data.extra_specs
self.common._extend_vol_validation_checks(
array, device_id, volume.name, extra_specs, volume.size, new_size)
@ -2428,7 +2558,7 @@ class PowerMaxCommonTest(test.TestCase):
array = self.data.array
device_id = None
new_size = self.data.test_volume.size + 1
extra_specs = deepcopy(self.data.extra_specs)
extra_specs = self.data.extra_specs
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._extend_vol_validation_checks,
@ -2457,7 +2587,7 @@ class PowerMaxCommonTest(test.TestCase):
array = self.data.array
device_id = self.data.device_id
new_size = volume.size - 1
extra_specs = deepcopy(self.data.extra_specs)
extra_specs = self.data.extra_specs
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._extend_vol_validation_checks,
@ -2469,7 +2599,7 @@ class PowerMaxCommonTest(test.TestCase):
self.common.next_gen = False
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, True)
array, self.data.rep_config_metro, True)
self.assertFalse(r1_ode)
self.assertFalse(r1_ode_metro)
self.assertFalse(r2_ode)
@ -2487,7 +2617,7 @@ class PowerMaxCommonTest(test.TestCase):
self.common.next_gen = True
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, False)
array, self.data.rep_config_metro, False)
self.assertTrue(r1_ode)
self.assertFalse(r1_ode_metro)
self.assertFalse(r2_ode)
@ -2505,7 +2635,7 @@ class PowerMaxCommonTest(test.TestCase):
self.common.next_gen = True
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, True)
array, self.data.rep_config_metro, True)
self.assertTrue(r1_ode)
self.assertTrue(r1_ode_metro)
self.assertFalse(r2_ode)
@ -2523,7 +2653,7 @@ class PowerMaxCommonTest(test.TestCase):
self.common.next_gen = True
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, True)
array, self.data.rep_config_metro, True)
self.assertTrue(r1_ode)
self.assertTrue(r1_ode_metro)
self.assertTrue(r2_ode)
@ -2541,7 +2671,7 @@ class PowerMaxCommonTest(test.TestCase):
self.common.next_gen = True
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, True)
array, self.data.rep_config_metro, True)
self.assertTrue(r1_ode)
self.assertTrue(r1_ode_metro)
self.assertTrue(r2_ode)
@ -2562,7 +2692,7 @@ class PowerMaxCommonTest(test.TestCase):
device_id = self.data.device_id
new_size = volume.size + 1
extra_specs = self.data.extra_specs
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
self.common._extend_legacy_replicated_vol(
array, volume, device_id, volume.name, new_size, extra_specs,
rdf_group_no)
@ -2578,7 +2708,7 @@ class PowerMaxCommonTest(test.TestCase):
device_id = self.data.device_id
new_size = volume.size + 1
extra_specs = self.data.extra_specs
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._extend_legacy_replicated_vol,
@ -2605,12 +2735,6 @@ class PowerMaxCommonTest(test.TestCase):
port = self.common._get_unisphere_port()
self.assertEqual(ref_port, port)
@mock.patch.object(utils.PowerMaxUtils,
'get_replication_config')
def test_get_replication_info(self, mock_config):
self.common._get_replication_info()
mock_config.assert_not_called()
@mock.patch.object(common.PowerMaxCommon,
'_do_sync_check')
def test_sync_check_no_source_device_on_array(self, mock_check):
@ -2775,7 +2899,7 @@ class PowerMaxCommonTest(test.TestCase):
'R2-ArrayID': self.data.remote_array,
'R2-ArrayModel': self.data.array_model,
'ReplicationMode': 'Synchronized',
'RDFG-Label': self.data.rdf_group_name,
'RDFG-Label': self.data.rdf_group_name_1,
'R1-RDFG': 1, 'R2-RDFG': 1}
array = self.data.array
device_id = self.data.device_id
@ -2801,7 +2925,7 @@ class PowerMaxCommonTest(test.TestCase):
'R2-ArrayID': self.data.remote_array,
'R2-ArrayModel': self.data.array_model,
'ReplicationMode': 'Metro',
'RDFG-Label': self.data.rdf_group_name,
'RDFG-Label': self.data.rdf_group_name_1,
'R1-RDFG': 1, 'R2-RDFG': 1}
array = self.data.array
device_id = self.data.device_id
@ -3023,18 +3147,20 @@ class PowerMaxCommonTest(test.TestCase):
device_id = self.data.device_id
volume = self.data.test_volume
volume_name = self.data.volume_id
extra_specs = self.data.rep_extra_specs
extra_specs = deepcopy(self.data.rep_extra_specs)
target_slo = self.data.slo_silver
target_workload = self.data.workload
target_extra_specs = deepcopy(self.data.rep_extra_specs)
target_extra_specs[utils.DISABLECOMPRESSION] = False
group_name = self.data.rdf_managed_async_grp
extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
success, target_sg_name = self.common._retype_volume(
array, srp, device_id, volume, volume_name, extra_specs,
target_slo, target_workload, target_extra_specs, remote=True)
mck_get_rdf.assert_called_once_with(self.common.rep_config)
mck_get_rdf.assert_called_once_with(self.data.rep_config_async)
mck_get_vol.assert_called_once_with(array, device_id)
mck_get_sg.assert_called_once_with(
array, srp, target_slo, target_workload, extra_specs,
@ -3084,9 +3210,10 @@ class PowerMaxCommonTest(test.TestCase):
volume_name = self.data.volume_id
extra_specs = self.data.rep_extra_specs
target_slo = self.data.slo_silver
target_workload = deepcopy(self.data.workload)
target_extra_specs = self.data.rep_extra_specs
target_workload = self.data.workload
target_extra_specs = deepcopy(self.data.rep_extra_specs)
target_extra_specs[utils.DISABLECOMPRESSION] = False
target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
success, target_sg_name = self.common._retype_volume(
array, srp, device_id, volume, volume_name, extra_specs,
@ -3122,6 +3249,7 @@ class PowerMaxCommonTest(test.TestCase):
target_workload = self.data.workload
target_extra_specs = deepcopy(self.data.rep_extra_specs)
target_extra_specs[utils.DISABLECOMPRESSION] = False
target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
success, target_sg_name = self.common._retype_volume(
array, srp, device_id, volume, volume_name, extra_specs,
@ -3153,11 +3281,13 @@ class PowerMaxCommonTest(test.TestCase):
device_id = self.data.device_id
volume = self.data.test_volume
volume_name = self.data.volume_id
extra_specs = self.data.rep_extra_specs
extra_specs = deepcopy(self.data.rep_extra_specs)
target_slo = self.data.slo_silver
target_workload = self.data.workload
target_extra_specs = deepcopy(self.data.rep_extra_specs)
target_extra_specs[utils.DISABLECOMPRESSION] = False
extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
success, target_sg_name = self.common._retype_volume(
array, srp, device_id, volume, volume_name, extra_specs,

View File

@ -35,9 +35,13 @@ class PowerMaxMaskingTest(test.TestCase):
self.data = tpd.PowerMaxData()
super(PowerMaxMaskingTest, self).setUp()
volume_utils.get_max_over_subscription_ratio = mock.Mock()
configuration = mock.Mock()
configuration.safe_get.return_value = 'MaskingTests'
configuration.config_group = 'MaskingTests'
self.replication_device = self.data.sync_rep_device
configuration = tpfo.FakeConfiguration(
None, 'MaskingTests', 1, 1, san_ip='1.1.1.1',
san_login='smc', vmax_array=self.data.array, vmax_srp='SRP_1',
san_password='smc', san_api_port=8443,
vmax_port_groups=[self.data.port_group_name_f],
replication_device=self.replication_device)
self._gather_info = common.PowerMaxCommon._gather_info
common.PowerMaxCommon._get_u4p_failover_info = mock.Mock()
common.PowerMaxCommon._gather_info = mock.Mock()
@ -50,7 +54,7 @@ class PowerMaxMaskingTest(test.TestCase):
self.driver = driver
self.driver_fc = driver_fc
self.mask = self.driver.masking
self.extra_specs = self.data.extra_specs
self.extra_specs = deepcopy(self.data.extra_specs)
self.extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_i
self.maskingviewdict = self.driver._populate_masking_dict(
self.data.test_volume, self.data.connector, self.extra_specs)
@ -148,19 +152,30 @@ class PowerMaxMaskingTest(test.TestCase):
self.data.storagegroup_name_i, self.extra_specs)
self.assertIsNotNone(msg)
@mock.patch.object(rest.PowerMaxRest, 'modify_storage_group',
return_value=(200, tpfo.tpd.PowerMaxData.job_list[0]))
@mock.patch.object(rest.PowerMaxRest, 'remove_child_sg_from_parent_sg')
@mock.patch.object(masking.PowerMaxMasking, 'get_parent_sg_from_child',
side_effect=[None, tpd.PowerMaxData.parent_sg_f])
@mock.patch.object(
rest.PowerMaxRest, 'get_num_vols_in_sg', side_effect=[2, 1, 1])
def test_move_volume_between_storage_groups(
self, mock_num, mock_parent, mock_rm):
self, mock_num, mock_parent, mock_rm, mck_mod):
for x in range(0, 3):
self.driver.masking.move_volume_between_storage_groups(
self.data.array, self.data.device_id,
self.data.storagegroup_name_i, self.data.storagegroup_name_f,
self.data.extra_specs)
mock_rm.assert_called_once()
ref_payload = (
{"executionOption": "ASYNCHRONOUS",
"editStorageGroupActionParam": {
"moveVolumeToStorageGroupParam": {
"volumeId": [self.data.device_id],
"storageGroupId": self.data.storagegroup_name_f,
"force": 'false'}}})
mck_mod.assert_called_with(
self.data.array, self.data.storagegroup_name_i, ref_payload)
@mock.patch.object(rest.PowerMaxRest, 'remove_child_sg_from_parent_sg')
@mock.patch.object(masking.PowerMaxMasking, 'get_parent_sg_from_child',
@ -786,6 +801,8 @@ class PowerMaxMaskingTest(test.TestCase):
mock_return.assert_called_once()
def test_add_volume_to_default_storage_group_next_gen(self):
extra_specs = deepcopy(self.data.extra_specs)
extra_specs.pop(utils.IS_RE, None)
with mock.patch.object(rest.PowerMaxRest, 'is_next_gen_array',
return_value=True):
with mock.patch.object(
@ -793,11 +810,11 @@ class PowerMaxMaskingTest(test.TestCase):
'get_or_create_default_storage_group') as mock_get:
self.mask.add_volume_to_default_storage_group(
self.data.array, self.device_id, self.volume_name,
self.extra_specs)
extra_specs)
mock_get.assert_called_once_with(
self.data.array, self.data.srp,
self.extra_specs[utils.SLO],
'NONE', self.extra_specs, False, False, None)
extra_specs[utils.SLO],
'NONE', extra_specs, False, False, None)
@mock.patch.object(provision.PowerMaxProvision, 'create_storage_group')
def test_get_or_create_default_storage_group(self, mock_create_sg):
@ -863,7 +880,8 @@ class PowerMaxMaskingTest(test.TestCase):
mock_delete_ig.assert_called_once()
def test_populate_masking_dict_init_check_false(self):
extra_specs = self.data.extra_specs
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
connector = self.data.connector
with mock.patch.object(self.driver, '_get_initiator_check_flag',
return_value=False):
@ -872,7 +890,8 @@ class PowerMaxMaskingTest(test.TestCase):
self.assertFalse(masking_view_dict['initiator_check'])
def test_populate_masking_dict_init_check_true(self):
extra_specs = self.data.extra_specs
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
connector = self.data.connector
with mock.patch.object(self.driver, '_get_initiator_check_flag',
return_value=True):

View File

@ -112,7 +112,7 @@ class PowerMaxVolumeMetadataDebugTest(test.TestCase):
def test_capture_failover_volume(self, mock_uvim):
self.volume_metadata.capture_failover_volume(
self.data.test_volume, self.data.device_id2,
self.data.remote_array, self.data.rdf_group_name,
self.data.remote_array, self.data.rdf_group_name_1,
self.data.device_id, self.data.array,
self.data.extra_specs, True, None,
fields.ReplicationStatus.FAILED_OVER, utils.REP_SYNC)
@ -162,7 +162,7 @@ class PowerMaxVolumeMetadataDebugTest(test.TestCase):
self.data.test_volume, self.data.device_id, self.data.array,
self.data.srp, self.data.slo, self.data.workload,
self.data.storagegroup_name_target, False, None,
False)
False, None)
mock_uvim.assert_called_once()
def test_update_volume_info_metadata(self):

View File

@ -285,7 +285,7 @@ class PowerMaxProvisionTest(test.TestCase):
device_id = self.data.device_id
new_size = '3'
extra_specs = self.data.extra_specs
rdfg_num = self.data.rdf_group_no
rdfg_num = self.data.rdf_group_no_1
with mock.patch.object(self.provision.rest, 'extend_volume'
) as mock_ex:
self.provision.extend_volume(array, device_id, new_size,
@ -396,7 +396,7 @@ class PowerMaxProvisionTest(test.TestCase):
array = self.data.array
device_id = self.data.device_id
sg_name = self.data.storagegroup_name_f
rdf_group = self.data.rdf_group_no
rdf_group = self.data.rdf_group_no_1
extra_specs = self.data.rep_extra_specs
# sync still in progress
@ -519,7 +519,7 @@ class PowerMaxProvisionTest(test.TestCase):
def test_replicate_group(self, mock_create):
self.rest.replicate_group(
self.data.array, self.data.test_rep_group,
self.data.rdf_group_no, self.data.remote_array,
self.data.rdf_group_no_1, self.data.remote_array,
self.data.extra_specs)
mock_create.assert_called_once()

View File

@ -41,12 +41,7 @@ class PowerMaxReplicationTest(test.TestCase):
def setUp(self):
self.data = tpd.PowerMaxData()
super(PowerMaxReplicationTest, self).setUp()
self.replication_device = {
'target_device_id': self.data.remote_array,
'remote_port_group': self.data.port_group_name_f,
'remote_pool': self.data.srp2,
'rdf_group_label': self.data.rdf_group_name,
'allow_extend': 'True'}
self.replication_device = self.data.sync_rep_device
volume_utils.get_max_over_subscription_ratio = mock.Mock()
configuration = tpfo.FakeConfiguration(
None, 'CommonReplicationTests', interval=1, retries=1,
@ -78,12 +73,7 @@ class PowerMaxReplicationTest(test.TestCase):
self.extra_specs['retries'] = 1
self.extra_specs['interval'] = 1
self.extra_specs['rep_mode'] = 'Synchronous'
self.async_rep_device = {
'target_device_id': self.data.remote_array,
'remote_port_group': self.data.port_group_name_f,
'remote_pool': self.data.srp2,
'rdf_group_label': self.data.rdf_group_name,
'allow_extend': 'True', 'mode': 'async'}
self.async_rep_device = self.data.async_rep_device
async_configuration = tpfo.FakeConfiguration(
None, 'CommonReplicationTests', interval=1, retries=1,
san_ip='1.1.1.1', san_login='smc', vmax_array=self.data.array,
@ -92,12 +82,7 @@ class PowerMaxReplicationTest(test.TestCase):
replication_device=self.async_rep_device)
self.async_driver = fc.PowerMaxFCDriver(
configuration=async_configuration)
self.metro_rep_device = {
'target_device_id': self.data.remote_array,
'remote_port_group': self.data.port_group_name_f,
'remote_pool': self.data.srp2,
'rdf_group_label': self.data.rdf_group_name,
'allow_extend': 'True', 'mode': 'metro'}
self.metro_rep_device = self.data.metro_rep_device
metro_configuration = tpfo.FakeConfiguration(
None, 'CommonReplicationTests', interval=1, retries=1,
san_ip='1.1.1.1', san_login='smc', vmax_array=self.data.array,
@ -121,9 +106,9 @@ class PowerMaxReplicationTest(test.TestCase):
extra_specs = deepcopy(self.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
extra_specs[utils.IS_RE] = True
rep_config = self.utils.get_replication_config(
[self.replication_device])
rep_config = self.data.rep_config_sync
rep_config[utils.RDF_CONS_EXEMPT] = False
extra_specs[utils.REP_CONFIG] = rep_config
self.common._unmap_lun(self.data.test_volume, self.data.connector)
mock_es.assert_called_once_with(extra_specs, rep_config)
@ -148,8 +133,8 @@ class PowerMaxReplicationTest(test.TestCase):
rep_extra_specs = deepcopy(tpd.PowerMaxData.rep_extra_specs)
rep_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
rep_config = self.utils.get_replication_config(
[self.replication_device])
rep_config = self.data.rep_config_sync
extra_specs[utils.REP_CONFIG] = rep_config
rep_config[utils.RDF_CONS_EXEMPT] = False
with mock.patch.object(self.common, '_get_replication_extra_specs',
@ -252,15 +237,16 @@ class PowerMaxReplicationTest(test.TestCase):
def test_get_rdf_details(self):
rdf_group_no, remote_array = self.common.get_rdf_details(
self.data.array)
self.assertEqual(self.data.rdf_group_no, rdf_group_no)
self.data.array, self.data.rep_config_sync)
self.assertEqual(self.data.rdf_group_no_1, rdf_group_no)
self.assertEqual(self.data.remote_array, remote_array)
def test_get_rdf_details_exception(self):
with mock.patch.object(self.rest, 'get_rdf_group_number',
return_value=None):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.get_rdf_details, self.data.array)
self.common.get_rdf_details, self.data.array,
self.data.rep_config_sync)
@mock.patch.object(common.PowerMaxCommon, '_sync_check')
def test_failover_host(self, mck_sync):
@ -282,17 +268,20 @@ class PowerMaxReplicationTest(test.TestCase):
'get_array_model_info',
return_value=('VMAX250F', False))
def test_get_replication_extra_specs(self, mock_model):
rep_config = self.utils.get_replication_config(
[self.replication_device])
rep_config = self.data.rep_config_sync
# Path one - disable compression
extra_specs1 = deepcopy(self.extra_specs)
extra_specs1[utils.DISABLECOMPRESSION] = 'true'
ref_specs1 = deepcopy(self.data.rep_extra_specs5)
ref_specs1['rdf_group_label'] = self.data.rdf_group_name_1
ref_specs1['rdf_group_no'] = self.data.rdf_group_no_1
rep_extra_specs1 = self.common._get_replication_extra_specs(
extra_specs1, rep_config)
self.assertEqual(ref_specs1, rep_extra_specs1)
# Path two - disable compression, not all flash
ref_specs2 = deepcopy(self.data.rep_extra_specs5)
ref_specs2['rdf_group_label'] = self.data.rdf_group_name_1
ref_specs2['rdf_group_no'] = self.data.rdf_group_no_1
with mock.patch.object(self.rest, 'is_compression_capable',
return_value=False):
rep_extra_specs2 = self.common._get_replication_extra_specs(
@ -303,15 +292,17 @@ class PowerMaxReplicationTest(test.TestCase):
'get_array_model_info',
return_value=('PowerMax 2000', True))
def test_get_replication_extra_specs_powermax(self, mock_model):
rep_config = self.utils.get_replication_config(
[self.replication_device])
rep_specs = deepcopy(self.data.rep_extra_specs2)
rep_config = self.data.rep_config_sync
rep_specs = deepcopy(self.data.rep_extra_specs5)
extra_specs = deepcopy(self.extra_specs)
# SLO not valid, both SLO and Workload set to NONE
rep_specs['slo'] = None
rep_specs['workload'] = None
rep_specs['target_array_model'] = 'PowerMax 2000'
rep_specs['rdf_group_label'] = self.data.rdf_group_name_1
rep_specs['rdf_group_no'] = self.data.rdf_group_no_1
with mock.patch.object(self.provision, 'verify_slo_workload',
return_value=(False, False)):
rep_extra_specs = self.common._get_replication_extra_specs(
@ -328,8 +319,7 @@ class PowerMaxReplicationTest(test.TestCase):
self.assertEqual(rep_specs, rep_extra_specs)
def test_get_secondary_stats(self):
rep_config = self.utils.get_replication_config(
[self.replication_device])
rep_config = self.data.rep_config_sync
array_map = self.common.get_attributes_from_cinder_config()
finalarrayinfolist = self.common._get_slo_workload_combinations(
array_map)
@ -342,14 +332,16 @@ class PowerMaxReplicationTest(test.TestCase):
self.assertEqual(ref_info, secondary_info)
@mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata',
return_value='')
return_value=tpd.PowerMaxData.volume_metadata)
def test_replicate_group(self, mck_meta):
volume_model_update = {
'id': self.data.test_volume.id,
'provider_location': self.data.test_volume.provider_location}
extra_specs = deepcopy(self.data.extra_specs_rep_enabled)
extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
vols_model_update = self.common._replicate_group(
self.data.array, [volume_model_update],
self.data.test_vol_grp_name, self.extra_specs)
self.data.test_vol_grp_name, extra_specs)
ref_rep_data = {'array': self.data.remote_array,
'device_id': self.data.device_id2}
ref_vol_update = {
@ -357,7 +349,7 @@ class PowerMaxReplicationTest(test.TestCase):
'provider_location': self.data.test_volume.provider_location,
'replication_driver_data': ref_rep_data,
'replication_status': fields.ReplicationStatus.ENABLED,
'metadata': ''}
'metadata': self.data.volume_metadata}
# Decode string representations of dicts into dicts, because
# the string representations are randomly ordered and therefore
@ -367,19 +359,34 @@ class PowerMaxReplicationTest(test.TestCase):
self.assertEqual(ref_vol_update, vols_model_update[0])
@mock.patch.object(
utils.PowerMaxUtils, 'validate_non_replication_group_config')
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=False)
def test_create_group(self, mock_type, mock_cg_type, mck_validate):
ref_model_update = {
'status': fields.GroupStatus.AVAILABLE}
model_update = self.common.create_group(None, self.data.test_group_1)
self.assertEqual(ref_model_update, model_update)
extra_specs_list = [self.data.vol_type_extra_specs_rep_enabled]
mck_validate.assert_called_once_with(extra_specs_list)
@mock.patch.object(
utils.PowerMaxUtils, 'validate_replication_group_config')
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=False)
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=True)
def test_create_replicaton_group(self, mock_type, mock_cg_type):
def test_create_replicaton_group(
self, mock_type, mock_cg_type, mck_validate):
ref_model_update = {
'status': fields.GroupStatus.AVAILABLE,
'replication_status': fields.ReplicationStatus.ENABLED}
model_update = self.common.create_group(None, self.data.test_group_1)
self.assertEqual(ref_model_update, model_update)
# Replication mode is async
self.assertRaises(exception.InvalidInput,
self.async_driver.common.create_group,
None, self.data.test_group_1)
extra_specs_list = [self.data.vol_type_extra_specs_rep_enabled]
mck_validate.assert_called_once_with(
self.common.rep_configs, extra_specs_list)
def test_enable_replication(self):
# Case 1: Group not replicated
@ -439,9 +446,9 @@ class PowerMaxReplicationTest(test.TestCase):
self.assertEqual({}, model_update)
@mock.patch.object(rest.PowerMaxRest, 'srdf_failover_group',
return_value=tpd.PowerMaxData.rdf_group_no)
return_value=tpd.PowerMaxData.rdf_group_no_1)
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=tpd.PowerMaxData.rdf_group_no)
return_value=tpd.PowerMaxData.rdf_group_no_1)
@mock.patch.object(common.PowerMaxCommon, '_find_volume_group',
return_value=tpd.PowerMaxData.test_group)
def test_failover_replication_failover(self, mck_find_vol_grp,
@ -456,9 +463,9 @@ class PowerMaxReplicationTest(test.TestCase):
model_update['replication_status'])
@mock.patch.object(rest.PowerMaxRest, 'srdf_failover_group',
return_value=tpd.PowerMaxData.rdf_group_no)
return_value=tpd.PowerMaxData.rdf_group_no_1)
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=tpd.PowerMaxData.rdf_group_no)
return_value=tpd.PowerMaxData.rdf_group_no_1)
@mock.patch.object(common.PowerMaxCommon, '_find_volume_group',
return_value=tpd.PowerMaxData.test_group)
def test_failover_replication_failback(self, mck_find_vol_grp,
@ -488,13 +495,18 @@ class PowerMaxReplicationTest(test.TestCase):
self.assertEqual(fields.ReplicationStatus.ERROR,
model_update['replication_status'])
@mock.patch.object(utils.PowerMaxUtils, 'get_volumetype_extra_specs',
return_value={utils.REPLICATION_DEVICE_BACKEND_ID:
tpd.PowerMaxData.rep_backend_id_sync})
@mock.patch.object(utils.PowerMaxUtils, 'get_volume_group_utils',
return_value=(tpd.PowerMaxData.array, {}))
@mock.patch.object(common.PowerMaxCommon, '_cleanup_group_replication')
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=True)
def test_delete_replication_group(self, mock_check,
mock_cleanup, mock_utils):
self.common._delete_group(self.data.test_rep_group, [])
mock_cleanup, mock_utils, mock_get):
group = self.data.test_rep_group
group['volume_types'] = self.data.test_volume_type_list
self.common._delete_group(group, [])
mock_cleanup.assert_called_once()
@mock.patch.object(masking.PowerMaxMasking,
@ -531,7 +543,7 @@ class PowerMaxReplicationTest(test.TestCase):
def test_cleanup_group_replication(self, mock_rm, mock_rm_reset):
self.common._cleanup_group_replication(
self.data.array, self.data.test_vol_grp_name,
[self.data.device_id], self.extra_specs)
[self.data.device_id], self.extra_specs, self.data.rep_config_sync)
mock_rm.assert_called_once()
@mock.patch.object(common.PowerMaxCommon, '_failover_replication',
@ -582,7 +594,7 @@ class PowerMaxReplicationTest(test.TestCase):
external_ref = {u'source-name': u'00002'}
volume = self.data.test_volume
ref_model_update = {
'metadata': {},
'metadata': {'BackendID': 'None'},
'provider_location': six.text_type({
'device_id': self.data.device_id,
'array': self.data.array}),
@ -640,13 +652,14 @@ class PowerMaxReplicationTest(test.TestCase):
{}))
@mock.patch.object(
common.PowerMaxCommon, '_initial_setup',
return_value=tpd.PowerMaxData.rep_extra_specs)
return_value=tpd.PowerMaxData.rep_extra_specs_rep_config)
def test_create_rep_volume(self, mck_initial, mck_create, mck_meta):
ref_model_update = (
{'provider_location': six.text_type(self.data.provider_location),
'replication_driver_data': (
tpd.PowerMaxData.provider_location2),
'metadata': {'device-meta-key-1': 'device-meta-value-1',
'metadata': {'BackendID': self.data.rep_backend_id_sync,
'device-meta-key-1': 'device-meta-value-1',
'device-meta-key-2': 'device-meta-value-2',
'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}})
@ -657,7 +670,7 @@ class PowerMaxReplicationTest(test.TestCase):
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata',
return_value='')
return_value={})
@mock.patch.object(
common.PowerMaxCommon, '_create_cloned_volume',
return_value=(
@ -666,14 +679,15 @@ class PowerMaxReplicationTest(test.TestCase):
def test_create_rep_volume_from_snapshot(self, mck_meta, mck_clone_chk):
ref_model_update = (
{'provider_location': six.text_type(self.data.provider_location),
'metadata': ''})
'metadata': {'BackendID': self.data.rep_backend_id_sync}})
ref_model_update.update(self.data.replication_update)
model_update = self.common.create_volume_from_snapshot(
self.data.test_clone_volume, self.data.test_snapshot)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(
common.PowerMaxCommon, 'get_volume_metadata', return_value='')
common.PowerMaxCommon, 'get_volume_metadata',
return_value=tpd.PowerMaxData.volume_metadata)
@mock.patch.object(
common.PowerMaxCommon, '_create_cloned_volume',
return_value=(
@ -681,9 +695,12 @@ class PowerMaxReplicationTest(test.TestCase):
tpd.PowerMaxData.replication_update, {}))
@mock.patch.object(common.PowerMaxCommon, '_clone_check')
def test_cloned_rep_volume(self, mck_clone, mck_meta, mck_clone_chk):
metadata = deepcopy(self.data.volume_metadata)
metadata['BackendID'] = self.data.rep_backend_id_sync
ref_model_update = {
'provider_location': six.text_type(
self.data.provider_location_clone), 'metadata': ''}
self.data.provider_location_clone),
'metadata': metadata}
ref_model_update.update(self.data.replication_update)
model_update = self.common.create_cloned_volume(
self.data.test_clone_volume, self.data.test_volume)
@ -703,36 +720,37 @@ class PowerMaxReplicationTest(test.TestCase):
return_value=tpd.PowerMaxData.default_sg_re_enabled)
@mock.patch.object(
common.PowerMaxCommon, 'prepare_replication_details',
return_value=(True, tpd.PowerMaxData.rep_extra_specs_mgmt, {}))
return_value=(True, tpd.PowerMaxData.rep_extra_specs_rep_config,
{}, True))
@mock.patch.object(
provision.PowerMaxProvision, 'verify_slo_workload',
return_value=(True, True))
def test_create_volume_rep_enabled(
self, mck_slo, mck_prep, mck_get, mck_create, mck_protect, mck_set,
mck_add):
volume = self.data.test_volume
volume_name = self.data.volume_id
volume_size = 1
extra_specs = deepcopy(self.data.rep_extra_specs)
extra_specs[utils.SLO] = utils.REP_ASYNC
self.common.rep_config = {'rdf_group_label': self.data.rdf_group_name}
extra_specs['mode'] = utils.REP_ASYNC
volume_dict, rep_update, rep_info_dict = self.common._create_volume(
volume_name, volume_size, extra_specs)
volume, volume_name, volume_size, extra_specs)
self.assertEqual(self.data.provider_location, volume_dict)
self.assertEqual(self.data.replication_update, rep_update)
self.assertIsNone(rep_info_dict)
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=(tpd.PowerMaxData.rdf_group_no, None))
return_value=(tpd.PowerMaxData.rdf_group_no_1, None))
@mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled',
side_effect=[False, True])
def test_remove_vol_and_cleanup_replication(self, mck_rep, mck_get):
array = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
device_id = self.data.device_id
volume = self.data.test_volume
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
with mock.patch.object(
self.masking, 'remove_and_reset_members') as mock_rm:
@ -747,6 +765,30 @@ class PowerMaxReplicationTest(test.TestCase):
mock_clean.assert_called_once_with(
array, rdf_group_no, device_id, extra_specs)
@mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled',
return_value=False)
def test_remove_vol_and_cleanup_replication_host_assisted_migration(
self, mck_rep):
array = self.data.array
device_id = self.data.device_id
volume = deepcopy(self.data.test_volume)
volume.migration_status = 'deleting'
metadata = deepcopy(self.data.volume_metadata)
metadata[utils.IS_RE_CAMEL] = 'False'
volume.metadata = metadata
volume_name = self.data.test_volume.name
extra_specs = deepcopy(self.data.rep_extra_specs)
extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
ref_extra_specs = deepcopy(extra_specs)
ref_extra_specs.pop(utils.IS_RE)
with mock.patch.object(
self.masking, 'remove_and_reset_members') as mock_rm:
self.common._remove_vol_and_cleanup_replication(
array, device_id, volume_name, extra_specs, volume)
mock_rm.assert_called_once_with(
array, volume, device_id, volume_name, ref_extra_specs, False)
@mock.patch.object(
common.PowerMaxCommon, 'get_volume_metadata', return_value='')
@mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication')
@ -756,7 +798,7 @@ class PowerMaxReplicationTest(test.TestCase):
@mock.patch.object(
common.PowerMaxCommon, 'break_rdf_device_pair_session',
return_value=({'mgmt_sg_name': tpd.PowerMaxData.rdf_managed_async_grp,
'rdf_group_no': tpd.PowerMaxData.rdf_group_no}, True))
'rdf_group_no': tpd.PowerMaxData.rdf_group_no_1}, True))
def test_migrate_volume_success_rep_to_no_rep(
self, mck_break, mck_retype, mck_resume, mck_get):
array_id = self.data.array
@ -786,6 +828,7 @@ class PowerMaxReplicationTest(test.TestCase):
target_slo, target_workload, target_extra_specs)
self.assertTrue(success)
@mock.patch.object(common.PowerMaxCommon, '_sync_check')
@mock.patch.object(
common.PowerMaxCommon, 'get_volume_metadata', return_value='')
@mock.patch.object(
@ -802,7 +845,7 @@ class PowerMaxReplicationTest(test.TestCase):
'remote_array': tpd.PowerMaxData.remote_array},
tpd.PowerMaxData.rep_extra_specs, False))
def test_migrate_volume_success_no_rep_to_rep(
self, mck_configure, mck_retype, mck_protect, mck_get):
self, mck_configure, mck_retype, mck_protect, mck_get, mck_check):
self.common.rep_config = {'mode': utils.REP_SYNC,
'array': self.data.array}
array_id = self.data.array
@ -812,7 +855,9 @@ class PowerMaxReplicationTest(test.TestCase):
target_slo = self.data.slo_silver
target_workload = self.data.workload
volume_name = volume.name
extra_specs = self.data.extra_specs
extra_specs = deepcopy(self.data.extra_specs)
rep_config_sync = deepcopy(self.data.rep_config_sync)
rep_config_sync[utils.RDF_CONS_EXEMPT] = False
new_type = {'extra_specs': self.data.rep_extra_specs}
target_extra_specs = deepcopy(new_type['extra_specs'])
@ -821,7 +866,8 @@ class PowerMaxReplicationTest(test.TestCase):
utils.WORKLOAD: target_workload,
utils.INTERVAL: extra_specs[utils.INTERVAL],
utils.RETRIES: extra_specs[utils.RETRIES],
utils.DISABLECOMPRESSION: False, utils.REP_MODE: utils.REP_SYNC})
utils.DISABLECOMPRESSION: False, utils.REP_MODE: utils.REP_SYNC,
utils.REP_CONFIG: rep_config_sync})
success, model_update = self.common._migrate_volume(
array_id, volume, device_id, srp, target_slo, target_workload,
@ -851,6 +897,8 @@ class PowerMaxReplicationTest(test.TestCase):
target_slo = self.data.slo_silver
target_workload = self.data.workload
volume_name = volume.name
rep_config_sync = deepcopy(self.data.rep_config_sync)
rep_config_sync[utils.RDF_CONS_EXEMPT] = False
extra_specs = deepcopy(self.data.rep_extra_specs)
extra_specs[utils.SLO] = self.data.slo_diamond
new_type = {'extra_specs': self.data.rep_extra_specs}
@ -861,7 +909,8 @@ class PowerMaxReplicationTest(test.TestCase):
utils.WORKLOAD: target_workload,
utils.INTERVAL: extra_specs[utils.INTERVAL],
utils.RETRIES: extra_specs[utils.RETRIES],
utils.DISABLECOMPRESSION: False, utils.REP_MODE: utils.REP_SYNC})
utils.DISABLECOMPRESSION: False, utils.REP_MODE: utils.REP_SYNC,
utils.REP_CONFIG: rep_config_sync})
success, model_update = self.common._migrate_volume(
array_id, volume, device_id, srp, target_slo, target_workload,
@ -885,7 +934,8 @@ class PowerMaxReplicationTest(test.TestCase):
volume_name = self.data.volume_id
remote_array = self.data.remote_array
target_device_id = self.data.device_id2
extra_specs = self.data.rep_extra_specs
extra_specs = deepcopy(self.data.rep_extra_specs)
extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
group_name = self.data.rdf_managed_async_grp
get_create_grp_calls = [
@ -931,10 +981,11 @@ class PowerMaxReplicationTest(test.TestCase):
remote_array = self.data.remote_array
device_id = self.data.device_id
target_device_id = self.data.device_id2
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
volume_name = self.data.volume_id
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs[utils.REP_MODE] = utils.REP_METRO
rep_extra_specs[utils.REP_CONFIG] = self.data.rep_config_metro
sg_name = self.data.default_sg_re_enabled
async_grp = self.data.rdf_managed_async_grp
pair_state = utils.RDF_SYNC_STATE
@ -949,7 +1000,7 @@ class PowerMaxReplicationTest(test.TestCase):
rdf_group_no, volume_name, rep_extra_specs)
mck_paired.assert_called_once_with(
array, remote_array, device_id, target_device_id)
mck_get_rdf.assert_called_once_with(self.common.rep_config)
mck_get_rdf.assert_called_once_with(self.data.rep_config_metro)
mck_get_sg.assert_called_once_with(array, device_id)
mck_break.assert_called_once_with(
array, device_id, sg_name, rdf_group_no, rep_extra_specs,
@ -986,7 +1037,7 @@ class PowerMaxReplicationTest(test.TestCase):
remote_array = self.data.remote_array
device_id = self.data.device_id
target_device_id = self.data.device_id2
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
volume_name = self.data.volume_id
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs[utils.REP_MODE] = utils.REP_SYNC
@ -1022,15 +1073,16 @@ class PowerMaxReplicationTest(test.TestCase):
return_value=tpd.PowerMaxData.rep_extra_specs)
@mock.patch.object(
common.PowerMaxCommon, 'get_rdf_details',
return_value=(tpd.PowerMaxData.rdf_group_no,
return_value=(tpd.PowerMaxData.rdf_group_no_1,
tpd.PowerMaxData.remote_array))
def test_cleanup_rdf_device_pair_vol_cnt_exception(
self, mck_get_rdf, mck_get_rep, mck_get_rdf_pair, mck_get_sg_list):
array = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
device_id = self.data.device_id
extra_specs = deepcopy(self.data.rep_extra_specs)
extra_specs[utils.REP_MODE] = utils.REP_SYNC
extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
self.assertRaises(
exception.VolumeBackendAPIException,
self.common.cleanup_rdf_device_pair, array, rdf_group_no,
@ -1066,19 +1118,20 @@ class PowerMaxReplicationTest(test.TestCase):
return_value=tpd.PowerMaxData.rep_extra_specs_mgmt)
@mock.patch.object(
common.PowerMaxCommon, 'get_rdf_details',
return_value=(tpd.PowerMaxData.rdf_group_no,
return_value=(tpd.PowerMaxData.rdf_group_no_1,
tpd.PowerMaxData.remote_array))
def test_cleanup_rdf_device_pair(
self, mck_get_rdf, mck_get_rep, mck_get_rdf_pair, mck_get_sg_list,
mck_wait, mck_get_mgmt_grp, mck_get_num_vols, mck_suspend,
mck_srdf_remove, mck_remove, mck_delete, mck_cleanup, mck_resume):
array = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
device_id = self.data.device_id
target_device_id = self.data.device_id2
extra_specs = deepcopy(self.data.rep_extra_specs)
extra_specs[utils.REP_MODE] = utils.REP_METRO
rep_extra_specs = self.data.rep_extra_specs_mgmt
extra_specs[utils.REP_CONFIG] = self.data.rep_config_metro
rep_extra_specs = deepcopy(self.data.rep_extra_specs_mgmt)
rdf_mgmt_grp = self.data.rdf_managed_async_grp
self.common.cleanup_rdf_device_pair(
@ -1101,24 +1154,25 @@ class PowerMaxReplicationTest(test.TestCase):
return_value=tpd.PowerMaxData.rep_extra_specs_mgmt)
def test_prepare_replication_details(self, mck_get_rep, mck_get_vols):
extra_specs = deepcopy(self.data.extra_specs_rep_enabled)
extra_specs['rep_mode'] = 'Synchronous'
extra_specs['workload'] = 'NONE'
extra_specs['rep_mode'] = utils.REP_SYNC
extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
rep_extra_specs = self.data.rep_extra_specs_mgmt
ref_info_dict = {
'initial_device_list': ['00001', '00002'],
'local_array': self.data.array,
'rdf_group_no': self.data.rdf_group_no,
'rdf_group_no': self.data.rdf_group_no_1,
'remote_array': self.data.remote_array,
'rep_mode': utils.REP_SYNC, 'service_level': self.data.slo_diamond,
'sg_name': self.data.default_sg_no_slo_re_enabled,
'sync_interval': 2, 'sync_retries': 200}
rep_first_vol, resp_extra_specs, rep_info_dict = (
rep_first_vol, resp_extra_specs, rep_info_dict, rdfg_empty = (
self.common.prepare_replication_details(extra_specs))
self.assertFalse(rep_first_vol)
self.assertEqual(rep_extra_specs, resp_extra_specs)
self.assertEqual(ref_info_dict, rep_info_dict)
self.assertFalse(rdfg_empty)
@mock.patch.object(
rest.PowerMaxRest, 'srdf_protect_storage_group')
@ -1135,9 +1189,11 @@ class PowerMaxReplicationTest(test.TestCase):
volume_dict['storage_group'], rep_extra_specs['slo'], extra_specs)
def test_gather_replication_updates(self):
self.common.rep_config = {'rdf_group_label': self.data.rdf_group_name}
self.common.rep_config = {
'rdf_group_label': self.data.rdf_group_name_1}
extra_specs = self.data.rep_extra_specs
rep_extra_specs = self.data.rep_extra_specs_mgmt
rep_extra_specs = deepcopy(self.data.rep_extra_specs_mgmt)
rep_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
volume_dict = {'storage_group': self.data.rdf_managed_async_grp,
'remote_device_id': self.data.device_id2,
'device_uuid': self.data.volume_id}


@ -506,6 +506,50 @@ class PowerMaxRestTest(test.TestCase):
self.data.failed_resource, device_id,
self.data.extra_specs)
@mock.patch.object(rest.PowerMaxRest, 'wait_for_job')
def test_remove_vol_from_sg_force_true(self, mck_wait):
device_id = self.data.device_id
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.FORCE_VOL_REMOVE] = True
expected_payload = (
{"executionOption": "ASYNCHRONOUS",
"editStorageGroupActionParam": {
"removeVolumeParam": {
"volumeId": [device_id],
"remoteSymmSGInfoParam": {
"force": "true"}}}})
with mock.patch.object(
self.rest, 'modify_storage_group', return_value=(
200, tpd.PowerMaxData.job_list)) as mck_mod:
self.rest.remove_vol_from_sg(
self.data.array, self.data.storagegroup_name_f, device_id,
extra_specs)
mck_mod.assert_called_with(
self.data.array, self.data.storagegroup_name_f,
expected_payload)
@mock.patch.object(rest.PowerMaxRest, 'wait_for_job')
def test_remove_vol_from_sg_force_false(self, mck_wait):
device_id = self.data.device_id
extra_specs = deepcopy(self.data.extra_specs)
extra_specs.pop(utils.FORCE_VOL_REMOVE, None)
expected_payload = (
{"executionOption": "ASYNCHRONOUS",
"editStorageGroupActionParam": {
"removeVolumeParam": {
"volumeId": [device_id],
"remoteSymmSGInfoParam": {
"force": "false"}}}})
with mock.patch.object(
self.rest, 'modify_storage_group', return_value=(
200, tpd.PowerMaxData.job_list)) as mck_mod:
self.rest.remove_vol_from_sg(
self.data.array, self.data.storagegroup_name_f, device_id,
extra_specs)
mck_mod.assert_called_with(
self.data.array, self.data.storagegroup_name_f,
expected_payload)
def test_get_vmax_default_storage_group(self):
ref_storage_group = self.data.sg_details[0]
ref_sg_name = self.data.defaultstoragegroup_name
@ -620,8 +664,8 @@ class PowerMaxRestTest(test.TestCase):
array = self.data.array
device_id = self.data.device_id
new_size = '3'
extra_specs = self.data.extra_specs,
rdfg_num = self.data.rdf_group_no
extra_specs = self.data.extra_specs
rdfg_num = self.data.rdf_group_no_1
extend_vol_payload = {'executionOption': 'ASYNCHRONOUS',
'editVolumeActionParam': {
@ -1364,15 +1408,15 @@ class PowerMaxRestTest(test.TestCase):
def test_set_storagegroup_srp(self, mock_mod):
self.rest.set_storagegroup_srp(
self.data.array, self.data.test_vol_grp_name,
self.data.srp2, self.data.extra_specs)
self.data.srp, self.data.extra_specs)
mock_mod.assert_called_once()
def test_get_rdf_group(self):
with mock.patch.object(self.rest, 'get_resource') as mock_get:
self.rest.get_rdf_group(self.data.array, self.data.rdf_group_no)
self.rest.get_rdf_group(self.data.array, self.data.rdf_group_no_1)
mock_get.assert_called_once_with(
self.data.array, 'replication', 'rdf_group',
self.data.rdf_group_no)
self.data.rdf_group_no_1)
def test_get_rdf_group_list(self):
rdf_list = self.rest.get_rdf_group_list(self.data.array)
@ -1405,17 +1449,17 @@ class PowerMaxRestTest(test.TestCase):
def test_get_rdf_group_number(self):
rdfg_num = self.rest.get_rdf_group_number(
self.data.array, self.data.rdf_group_name)
self.assertEqual(self.data.rdf_group_no, rdfg_num)
self.data.array, self.data.rdf_group_name_1)
self.assertEqual(self.data.rdf_group_no_1, rdfg_num)
with mock.patch.object(self.rest, 'get_rdf_group_list',
return_value=None):
rdfg_num2 = self.rest.get_rdf_group_number(
self.data.array, self.data.rdf_group_name)
self.data.array, self.data.rdf_group_name_1)
self.assertIsNone(rdfg_num2)
with mock.patch.object(self.rest, 'get_rdf_group',
return_value=None):
rdfg_num3 = self.rest.get_rdf_group_number(
self.data.array, self.data.rdf_group_name)
self.data.array, self.data.rdf_group_name_1)
self.assertIsNone(rdfg_num3)
@mock.patch.object(rest.PowerMaxRest, 'get_rdf_group',
@ -1427,26 +1471,26 @@ class PowerMaxRestTest(test.TestCase):
# First volume out, Metro use bias not set
act_payload_1 = self.rest.get_metro_payload_info(
self.data.array, payload_in.copy(), self.data.rdf_group_no, {},
self.data.array, payload_in.copy(), self.data.rdf_group_no_1, {},
True)
self.assertEqual(payload_in, act_payload_1)
# First volume out, Metro use bias set
act_payload_2 = self.rest.get_metro_payload_info(
self.data.array, payload_in.copy(), self.data.rdf_group_no,
self.data.array, payload_in.copy(), self.data.rdf_group_no_1,
{'metro_bias': True}, True)
self.assertEqual('true', act_payload_2['metroBias'])
# Not first vol in RDFG, consistency exempt not set
act_payload_3 = self.rest.get_metro_payload_info(
self.data.array, payload_in.copy(), self.data.rdf_group_no,
self.data.array, payload_in.copy(), self.data.rdf_group_no_1,
{'exempt': False}, False)
ref_payload_3 = {'rdfMode': 'Active', 'rdfType': 'RDF1'}
self.assertEqual(ref_payload_3, act_payload_3)
# Not first vol in RDFG, consistency exempt set
act_payload_4 = self.rest.get_metro_payload_info(
self.data.array, payload_in.copy(), self.data.rdf_group_no,
self.data.array, payload_in.copy(), self.data.rdf_group_no_1,
{'exempt': True}, True)
ref_payload_4 = {'rdfType': 'RDF1', 'exempt': 'true',
'rdfMode': 'Active'}
@ -1504,17 +1548,17 @@ class PowerMaxRestTest(test.TestCase):
def test_get_storagegroup_rdf_details(self):
details = self.rest.get_storagegroup_rdf_details(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no)
self.data.rdf_group_no_1)
self.assertEqual(self.data.sg_rdf_details[0], details)
def test_verify_rdf_state(self):
verify1 = self.rest._verify_rdf_state(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no, 'Failover')
self.data.rdf_group_no_1, 'Failover')
self.assertTrue(verify1)
verify2 = self.rest._verify_rdf_state(
self.data.array, self.data.test_fo_vol_group,
self.data.rdf_group_no, 'Establish')
self.data.rdf_group_no_1, 'Establish')
self.assertTrue(verify2)
def test_delete_storagegroup_rdf(self):
@ -1522,7 +1566,7 @@ class PowerMaxRestTest(test.TestCase):
self.rest, 'delete_resource') as mock_del:
self.rest.delete_storagegroup_rdf(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no)
self.data.rdf_group_no_1)
mock_del.assert_called_once()
def test_is_next_gen_array(self):
@ -1731,17 +1775,18 @@ class PowerMaxRestTest(test.TestCase):
return_value=tpd.PowerMaxData.sg_rdf_group_details)
def test_get_storage_group_rdf_group_state(self, mck_get):
ref_get_resource = ('storagegroup/%(sg)s/rdf_group/%(rdfg)s' % {
'sg': self.data.test_vol_grp_name, 'rdfg': self.data.rdf_group_no})
'sg': self.data.test_vol_grp_name,
'rdfg': self.data.rdf_group_no_1})
states = self.rest.get_storage_group_rdf_group_state(
self.data.array, self.data.test_vol_grp_name,
self.data.rdf_group_no)
self.data.rdf_group_no_1)
mck_get.assert_called_once_with(
self.data.array, 'replication', ref_get_resource)
self.assertEqual(states, [utils.RDF_SUSPENDED_STATE])
@mock.patch.object(rest.PowerMaxRest, 'get_resource')
def test_get_rdf_pair_volume(self, mck_get):
rdf_grp_no = self.data.rdf_group_no
rdf_grp_no = self.data.rdf_group_no_1
device_id = self.data.device_id
array = self.data.array
ref_get_resource = ('rdf_group/%(rdf_group)s/volume/%(device)s' % {
@ -1755,11 +1800,12 @@ class PowerMaxRestTest(test.TestCase):
def test_srdf_protect_storage_group(self, mck_create, mck_wait):
array_id = self.data.array
remote_array_id = self.data.remote_array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
replication_mode = utils.REP_METRO
sg_name = self.data.default_sg_re_enabled
service_level = 'Diamond'
extra_specs = self.data.rep_extra_specs_metro
extra_specs = deepcopy(self.data.rep_extra_specs)
extra_specs[utils.METROBIAS] = True
remote_sg = self.data.rdf_managed_async_grp
ref_payload = {
@ -1767,8 +1813,8 @@ class PowerMaxRestTest(test.TestCase):
'replicationMode': 'Active', 'remoteSLO': service_level,
'remoteSymmId': remote_array_id, 'rdfgNumber': rdf_group_no,
'remoteStorageGroupName': remote_sg, 'establish': 'true'}
ref_resource = ('storagegroup/%(sg_name)s/rdf_group' % {
'sg_name': sg_name})
ref_resource = ('storagegroup/%(sg_name)s/rdf_group' %
{'sg_name': sg_name})
self.rest.srdf_protect_storage_group(
array_id, remote_array_id, rdf_group_no, replication_mode,
@ -1781,7 +1827,7 @@ class PowerMaxRestTest(test.TestCase):
return_value=(200, 'job'))
def test_srdf_modify_group(self, mck_modify, mck_wait):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
payload = {'executionOption': 'ASYNCHRONOUS', 'action': 'Suspend'}
extra_specs = self.data.rep_extra_specs
@ -1800,7 +1846,7 @@ class PowerMaxRestTest(test.TestCase):
return_value=(200, 'job'))
def test_srdf_modify_group_async_call_false(self, mck_modify, mck_wait):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
payload = {'action': 'Suspend'}
extra_specs = self.data.rep_extra_specs
@ -1819,7 +1865,7 @@ class PowerMaxRestTest(test.TestCase):
return_value=[utils.RDF_CONSISTENT_STATE])
def test_srdf_suspend_replication(self, mck_get, mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs
@ -1836,7 +1882,7 @@ class PowerMaxRestTest(test.TestCase):
def test_srdf_suspend_replication_already_suspended(self, mck_get,
mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs
@ -1849,9 +1895,11 @@ class PowerMaxRestTest(test.TestCase):
return_value=[utils.RDF_SUSPENDED_STATE])
def test_srdf_resume_replication(self, mck_get, mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs
rep_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
rep_extra_specs[utils.REP_MODE] = utils.REP_ASYNC
self.rest.srdf_resume_replication(
array_id, sg_name, rdf_group_no, rep_extra_specs)
@ -1864,9 +1912,10 @@ class PowerMaxRestTest(test.TestCase):
return_value=[utils.RDF_SUSPENDED_STATE])
def test_srdf_resume_replication_metro(self, mck_get, mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs_metro
rep_extra_specs = deepcopy(self.data.rep_extra_specs_metro)
rep_extra_specs[utils.REP_MODE] = utils.REP_METRO
self.rest.srdf_resume_replication(
array_id, sg_name, rdf_group_no, rep_extra_specs)
@ -1881,7 +1930,7 @@ class PowerMaxRestTest(test.TestCase):
def test_srdf_resume_replication_already_resumed(self, mck_get,
mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs
@ -1894,7 +1943,7 @@ class PowerMaxRestTest(test.TestCase):
return_value=[utils.RDF_CONSISTENT_STATE])
def test_srdf_establish_replication(self, mck_get, mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs
@ -1912,7 +1961,7 @@ class PowerMaxRestTest(test.TestCase):
@mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group')
def test_srdf_failover_group(self, mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs
@ -1925,7 +1974,7 @@ class PowerMaxRestTest(test.TestCase):
@mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group')
def test_srdf_failback_group(self, mck_modify):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = self.data.rep_extra_specs
@ -1961,7 +2010,7 @@ class PowerMaxRestTest(test.TestCase):
@mock.patch.object(rest.PowerMaxRest, 'delete_resource')
def test_srdf_delete_device_pair(self, mck_del):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
device_id = self.data.device_id
ref_resource = ('%(rdfg)s/volume/%(dev)s' % {
'rdfg': rdf_group_no, 'dev': device_id})
@ -1981,11 +2030,12 @@ class PowerMaxRestTest(test.TestCase):
self, mck_create, mck_wait, mck_get):
array_id = self.data.array
remote_array = self.data.remote_array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
mode = utils.REP_ASYNC
device_id = self.data.device_id
tgt_device_id = self.data.device_id2
rep_extra_specs = self.data.rep_extra_specs
rep_extra_specs['array'] = remote_array
ref_payload = {
'executionOption': 'ASYNCHRONOUS', 'rdfMode': mode,
@ -2015,11 +2065,12 @@ class PowerMaxRestTest(test.TestCase):
self, mck_create, mck_wait, mck_get):
array_id = self.data.array
remote_array = self.data.remote_array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
mode = utils.REP_SYNC
device_id = self.data.device_id
tgt_device_id = self.data.device_id2
rep_extra_specs = self.data.rep_extra_specs
rep_extra_specs[utils.ARRAY] = remote_array
ref_payload = {
'executionOption': 'ASYNCHRONOUS', 'rdfMode': mode,
@ -2043,7 +2094,7 @@ class PowerMaxRestTest(test.TestCase):
return_value=[utils.RDF_CONSISTENT_STATE])
def test_wait_for_rdf_group_sync(self, mck_get):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs['sync_retries'] = 2
@ -2057,7 +2108,7 @@ class PowerMaxRestTest(test.TestCase):
return_value=[utils.RDF_SYNCINPROG_STATE])
def test_wait_for_rdf_group_sync_fail(self, mck_get):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs['sync_retries'] = 1
@ -2071,7 +2122,7 @@ class PowerMaxRestTest(test.TestCase):
return_value=tpd.PowerMaxData.rdf_group_vol_details)
def test_wait_for_rdf_pair_sync(self, mck_get):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs['sync_retries'] = 2
@ -2086,7 +2137,7 @@ class PowerMaxRestTest(test.TestCase):
return_value=tpd.PowerMaxData.rdf_group_vol_details_not_synced)
def test_wait_for_rdf_pair_sync_fail(self, mck_get):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs['sync_retries'] = 1


@ -41,10 +41,12 @@ class PowerMaxUtilsTest(test.TestCase):
self.data = tpd.PowerMaxData()
volume_utils.get_max_over_subscription_ratio = mock.Mock()
super(PowerMaxUtilsTest, self).setUp()
self.replication_device = self.data.sync_rep_device
configuration = tpfo.FakeConfiguration(
None, 'UtilsTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
vmax_array=self.data.array, vmax_srp='SRP_1', san_password='smc',
san_api_port=8443, vmax_port_groups=[self.data.port_group_name_i])
san_api_port=8443, vmax_port_groups=[self.data.port_group_name_i],
replication_device=self.replication_device)
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
driver = iscsi.PowerMaxISCSIDriver(configuration=configuration)
@ -268,13 +270,13 @@ class PowerMaxUtilsTest(test.TestCase):
rep_device_list1 = [{'target_device_id': self.data.remote_array,
'remote_pool': self.data.srp,
'remote_port_group': self.data.port_group_name_f,
'rdf_group_label': self.data.rdf_group_name}]
rep_config1 = self.utils.get_replication_config(rep_device_list1)
'rdf_group_label': self.data.rdf_group_name_1}]
rep_config1 = self.utils.get_replication_config(rep_device_list1)[0]
self.assertEqual(self.data.remote_array, rep_config1['array'])
# Success, allow_extend true
rep_device_list2 = rep_device_list1
rep_device_list2[0]['allow_extend'] = 'true'
rep_config2 = self.utils.get_replication_config(rep_device_list2)
rep_config2 = self.utils.get_replication_config(rep_device_list2)[0]
self.assertTrue(rep_config2['allow_extend'])
# No rep_device_list
rep_device_list3 = []
@ -288,34 +290,50 @@ class PowerMaxUtilsTest(test.TestCase):
# Success, mode is async
rep_device_list5 = rep_device_list2
rep_device_list5[0]['mode'] = 'async'
rep_config5 = self.utils.get_replication_config(rep_device_list5)
rep_config5 = self.utils.get_replication_config(rep_device_list5)[0]
self.assertEqual(utils.REP_ASYNC, rep_config5['mode'])
# Success, mode is metro - no other options set
rep_device_list6 = rep_device_list5
rep_device_list6[0]['mode'] = 'metro'
rep_config6 = self.utils.get_replication_config(rep_device_list6)
rep_config6 = self.utils.get_replication_config(rep_device_list6)[0]
self.assertFalse(rep_config6['metro_bias'])
# Success, mode is metro - metro options true
rep_device_list7 = rep_device_list6
rep_device_list6[0].update({'metro_use_bias': 'true'})
rep_config7 = self.utils.get_replication_config(rep_device_list7)
rep_device_list7[0].update({'metro_use_bias': 'true'})
rep_config7 = self.utils.get_replication_config(rep_device_list7)[0]
self.assertTrue(rep_config7['metro_bias'])
# Success, no backend id
self.assertIsNone(rep_config7.get(utils.BACKEND_ID))
# Success, backend id
rep_device_list8 = rep_device_list6
rep_device_list8[0].update(
{utils.BACKEND_ID: self.data.rep_backend_id_sync})
rep_config8 = self.utils.get_replication_config(rep_device_list8)[0]
self.assertEqual(
self.data.rep_backend_id_sync, rep_config8[utils.BACKEND_ID])
# Success, multi-rep
multi_rep_device_list = self.data.multi_rep_device
multi_rep_config = self.utils.get_replication_config(
multi_rep_device_list)
self.assertTrue(len(multi_rep_config) > 1)
for rep_config in multi_rep_config:
self.assertEqual(rep_config['array'], self.data.remote_array)
def test_get_replication_config_sync_retries_intervals(self):
# Default sync interval & retry values
rep_device_list1 = [{'target_device_id': self.data.remote_array,
'remote_pool': self.data.srp,
'remote_port_group': self.data.port_group_name_f,
'rdf_group_label': self.data.rdf_group_name}]
'rdf_group_label': self.data.rdf_group_name_1}]
rep_config1 = self.utils.get_replication_config(rep_device_list1)
rep_config1 = self.utils.get_replication_config(rep_device_list1)[0]
self.assertEqual(200, rep_config1['sync_retries'])
self.assertEqual(3, rep_config1['sync_interval'])
# User set interval & retry values
rep_device_list2 = deepcopy(rep_device_list1)
rep_device_list2[0].update({'sync_retries': 300, 'sync_interval': 1})
rep_config2 = self.utils.get_replication_config(rep_device_list2)
rep_config2 = self.utils.get_replication_config(rep_device_list2)[0]
self.assertEqual(300, rep_config2['sync_retries'])
self.assertEqual(1, rep_config2['sync_interval'])
@ -421,7 +439,7 @@ class PowerMaxUtilsTest(test.TestCase):
self.assertEqual('-RM', metro_prefix)
def test_get_rdf_management_group_name(self):
rep_config = {'rdf_group_label': self.data.rdf_group_name,
rep_config = {'rdf_group_label': self.data.rdf_group_name_1,
'mode': utils.REP_ASYNC}
grp_name = self.utils.get_rdf_management_group_name(rep_config)
self.assertEqual(self.data.rdf_managed_async_grp, grp_name)
@ -438,6 +456,7 @@ class PowerMaxUtilsTest(test.TestCase):
def test_does_vol_need_rdf_management_group(self):
extra_specs = deepcopy(self.data.rep_extra_specs)
extra_specs['rep_mode'] = utils.REP_SYNC
self.assertFalse(self.utils.does_vol_need_rdf_management_group(
extra_specs))
extra_specs[utils.REP_MODE] = utils.REP_ASYNC
@ -459,9 +478,36 @@ class PowerMaxUtilsTest(test.TestCase):
self.assertEqual(expected_snap_name, updated_name)
def test_change_replication(self):
new_type = {'extra_specs': self.data.extra_specs_rep_enabled}
self.assertFalse(self.utils.change_replication(True, new_type))
self.assertTrue(self.utils.change_replication(False, new_type))
non_rep_extra_specs = self.data.extra_specs
rep_extra_specs = self.data.extra_specs_rep_enabled
change_rep = self.utils.change_replication(
non_rep_extra_specs, rep_extra_specs)
self.assertTrue(change_rep)
def test_change_replication_different_backend_id(self):
rep_extra_specs_a = deepcopy(self.data.extra_specs_rep_enabled)
rep_extra_specs_a[utils.REPLICATION_DEVICE_BACKEND_ID] = 'A'
rep_extra_specs_b = deepcopy(self.data.extra_specs_rep_enabled)
rep_extra_specs_b[utils.REPLICATION_DEVICE_BACKEND_ID] = 'B'
change_rep = self.utils.change_replication(
rep_extra_specs_a, rep_extra_specs_b)
self.assertTrue(change_rep)
def test_change_replication_no_change(self):
non_rep_extra_specs_a = self.data.extra_specs
non_rep_extra_specs_b = self.data.extra_specs
change_rep = self.utils.change_replication(
non_rep_extra_specs_a, non_rep_extra_specs_b)
self.assertFalse(change_rep)
def test_change_replication_no_change_same_backend_id(self):
rep_extra_specs_a = deepcopy(self.data.extra_specs_rep_enabled)
rep_extra_specs_a[utils.REPLICATION_DEVICE_BACKEND_ID] = 'A'
rep_extra_specs_b = deepcopy(self.data.extra_specs_rep_enabled)
rep_extra_specs_b[utils.REPLICATION_DEVICE_BACKEND_ID] = 'A'
change_rep = self.utils.change_replication(
rep_extra_specs_a, rep_extra_specs_b)
self.assertFalse(change_rep)
def test_get_child_sg_name(self):
host_name = 'HostX'
@ -1158,8 +1204,8 @@ class PowerMaxUtilsTest(test.TestCase):
def test_get_unique_device_ids_from_lists(self):
list_a = ['00001', '00002', '00003']
list_b = ['00002', '00003', '00004']
unique_ids = self.utils.get_unique_device_ids_from_lists(list_a,
list_b)
unique_ids = self.utils.get_unique_device_ids_from_lists(
list_a, list_b)
self.assertEqual(['00004'], unique_ids)
def test_update_payload_for_rdf_vol_create(self):
@ -1195,23 +1241,305 @@ class PowerMaxUtilsTest(test.TestCase):
tgt_extra_specs = deepcopy(self.data.rep_extra_specs)
tgt_extra_specs['rep_mode'] = utils.REP_METRO
self.assertTrue(self.utils.is_retype_supported(volume, src_extra_specs,
tgt_extra_specs))
rep_configs = self.data.multi_rep_config_list
src_extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = (
self.data.rep_backend_id_sync)
tgt_extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = (
self.data.rep_backend_id_metro)
self.assertTrue(self.utils.is_retype_supported(
volume, src_extra_specs, tgt_extra_specs, rep_configs))
# Volume source type not replicated, target type Metro replicated,
# volume is attached, host-assisted retype not supported
volume.attach_status = 'attached'
self.assertFalse(self.utils.is_retype_supported(
volume, src_extra_specs, tgt_extra_specs))
volume, src_extra_specs, tgt_extra_specs, rep_configs))
# Volume source type Async replicated, target type Metro replicated,
# volume is attached, host-assisted retype not supported
src_extra_specs['rep_mode'] = utils.REP_ASYNC
self.assertFalse(self.utils.is_retype_supported(
volume, src_extra_specs, tgt_extra_specs))
volume, src_extra_specs, tgt_extra_specs, rep_configs))
# Volume source type Metro replicated, target type Metro replicated,
# volume is attached, host-assisted retype supported
src_extra_specs['rep_mode'] = utils.REP_METRO
self.assertTrue(self.utils.is_retype_supported(
volume, src_extra_specs, tgt_extra_specs))
volume, src_extra_specs, tgt_extra_specs, rep_configs))
def test_validate_multiple_rep_device(self):
self.utils.validate_multiple_rep_device(self.data.multi_rep_device)
def test_validate_multiple_rep_device_non_unique_backend_id(self):
rep_devices = self.data.multi_rep_device
rep_devices[0][utils.BACKEND_ID] = rep_devices[1][utils.BACKEND_ID]
self.assertRaises(
exception.InvalidConfigurationValue,
self.utils.validate_multiple_rep_device,
self.data.multi_rep_device)
def test_validate_multiple_rep_device_missing_backend_id(self):
rep_devices = self.data.multi_rep_device
rep_devices[0].pop(utils.BACKEND_ID)
self.assertRaises(
exception.InvalidConfigurationValue,
self.utils.validate_multiple_rep_device,
self.data.multi_rep_device)
def test_validate_multiple_rep_device_non_unique_rdf_label(self):
rep_devices = self.data.multi_rep_device
rep_devices[0]['rdf_group_label'] = rep_devices[1]['rdf_group_label']
self.assertRaises(
exception.InvalidConfigurationValue,
self.utils.validate_multiple_rep_device,
self.data.multi_rep_device)
def test_validate_multiple_rep_device_non_unique_rdf_modes(self):
rep_devices = [self.data.rep_dev_1, self.data.rep_dev_2]
rep_devices[1]['mode'] = rep_devices[0]['mode']
self.assertRaises(
exception.InvalidConfigurationValue,
self.utils.validate_multiple_rep_device,
rep_devices)
def test_validate_multiple_rep_device_multiple_targets(self):
rep_devices = [self.data.rep_dev_1, self.data.rep_dev_2]
rep_devices[1]['target_device_id'] = 1234
self.assertRaises(
exception.InvalidConfigurationValue,
self.utils.validate_multiple_rep_device,
rep_devices)
def test_get_rep_config_single_rep(self):
rep_configs = self.data.sync_rep_config_list
rep_config = self.utils.get_rep_config('test', rep_configs)
self.assertEqual(rep_config, rep_configs[0])
def test_get_rep_config_multi_rep(self):
rep_configs = self.data.multi_rep_config_list
backend_id = rep_configs[0][utils.BACKEND_ID]
rep_device = self.utils.get_rep_config(backend_id, rep_configs)
self.assertEqual(rep_configs[0], rep_device)
def test_get_rep_config_fail(self):
rep_configs = self.data.multi_rep_config_list
backend_id = 'invalid key'
self.assertRaises(exception.InvalidInput, self.utils.get_rep_config,
backend_id, rep_configs)
def test_get_replication_targets(self):
rep_targets_expected = [self.data.remote_array]
rep_configs = self.data.multi_rep_config_list
rep_targets_actual = self.utils.get_replication_targets(rep_configs)
self.assertEqual(rep_targets_expected, rep_targets_actual)
def test_validate_failover_request_success(self):
is_failed_over = False
failover_backend_id = self.data.rep_backend_id_sync
rep_configs = self.data.multi_rep_config_list
is_valid, msg = self.utils.validate_failover_request(
is_failed_over, failover_backend_id, rep_configs)
self.assertTrue(is_valid)
self.assertEqual("", msg)
def test_validate_failover_request_already_failed_over(self):
is_failed_over = True
failover_backend_id = self.data.rep_backend_id_sync
rep_configs = self.data.multi_rep_config_list
is_valid, msg = self.utils.validate_failover_request(
is_failed_over, failover_backend_id, rep_configs)
self.assertFalse(is_valid)
expected_msg = ('Cannot failover, the backend is already in a failed '
'over state. If you meant to failback, please add '
'--backend_id default to the command.')
self.assertEqual(expected_msg, msg)
def test_validate_failover_request_invalid_failback(self):
is_failed_over = False
failover_backend_id = 'default'
rep_configs = self.data.multi_rep_config_list
is_valid, msg = self.utils.validate_failover_request(
is_failed_over, failover_backend_id, rep_configs)
self.assertFalse(is_valid)
expected_msg = ('Cannot failback, backend is not in a failed over '
'state. If you meant to failover, please either omit '
'the --backend_id parameter or use the --backend_id '
'parameter with a valid backend id.')
self.assertEqual(expected_msg, msg)
def test_validate_failover_request_no_backend_id_multi_rep(self):
is_failed_over = False
failover_backend_id = None
rep_configs = self.data.multi_rep_config_list
is_valid, msg = self.utils.validate_failover_request(
is_failed_over, failover_backend_id, rep_configs)
self.assertFalse(is_valid)
expected_msg = ('Cannot failover, no backend_id provided while '
'multiple replication devices are defined in '
'cinder.conf. Please provide a backend_id '
'which will act as the new primary array by '
'appending --backend_id <id> to your command.')
self.assertEqual(expected_msg, msg)
def test_validate_failover_request_incorrect_backend_id_multi_rep(self):
is_failed_over = False
failover_backend_id = 'invalid_id'
rep_configs = self.data.multi_rep_config_list
self.assertRaises(exception.InvalidInput,
self.utils.validate_failover_request,
is_failed_over, failover_backend_id, rep_configs)
def test_validate_replication_group_config_success(self):
rep_configs = deepcopy(self.data.multi_rep_config_list)
extra_specs = deepcopy(
self.data.vol_type_extra_specs_rep_enabled_backend_id_sync)
extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = (
self.data.rep_backend_id_sync)
self.utils.validate_replication_group_config(
rep_configs, [extra_specs])
def test_validate_replication_group_config_no_rep_configured(self):
rep_configs = None
extra_specs_list = [
self.data.vol_type_extra_specs_rep_enabled_backend_id_sync]
self.assertRaises(exception.InvalidInput,
self.utils.validate_replication_group_config,
rep_configs, extra_specs_list)
try:
self.utils.validate_replication_group_config(
rep_configs, extra_specs_list)
except exception.InvalidInput as e:
expected_msg = (
'Invalid input received: No replication devices are defined '
'in cinder.conf, cannot enable volume group replication.')
self.assertEqual(expected_msg, e.msg)
def test_validate_replication_group_config_vol_type_not_rep_enabled(self):
rep_configs = self.data.multi_rep_config_list
extra_specs_list = [self.data.vol_type_extra_specs]
self.assertRaises(exception.InvalidInput,
self.utils.validate_replication_group_config,
rep_configs, extra_specs_list)
try:
self.utils.validate_replication_group_config(
rep_configs, extra_specs_list)
except exception.InvalidInput as e:
expected_msg = (
'Invalid input received: Replication is not enabled for a '
'Volume Type, all Volume Types in a replication enabled '
'Volume Group must have replication enabled.')
self.assertEqual(expected_msg, e.msg)
def test_validate_replication_group_config_cant_get_rep_config(self):
rep_configs = self.data.multi_rep_config_list
vt_extra_specs = (
self.data.vol_type_extra_specs_rep_enabled_backend_id_sync)
vt_extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = 'invalid'
extra_specs_list = [vt_extra_specs]
self.assertRaises(exception.InvalidInput,
self.utils.validate_replication_group_config,
rep_configs, extra_specs_list)
try:
self.utils.validate_replication_group_config(
rep_configs, extra_specs_list)
except exception.InvalidInput as e:
expected_msg = (
'Invalid input received: Unable to determine which '
'rep_device to use from cinder.conf. Could not validate '
'volume types being added to group.')
self.assertEqual(expected_msg, e.msg)
def test_validate_replication_group_config_non_sync_mode(self):
rep_configs = self.data.multi_rep_config_list
extra_specs_list = [
self.data.vol_type_extra_specs_rep_enabled_backend_id_async]
self.assertRaises(exception.InvalidInput,
self.utils.validate_replication_group_config,
rep_configs, extra_specs_list)
try:
self.utils.validate_replication_group_config(
rep_configs, extra_specs_list)
except exception.InvalidInput as e:
expected_msg = (
'Invalid input received: Replication for Volume Type is not '
'set to Synchronous. Only Synchronous can be used with '
'replication groups')
self.assertEqual(expected_msg, e.msg)
@mock.patch.object(utils.PowerMaxUtils, 'get_rep_config')
def test_validate_replication_group_config_multiple_rep_backend_ids(
self, mck_get):
side_effect_list = [
self.data.rep_config_sync, self.data.rep_config_sync_2]
mck_get.side_effect = side_effect_list
rep_configs = self.data.multi_rep_config_list
ex_specs_1 = deepcopy(
self.data.vol_type_extra_specs_rep_enabled_backend_id_sync)
ex_specs_2 = deepcopy(
self.data.vol_type_extra_specs_rep_enabled_backend_id_sync_2)
extra_specs_list = [ex_specs_1, ex_specs_2]
self.assertRaises(exception.InvalidInput,
self.utils.validate_replication_group_config,
rep_configs, extra_specs_list)
mck_get.side_effect = side_effect_list
try:
self.utils.validate_replication_group_config(
rep_configs, extra_specs_list)
except exception.InvalidInput as e:
expected_msg = (
'Invalid input received: Multiple replication backend ids '
'detected, please ensure only a single replication device '
'(backend_id) is used for all Volume Types in a Volume '
'Group.')
self.assertEqual(expected_msg, e.msg)
def test_validate_non_replication_group_config_success(self):
extra_specs_list = [
self.data.vol_type_extra_specs]
self.utils.validate_non_replication_group_config(extra_specs_list)
def test_validate_non_replication_group_config_failure(self):
extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123',
utils.IS_RE: '<is> True'}
self.assertRaises(exception.InvalidInput,
self.utils.validate_non_replication_group_config,
[extra_specs])
try:
self.utils.validate_non_replication_group_config([extra_specs])
except exception.InvalidInput as e:
expected_msg = (
'Invalid input received: Replication is enabled in one or '
'more of the Volume Types being added to new Volume Group '
'but the Volume Group is not replication enabled. Please '
'enable replication in the Volume Group or select only '
'non-replicated Volume Types.')
self.assertEqual(expected_msg, e.msg)
def test_get_migration_delete_extra_specs_replicated(self):
volume = self.data.test_volume
metadata = deepcopy(self.data.volume_metadata)
metadata[utils.IS_RE_CAMEL] = 'True'
metadata['ReplicationMode'] = utils.REP_SYNC
metadata['RDFG-Label'] = self.data.rdf_group_name_1
volume.metadata = metadata
extra_specs = deepcopy(self.data.extra_specs)
rep_configs = self.data.multi_rep_config_list
updated_extra_specs = self.utils.get_migration_delete_extra_specs(
volume, extra_specs, rep_configs)
ref_extra_specs = deepcopy(self.data.extra_specs)
ref_extra_specs[utils.IS_RE] = True
ref_extra_specs[utils.REP_MODE] = utils.REP_SYNC
ref_extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
ref_extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = (
self.data.rep_backend_id_sync)
self.assertEqual(ref_extra_specs, updated_extra_specs)
def test_get_migration_delete_extra_specs_non_replicated(self):
volume = self.data.test_volume
volume.metadata = self.data.volume_metadata
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.IS_RE] = True
updated_extra_specs = self.utils.get_migration_delete_extra_specs(
volume, extra_specs, None)
self.assertEqual(self.data.extra_specs, updated_extra_specs)

File diff suppressed because it is too large


@ -120,6 +120,7 @@ class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver):
- User defined override for short host name and port group name
(bp powermax-user-defined-hostname-portgroup)
- Switch to Unisphere REST API public replication endpoints
- Support for multiple replication devices
"""
VERSION = "4.2.0"


@ -125,6 +125,7 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
- User defined override for short host name and port group name
(bp powermax-user-defined-hostname-portgroup)
- Switch to Unisphere REST API public replication endpoints
- Support for multiple replication devices
"""
VERSION = "4.2.0"


@ -671,9 +671,10 @@ class PowerMaxMasking(object):
'sg_name': storagegroup_name})
else:
try:
force = True if extra_specs.get(utils.IS_RE) else False
self.add_volume_to_storage_group(
serial_number, device_id, storagegroup_name,
volume_name, extra_specs)
volume_name, extra_specs, force)
except Exception as e:
msg = ("Exception adding volume %(vol)s to %(sg)s. "
"Exception received was %(e)s."


@ -474,7 +474,7 @@ class PowerMaxVolumeMetadata(object):
None, None, None, None)
rep_mode, replication_status, rdf_group_label, use_bias = (
None, None, None, None)
target_array_model = None
target_array_model, backend_id = None, None
if rep_info_dict:
rdf_group_no = rep_info_dict['rdf_group_no']
target_name = rep_info_dict['target_name']
@ -483,6 +483,8 @@ class PowerMaxVolumeMetadata(object):
rep_mode = rep_info_dict['rep_mode']
replication_status = rep_info_dict['replication_status']
rdf_group_label = rep_info_dict['rdf_group_label']
backend_id = rep_info_dict['backend_id']
if utils.METROBIAS in extra_specs:
use_bias = extra_specs[utils.METROBIAS]
target_array_model = rep_info_dict['target_array_model']
@ -501,7 +503,7 @@ class PowerMaxVolumeMetadata(object):
openstack_name=volume.display_name,
source_volid=volume.source_volid,
group_name=group_name, group_id=group_id,
rdf_group_no=rdf_group_no,
rdf_group_no=rdf_group_no, backend_id=backend_id,
target_name=target_name, remote_array=remote_array,
target_device_id=target_device_id,
source_snapshot_id=source_snapshot_id,
@ -585,8 +587,8 @@ class PowerMaxVolumeMetadata(object):
successful_operation = "manage_existing_volume"
rdf_group_no, target_name, remote_array, target_device_id = (
None, None, None, None)
rep_mode, replication_status, rdf_group_label = (
None, None, None)
rep_mode, replication_status, rdf_group_label, backend_id = (
None, None, None, None)
if rep_info_dict:
rdf_group_no = rep_info_dict['rdf_group_no']
target_name = rep_info_dict['target_name']
@ -595,6 +597,7 @@ class PowerMaxVolumeMetadata(object):
rep_mode = rep_info_dict['rep_mode']
replication_status = rep_info_dict['replication_status']
rdf_group_label = rep_info_dict['rdf_group_label']
backend_id = rep_info_dict['backend_id']
default_sg = self.utils.derive_default_sg_from_extra_specs(
extra_specs, rep_mode)
@ -609,7 +612,7 @@ class PowerMaxVolumeMetadata(object):
identifier_name=self.utils.get_volume_element_name(volume.id),
openstack_name=volume.display_name,
source_volid=volume.source_volid,
rdf_group_no=rdf_group_no,
rdf_group_no=rdf_group_no, backend_id=backend_id,
target_name=target_name, remote_array=remote_array,
target_device_id=target_device_id,
rep_mode=rep_mode, replication_status=replication_status,
@ -623,7 +626,7 @@ class PowerMaxVolumeMetadata(object):
def capture_retype_info(
self, volume, device_id, array, srp, target_slo,
target_workload, target_sg_name, is_rep_enabled, rep_mode,
is_compression_disabled):
is_compression_disabled, target_backend_id):
"""Captures manage existing info in volume metadata
:param volume_id: volume identifier
@ -636,6 +639,7 @@ class PowerMaxVolumeMetadata(object):
:param is_rep_enabled: replication enabled flag
:param rep_mode: replication mode
:param is_compression_disabled: compression disabled flag
:param target_backend_id: target replication backend id
"""
successful_operation = "retype"
if not target_slo:
@ -651,12 +655,14 @@ class PowerMaxVolumeMetadata(object):
identifier_name=self.utils.get_volume_element_name(volume.id),
openstack_name=volume.display_name,
is_rep_enabled=('yes' if is_rep_enabled else 'no'),
rep_mode=rep_mode, is_compression_disabled=(
backend_id=target_backend_id, rep_mode=rep_mode,
is_compression_disabled=(
True if is_compression_disabled else False))
if not is_rep_enabled:
delete_list = ['rdf_group_no', 'rep_mode', 'target_array_model',
'service_level', 'remote_array', 'target_device_id',
'replication_status', 'rdf_group_label']
'replication_status', 'rdf_group_label',
'backend_id']
self.utils.delete_values_from_dict(datadict, delete_list)
update_list = [('default_sg_name', 'source_sg_name'),
('service_level', 'source_service_level')]


@ -94,6 +94,12 @@ class PowerMaxProvision(object):
def do_create_volume_from_sg(storage_group, array):
start_time = time.time()
if rep_info and rep_info.get('initial_device_list', False):
local_device_list = self.rest.get_volume_list(
extra_specs['array'],
{'storageGroupId': storagegroup_name})
rep_info['initial_device_list'] = local_device_list
volume_dict = self.rest.create_volume_from_sg(
array, volume_name, storage_group,
volume_size, extra_specs, rep_info)
@ -560,7 +566,7 @@ class PowerMaxProvision(object):
:param array: the array serial number
:param device_id: the source device id
:param sg_name: storage grto
:param sg_name: storage group
:param rdf_group: the rdf group number
:param rep_extra_specs: replication extra specs
:param state: the state of the rdf pair


@ -924,6 +924,7 @@ class PowerMaxRest(object):
task = self.wait_for_job('Create volume', status_code,
job, extra_specs)
# Find the newly created volume.
device_id = None
if rep_info:
updated_device_list = self.get_volume_list(


@ -65,6 +65,7 @@ VOL_NAME = 'volume_name'
EXTRA_SPECS = 'extra_specs'
HOST_NAME = 'short_host_name'
IS_RE = 'replication_enabled'
IS_RE_CAMEL = 'ReplicationEnabled'
DISABLECOMPRESSION = 'storagetype:disablecompression'
REP_SYNC = 'Synchronous'
REP_ASYNC = 'Asynchronous'
@ -82,6 +83,10 @@ RDF_CONS_EXEMPT = 'exempt'
RDF_ALLOW_METRO_DELETE = 'allow_delete_metro'
RDF_GROUP_NO = 'rdf_group_number'
METROBIAS = 'metro_bias'
BACKEND_ID = 'backend_id'
BACKEND_ID_LEGACY_REP = 'backend_id_legacy_rep'
REPLICATION_DEVICE_BACKEND_ID = 'storagetype:replication_device_backend_id'
REP_CONFIG = 'rep_config'
DEFAULT_PORT = 8443
CLONE_SNAPSHOT_NAME = "snapshot_for_clone"
STORAGE_GROUP_TAGS = 'storagetype:storagegrouptags'
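For illustration, a hypothetical volume type extra-specs dict showing how the new REPLICATION_DEVICE_BACKEND_ID key is expected to sit alongside the existing replication_enabled spec; the backend id value is made up.
# Hypothetical extra specs on a replicated volume type; the second key
# selects one replication_device entry from cinder.conf by its backend_id.
example_vol_type_extra_specs = {
    'replication_enabled': '<is> True',
    'storagetype:replication_device_backend_id': 'example_backend_id',
}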
@ -436,15 +441,31 @@ class PowerMaxUtils(object):
else:
return True
def change_replication(self, vol_is_replicated, new_type):
def change_replication(self, curr_type_extra_specs, tgt_type_extra_specs):
"""Check if volume types have different replication status.
:param vol_is_replicated: from source
:param new_type: from target
:param curr_type_extra_specs: extra specs from source volume type
:param tgt_type_extra_specs: extra specs from target volume type
:returns: bool
"""
is_tgt_rep = self.is_replication_enabled(new_type['extra_specs'])
return vol_is_replicated != is_tgt_rep
change_replication = False
# Compare non-rep & rep enabled changes
is_cur_rep = self.is_replication_enabled(curr_type_extra_specs)
is_tgt_rep = self.is_replication_enabled(tgt_type_extra_specs)
rep_enabled_diff = is_cur_rep != is_tgt_rep
if rep_enabled_diff:
change_replication = True
elif is_cur_rep:
# Both types are rep enabled, check for backend id differences
rdbid = REPLICATION_DEVICE_BACKEND_ID
curr_rep_backend_id = curr_type_extra_specs.get(rdbid, None)
tgt_rep_backend_id = tgt_type_extra_specs.get(rdbid, None)
rdbid_diff = curr_rep_backend_id != tgt_rep_backend_id
if rdbid_diff:
change_replication = True
return change_replication
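A minimal standalone sketch of the decision implemented above, using hypothetical extra-specs dicts; only the replication_enabled flag and the replication_device_backend_id spec are consulted.
def _needs_rep_change(cur, tgt):
    # Mirrors change_replication(): a retype changes replication when the
    # rep-enabled flag differs, or both types are replicated but point at
    # different replication_device backend ids.
    cur_re = cur.get('replication_enabled') == '<is> True'
    tgt_re = tgt.get('replication_enabled') == '<is> True'
    if cur_re != tgt_re:
        return True
    key = 'storagetype:replication_device_backend_id'
    return cur_re and cur.get(key) != tgt.get(key)

sync_type = {'replication_enabled': '<is> True',
             'storagetype:replication_device_backend_id': 'example_sync'}
metro_type = {'replication_enabled': '<is> True',
              'storagetype:replication_device_backend_id': 'example_metro'}
assert _needs_rep_change(sync_type, {}) is True           # rep -> non-rep
assert _needs_rep_change(sync_type, metro_type) is True   # backend id change
assert _needs_rep_change(sync_type, dict(sync_type)) is False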
@staticmethod
def is_replication_enabled(extra_specs):
@ -463,21 +484,26 @@ class PowerMaxUtils(object):
"""Gather necessary replication configuration info.
:param rep_device_list: the replication device list from cinder.conf
:returns: rep_config, replication configuration dict
:returns: rep_configs, replication configuration list
"""
rep_config = {}
rep_config = list()
if not rep_device_list:
return None
else:
target = rep_device_list[0]
for rep_device in rep_device_list:
rep_config_element = {}
try:
rep_config['array'] = target['target_device_id']
rep_config['srp'] = target['remote_pool']
rep_config['rdf_group_label'] = target['rdf_group_label']
rep_config['portgroup'] = target['remote_port_group']
rep_config_element['array'] = rep_device[
'target_device_id']
rep_config_element['srp'] = rep_device['remote_pool']
rep_config_element['rdf_group_label'] = rep_device[
'rdf_group_label']
rep_config_element['portgroup'] = rep_device[
'remote_port_group']
except KeyError as ke:
error_message = (_("Failed to retrieve all necessary SRDF "
error_message = (
_("Failed to retrieve all necessary SRDF "
"information. Error received: %(ke)s.") %
{'ke': six.text_type(ke)})
LOG.exception(error_message)
@ -485,35 +511,43 @@ class PowerMaxUtils(object):
message=error_message)
try:
rep_config['sync_retries'] = int(target['sync_retries'])
rep_config['sync_interval'] = int(target['sync_interval'])
rep_config_element['sync_retries'] = int(
rep_device['sync_retries'])
rep_config_element['sync_interval'] = int(
rep_device['sync_interval'])
except (KeyError, ValueError) as ke:
LOG.debug("SRDF Sync wait/retries options not set or set "
LOG.debug(
"SRDF Sync wait/retries options not set or set "
"incorrectly, defaulting to 200 retries with a 3 "
"second wait. Configuration load warning: %(ke)s.",
{'ke': six.text_type(ke)})
rep_config['sync_retries'] = 200
rep_config['sync_interval'] = 3
rep_config_element['sync_retries'] = 200
rep_config_element['sync_interval'] = 3
allow_extend = target.get('allow_extend', 'false')
allow_extend = rep_device.get('allow_extend', 'false')
if strutils.bool_from_string(allow_extend):
rep_config['allow_extend'] = True
rep_config_element['allow_extend'] = True
else:
rep_config['allow_extend'] = False
rep_config_element['allow_extend'] = False
rep_mode = target.get('mode', '')
rep_mode = rep_device.get('mode', '')
if rep_mode.lower() in ['async', 'asynchronous']:
rep_config['mode'] = REP_ASYNC
rep_config_element['mode'] = REP_ASYNC
elif rep_mode.lower() == 'metro':
rep_config['mode'] = REP_METRO
metro_bias = target.get('metro_use_bias', 'false')
rep_config_element['mode'] = REP_METRO
metro_bias = rep_device.get('metro_use_bias', 'false')
if strutils.bool_from_string(metro_bias):
rep_config[METROBIAS] = True
rep_config_element[METROBIAS] = True
else:
rep_config[METROBIAS] = False
rep_config_element[METROBIAS] = False
else:
rep_config['mode'] = REP_SYNC
rep_config_element['mode'] = REP_SYNC
backend_id = rep_device.get(BACKEND_ID, '')
if backend_id:
rep_config_element[BACKEND_ID] = backend_id
rep_config.append(rep_config_element)
return rep_config
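For illustration, a hypothetical two-entry replication_device list shaped the way this parser expects it from cinder.conf; every key below is one the code above reads, and all values are made up.
example_rep_device_list = [
    {'backend_id': 'example_sync', 'target_device_id': '000111222333',
     'remote_pool': 'SRP_1', 'remote_port_group': 'example_pg',
     'rdf_group_label': 'example_label_1', 'mode': 'sync',
     'sync_interval': 1, 'sync_retries': 300},
    {'backend_id': 'example_metro', 'target_device_id': '000111222333',
     'remote_pool': 'SRP_1', 'remote_port_group': 'example_pg',
     'rdf_group_label': 'example_label_2', 'mode': 'metro',
     'metro_use_bias': 'true'},
]
# get_replication_config() would return one dict per entry: the first
# with mode REP_SYNC and the user-set interval/retries, the second with
# mode REP_METRO, metro_bias True and the default 3/200 values.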
@staticmethod
@ -737,12 +771,12 @@ class PowerMaxUtils(object):
:param rep_config: the replication configuration
:returns: group name
"""
async_grp_name = ("OS-%(rdf)s-%(mode)s-rdf-sg"
% {'rdf': rep_config['rdf_group_label'],
grp_name = ("OS-%(rdf)s-%(mode)s-rdf-sg" %
{'rdf': rep_config['rdf_group_label'],
'mode': rep_config['mode']})
LOG.debug("The async/ metro rdf managed group name is %(name)s",
{'name': async_grp_name})
return async_grp_name
LOG.debug("The rdf managed group name is %(name)s",
{'name': grp_name})
return grp_name
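A tiny runnable sketch of the naming convention applied above, with a made-up rdf_group_label.
example_rep_config = {'rdf_group_label': 'example_label',
                      'mode': 'Asynchronous'}
managed_sg = ("OS-%(rdf)s-%(mode)s-rdf-sg" %
              {'rdf': example_rep_config['rdf_group_label'],
               'mode': example_rep_config['mode']})
assert managed_sg == 'OS-example_label-Asynchronous-rdf-sg'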
def is_metro_device(self, rep_config, extra_specs):
"""Determine if a volume is a Metro enabled device.
@ -753,7 +787,7 @@ class PowerMaxUtils(object):
"""
is_metro = (True if self.is_replication_enabled(extra_specs)
and rep_config is not None
and rep_config['mode'] == REP_METRO else False)
and rep_config.get('mode') == REP_METRO else False)
return is_metro
def does_vol_need_rdf_management_group(self, extra_specs):
@ -1046,6 +1080,64 @@ class PowerMaxUtils(object):
message=exception_message)
return property_dict
@staticmethod
def validate_multiple_rep_device(rep_devices):
"""Validate the validity of multiple replication devices.
Validates uniqueness and presence of backend ids in rep_devices,
consistency in target arrays and replication modes when multiple
replication devices are present in cinder.conf.
:param rep_devices: rep_devices imported from cinder.conf -- list
"""
rdf_group_labels = set()
backend_ids = set()
rep_modes = set()
target_arrays = set()
for rep_device in rep_devices:
backend_id = rep_device.get(BACKEND_ID)
if backend_id:
if backend_id in backend_ids:
msg = (_('Backend IDs must be unique across all '
'rep_device when multiple replication devices '
'are defined in cinder.conf; backend_id %s is '
'defined more than once.') % backend_id)
raise exception.InvalidConfigurationValue(msg)
else:
msg = _('Backend IDs must be assigned for each rep_device '
'when multiple replication devices are defined in '
'cinder.conf.')
raise exception.InvalidConfigurationValue(msg)
backend_ids.add(backend_id)
rdf_group_label = rep_device.get('rdf_group_label')
if rdf_group_label in rdf_group_labels:
msg = (_('RDF Group Labels must be unique across all '
'rep_device when multiple replication devices are '
'defined in cinder.conf. RDF Group Label %s is '
'defined more than once.') % rdf_group_label)
raise exception.InvalidConfigurationValue(msg)
rdf_group_labels.add(rdf_group_label)
rep_mode = rep_device.get('mode', REP_SYNC)
if rep_mode in rep_modes:
msg = (_('RDF Modes must be unique across all '
'replication_device. Found multiple instances of %s '
'mode defined in cinder.conf.') % rep_mode)
raise exception.InvalidConfigurationValue(msg)
rep_modes.add(rep_mode)
target_device_id = rep_device.get('target_device_id')
target_arrays.add(target_device_id)
target_arrays.discard(None)
if len(target_arrays) > 1:
msg = _('Found multiple target_device_id values set in '
'cinder.conf. A single target_device_id value must be '
'used across all replication_device entries when '
'multiple replication devices are defined.')
raise exception.InvalidConfigurationValue(msg)
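As a rough illustration of the rules enforced above, a hypothetical pair of replication_device entries that would pass validation (ids, labels and serials are invented); reusing a mode, label or backend_id, omitting a backend_id, or mixing target arrays would raise InvalidConfigurationValue.
valid_rep_devices = [
    {'backend_id': 'example_sync', 'rdf_group_label': 'example_label_1',
     'mode': 'Synchronous', 'target_device_id': '000111222333'},
    {'backend_id': 'example_async', 'rdf_group_label': 'example_label_2',
     'mode': 'Asynchronous', 'target_device_id': '000111222333'},
]
# e.g. setting both modes to 'Synchronous' here is exactly the kind of
# duplicate the checks above reject.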
@staticmethod
def compare_cylinders(cylinders_source, cylinder_target):
"""Compare number of cylinders of source and target.
@ -1556,19 +1648,27 @@ class PowerMaxUtils(object):
return payload
@staticmethod
def is_retype_supported(volume, src_extra_specs, tgt_extra_specs):
def is_retype_supported(volume, src_extra_specs, tgt_extra_specs,
rep_configs):
"""Determine if a retype operation involving Metro is supported.
:param volume: the volume object -- obj
:param src_extra_specs: the source extra specs -- dict
:param tgt_extra_specs: the target extra specs -- dict
:param rep_configs: imported cinder.conf replication devices -- list
:returns: is supported -- bool
"""
if volume.attach_status == 'detached':
return True
src_rep_mode = src_extra_specs.get('rep_mode', None)
tgt_rep_mode = tgt_extra_specs.get('rep_mode', None)
tgt_rep_mode = None
if PowerMaxUtils.is_replication_enabled(tgt_extra_specs):
target_backend_id = tgt_extra_specs.get(
REPLICATION_DEVICE_BACKEND_ID, BACKEND_ID_LEGACY_REP)
target_rep_config = PowerMaxUtils.get_rep_config(
target_backend_id, rep_configs)
tgt_rep_mode = target_rep_config.get('mode', REP_SYNC)
if tgt_rep_mode != REP_METRO:
return True
@ -1578,3 +1678,196 @@ class PowerMaxUtils(object):
else:
if not src_rep_mode or src_rep_mode in [REP_SYNC, REP_ASYNC]:
return False
@staticmethod
def get_rep_config(backend_id, rep_configs):
"""Get rep_config for given backend_id.
:param backend_id: rep config search key -- str
:param rep_configs: backend rep_configs -- list
:returns: rep_config -- dict
"""
if len(rep_configs) == 1:
rep_device = rep_configs[0]
else:
rep_device = None
for rep_config in rep_configs:
if rep_config[BACKEND_ID] == backend_id:
rep_device = rep_config
if rep_device is None:
msg = _('Could not find a rep_device with a backend_id of '
'%s. Please confirm that the '
'replication_device_backend_id extra spec for this '
'volume type matches the backend_id of the intended '
'rep_device in cinder.conf') % backend_id
LOG.error(msg)
raise exception.InvalidInput('Unable to get rep config.')
return rep_device
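A standalone sketch of the lookup rule above: a single-entry list is returned outright, otherwise the entry whose backend_id matches wins (ids are hypothetical, and None stands in for the InvalidInput the real helper raises).
def _lookup_rep_config(backend_id, rep_configs):
    # Mirrors get_rep_config() for illustration only.
    if len(rep_configs) == 1:
        return rep_configs[0]
    return next((rc for rc in rep_configs
                 if rc['backend_id'] == backend_id), None)

configs = [{'backend_id': 'example_a'}, {'backend_id': 'example_b'}]
assert _lookup_rep_config('example_b', configs) == {'backend_id': 'example_b'}
assert _lookup_rep_config('anything', configs[:1]) == {'backend_id': 'example_a'}
assert _lookup_rep_config('missing', configs) is None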
@staticmethod
def get_replication_targets(rep_configs):
"""Set the replication targets for the backend.
:param rep_configs: backend rep_configs -- list
:returns: arrays configured for replication -- list
"""
replication_targets = set()
if rep_configs:
for rep_config in rep_configs:
array = rep_config.get(ARRAY)
if array:
replication_targets.add(array)
return list(replication_targets)
def validate_failover_request(self, is_failed_over, failover_backend_id,
rep_configs):
"""Validate failover_host request's parameters
Validate that a failover_host operation can be performed with
the user entered parameters and system configuration/state
:param is_failed_over: current failover state
:param failover_backend_id: backend_id given during failover request
:param rep_configs: backend rep_configs -- list
:return: (bool, str) is valid, reason on invalid
"""
is_valid = True
msg = ""
if is_failed_over:
if failover_backend_id != 'default':
is_valid = False
msg = _('Cannot failover, the backend is already in a failed '
'over state. If you meant to failback, please add '
'--backend_id default to the command.')
else:
if failover_backend_id == 'default':
is_valid = False
msg = _('Cannot failback, backend is not in a failed over '
'state. If you meant to failover, please either omit '
'the --backend_id parameter or use the --backend_id '
'parameter with a valid backend id.')
elif len(rep_configs) > 1:
if failover_backend_id is None:
is_valid = False
msg = _('Cannot failover, no backend_id provided while '
'multiple replication devices are defined in '
'cinder.conf. Please provide a backend_id '
'which will act as the new primary array by '
'appending --backend_id <id> to your command.')
else:
rc = self.get_rep_config(failover_backend_id, rep_configs)
if rc is None:
is_valid = False
msg = _('Cannot find replication device with '
'backend_id of %s') % failover_backend_id
return is_valid, msg
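# Illustrative sketch, not part of the driver: checking failover request
# parameters against the current failover state. The backend id
# 'backend_async' is hypothetical.
from cinder.volume.drivers.dell_emc.powermax import utils as pmax_utils

u = pmax_utils.PowerMaxUtils()
rep_configs = [{pmax_utils.BACKEND_ID: 'backend_async'}]

# Already failed over: only a failback to 'default' is valid.
print(u.validate_failover_request(True, 'default', rep_configs))  # (True, '')
print(u.validate_failover_request(True, 'backend_async', rep_configs))

# Not failed over: 'default' is rejected because there is nothing to failback.
print(u.validate_failover_request(False, 'default', rep_configs))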
def validate_replication_group_config(self, rep_configs, extra_specs_list):
"""Validate replication group configuration
Validate the extra specs of volume types being added to
a volume group against rep_config imported from cinder.conf
:param rep_configs: list of replication_device dicts from cinder.conf
:param extra_specs_list: extra_specs of volume types added to group
:raises InvalidInput: If any of the validation check fail
"""
if not rep_configs:
LOG.error('No replication devices are set in cinder.conf. '
'Please disable replication in the Volume Group extra '
'specs or add a replication device to cinder.conf.')
msg = _('No replication devices are defined in cinder.conf, '
'cannot enable volume group replication.')
raise exception.InvalidInput(reason=msg)
rep_group_backend_ids = set()
for extra_specs in extra_specs_list:
target_backend_id = extra_specs.get(
REPLICATION_DEVICE_BACKEND_ID,
BACKEND_ID_LEGACY_REP)
try:
target_rep_config = self.get_rep_config(
target_backend_id, rep_configs)
rep_group_backend_ids.add(target_backend_id)
except exception.InvalidInput:
target_rep_config = None
if extra_specs.get(IS_RE) != '<is> True':
# Replication is disabled or not set to correct value
# in the Volume Type being added
msg = _('Replication is not enabled for a Volume Type, '
'all Volume Types in a replication enabled '
'Volume Group must have replication enabled.')
raise exception.InvalidInput(reason=msg)
if not target_rep_config:
# Unable to determine rep_configs to use.
msg = _('Unable to determine which rep_device to use from '
'cinder.conf. Could not validate volume types being '
'added to group.')
raise exception.InvalidInput(reason=msg)
# Verify that replication is Synchronous mode
if not target_rep_config.get('mode'):
LOG.warning('Unable to verify the replication mode '
'of the Volume Type. Please ensure only '
'Synchronous replication is used.')
elif target_rep_config['mode'] != REP_SYNC:
msg = _('Replication for the Volume Type is not set '
'to Synchronous. Only Synchronous replication '
'can be used with replication groups.')
raise exception.InvalidInput(reason=msg)
if len(rep_group_backend_ids) > 1:
# We should only have a single backend_id
# (replication type) across all the Volume Types
msg = _('Multiple replication backend_ids detected. '
'Please ensure only a single replication device '
'(backend_id) is used for all Volume Types in a '
'Volume Group.')
raise exception.InvalidInput(reason=msg)
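# Illustrative sketch, not part of the driver: volume types added to a
# replication-enabled group must all be replication enabled, resolve to the
# same backend_id and use Synchronous mode. The id 'backend_sync' is
# hypothetical.
from cinder import exception
from cinder.volume.drivers.dell_emc.powermax import utils as pmax_utils

u = pmax_utils.PowerMaxUtils()
rep_configs = [{pmax_utils.BACKEND_ID: 'backend_sync',
                'mode': pmax_utils.REP_SYNC}]

good_specs = [{pmax_utils.IS_RE: '<is> True',
               pmax_utils.REPLICATION_DEVICE_BACKEND_ID: 'backend_sync'}]
u.validate_replication_group_config(rep_configs, good_specs)  # passes quietly

bad_specs = [{pmax_utils.IS_RE: '<is> False'}]  # replication not enabled
try:
    u.validate_replication_group_config(rep_configs, bad_specs)
except exception.InvalidInput as err:
    print(err)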
@staticmethod
def validate_non_replication_group_config(extra_specs_list):
"""Validate volume group configuration
Validate that none of the Volume Type extra specs are
replication enabled.
:param extra_specs_list: list of Volume Type extra specs
:return: bool replication enabled found in any extra specs
"""
for extra_specs in extra_specs_list:
if extra_specs.get(IS_RE) == '<is> True':
msg = _('Replication is enabled in one or more of the '
'Volume Types being added to new Volume Group but '
'the Volume Group is not replication enabled. Please '
'enable replication in the Volume Group or select '
'only non-replicated Volume Types.')
raise exception.InvalidInput(reason=msg)
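# Illustrative sketch, not part of the driver: a non-replicated volume group
# rejects any volume type whose extra specs enable replication.
from cinder import exception
from cinder.volume.drivers.dell_emc.powermax import utils as pmax_utils

pmax_utils.PowerMaxUtils.validate_non_replication_group_config(
    [{'some_spec': 'value'}])  # passes quietly

try:
    pmax_utils.PowerMaxUtils.validate_non_replication_group_config(
        [{pmax_utils.IS_RE: '<is> True'}])
except exception.InvalidInput as err:
    print(err)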
@staticmethod
def get_migration_delete_extra_specs(volume, extra_specs, rep_configs):
"""Get previous extra specs rep details during migration delete
:param volume: volume object -- volume
:param extra_specs: volumes extra specs -- dict
:param rep_configs: imported cinder.conf replication devices -- dict
:returns: updated extra specs -- dict
"""
metadata = volume.metadata
replication_enabled = strutils.bool_from_string(
metadata.get(IS_RE_CAMEL, 'False'))
if replication_enabled:
rdfg_label = metadata['RDFG-Label']
rep_config = next(
(r_c for r_c in rep_configs if r_c[
'rdf_group_label'] == rdfg_label), None)
extra_specs[IS_RE] = replication_enabled
extra_specs[REP_MODE] = metadata['ReplicationMode']
extra_specs[REP_CONFIG] = rep_config
extra_specs[REPLICATION_DEVICE_BACKEND_ID] = rep_config[BACKEND_ID]
else:
extra_specs.pop(IS_RE, None)
return extra_specs
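# Illustrative sketch, not part of the driver: rebuilding replication extra
# specs from volume metadata during a migration delete. The backend id and
# rdf group label below are made up; the metadata keys mirror those read in
# the method above.
import collections

from cinder.volume.drivers.dell_emc.powermax import utils as pmax_utils

FakeVolume = collections.namedtuple('FakeVolume', ['metadata'])
rep_configs = [{pmax_utils.BACKEND_ID: 'backend_async',
                'rdf_group_label': '23_24_099'}]

volume = FakeVolume(metadata={
    pmax_utils.IS_RE_CAMEL: 'True',
    'RDFG-Label': '23_24_099',
    'ReplicationMode': pmax_utils.REP_ASYNC})

extra_specs = pmax_utils.PowerMaxUtils.get_migration_delete_extra_specs(
    volume, {}, rep_configs)
print(extra_specs[pmax_utils.REPLICATION_DEVICE_BACKEND_ID])  # backend_async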

---
features:
  - |
    PowerMax Driver - Added support for using multiple replication modes
    concurrently on a single backend array.