From a4c13863c432489f74f4c987eab2f89a98f37aab Mon Sep 17 00:00:00 2001 From: odonos12 Date: Tue, 25 Feb 2020 09:05:41 +0000 Subject: [PATCH] PowerMax Driver - Support of Multiple Replication Add support for multiple replication devices in the PowerMax driver for Cinder to allow the use of multiple replication modes concurrently. Change-Id: I8fcd5bb0209ede5099a9f2f5c23b7da5b59a819a Implements: blueprint powermax-multiple-replication-devices --- .../dell_emc/powermax/powermax_data.py | 162 +++- .../powermax/powermax_fake_objects.py | 4 +- .../dell_emc/powermax/test_powermax_common.py | 232 ++++-- .../powermax/test_powermax_masking.py | 39 +- .../powermax/test_powermax_metadata.py | 4 +- .../powermax/test_powermax_provision.py | 6 +- .../powermax/test_powermax_replication.py | 220 +++--- .../dell_emc/powermax/test_powermax_rest.py | 135 ++-- .../dell_emc/powermax/test_powermax_utils.py | 372 +++++++++- .../drivers/dell_emc/powermax/common.py | 693 +++++++++++------- cinder/volume/drivers/dell_emc/powermax/fc.py | 1 + .../volume/drivers/dell_emc/powermax/iscsi.py | 1 + .../drivers/dell_emc/powermax/masking.py | 3 +- .../drivers/dell_emc/powermax/metadata.py | 22 +- .../drivers/dell_emc/powermax/provision.py | 8 +- .../volume/drivers/dell_emc/powermax/rest.py | 1 + .../volume/drivers/dell_emc/powermax/utils.py | 405 ++++++++-- ...-replication-devices-0cc532ae621ea9a5.yaml | 5 + 18 files changed, 1758 insertions(+), 555 deletions(-) create mode 100644 releasenotes/notes/powermax-multiple-replication-devices-0cc532ae621ea9a5.yaml diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py index afd72191ec2..ff116e4349d 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py @@ -38,7 +38,6 @@ class PowerMaxData(object): array_herc = '000197900123' array_model = 'PowerMax_8000' srp = 
'SRP_1' - srp2 = 'SRP_2' slo = 'Diamond' slo_diamond = 'Diamond' slo_silver = 'Silver' @@ -71,15 +70,22 @@ class PowerMaxData(object): device_id2 = '00002' device_id3 = '00003' device_id4 = '00004' - rdf_group_name = '23_24_007' - rdf_group_no = '70' + rdf_group_name_1 = '23_24_007' + rdf_group_name_2 = '23_24_008' + rdf_group_name_3 = '23_24_009' + rdf_group_name_4 = '23_24_010' + rdf_group_no_1 = '70' + rdf_group_no_2 = '71' + rdf_group_no_3 = '72' + rdf_group_no_4 = '73' u4v_version = '91' storagegroup_name_source = 'Grp_source_sg' storagegroup_name_target = 'Grp_target_sg' group_snapshot_name = 'Grp_snapshot' target_group_name = 'Grp_target' storagegroup_name_with_id = 'GrpId_group_name' - rdf_managed_async_grp = 'OS-%s-Asynchronous-rdf-sg' % rdf_group_name + rdf_managed_async_grp = 'OS-%s-Asynchronous-rdf-sg' % rdf_group_name_1 + default_sg_re_managed_list = [default_sg_re_enabled, rdf_managed_async_grp] volume_id = '2b06255d-f5f0-4520-a953-b029196add6a' no_slo_sg_name = 'OS-HostX-No_SLO-OS-fibre-PG' temp_snapvx = 'temp-00001-snapshot_for_clone' @@ -266,6 +272,87 @@ class PowerMaxData(object): test_host = {'capabilities': location_info, 'host': fake_host} + # replication + rep_backend_id_sync = 'rep_backend_id_sync' + rep_backend_id_async = 'rep_backend_id_async' + rep_backend_id_metro = 'rep_backend_id_metro' + rep_backend_id_sync_2 = 'rep_backend_id_sync_2' + + rep_dev_1 = { + utils.BACKEND_ID: rep_backend_id_sync, + 'target_device_id': remote_array, + 'remote_port_group': port_group_name_f, + 'remote_pool': srp, + 'rdf_group_label': rdf_group_name_1, + 'mode': utils.REP_SYNC, + 'allow_extend': True} + rep_dev_2 = { + utils.BACKEND_ID: rep_backend_id_async, + 'target_device_id': remote_array, + 'remote_port_group': port_group_name_f, + 'remote_pool': srp, + 'rdf_group_label': rdf_group_name_2, + 'mode': utils.REP_ASYNC, + 'allow_extend': True} + rep_dev_3 = { + utils.BACKEND_ID: rep_backend_id_metro, + 'target_device_id': remote_array, + 'remote_port_group': 
port_group_name_f, + 'remote_pool': srp, + 'rdf_group_label': rdf_group_name_3, + 'mode': utils.REP_METRO, + 'allow_extend': True} + sync_rep_device = [rep_dev_1] + async_rep_device = [rep_dev_2] + metro_rep_device = [rep_dev_3] + multi_rep_device = [rep_dev_1, rep_dev_2, rep_dev_3] + + rep_config_sync = { + utils.BACKEND_ID: rep_backend_id_sync, + 'array': remote_array, + 'portgroup': port_group_name_f, + 'srp': srp, + 'rdf_group_label': rdf_group_name_1, + 'mode': utils.REP_SYNC, + 'allow_extend': True, + 'sync_interval': 3, + 'sync_retries': 200} + rep_config_async = { + utils.BACKEND_ID: rep_backend_id_async, + 'array': remote_array, + 'portgroup': port_group_name_f, + 'srp': srp, + 'rdf_group_label': rdf_group_name_2, + 'mode': utils.REP_ASYNC, + 'allow_extend': True, + 'sync_interval': 3, + 'sync_retries': 200} + rep_config_metro = { + utils.BACKEND_ID: rep_backend_id_metro, + 'array': remote_array, + 'portgroup': port_group_name_f, + 'srp': srp, + 'rdf_group_label': rdf_group_name_3, + 'mode': utils.REP_METRO, + 'allow_extend': True, + 'sync_interval': 3, + 'sync_retries': 200} + rep_config_sync_2 = { + utils.BACKEND_ID: rep_backend_id_sync_2, + 'array': remote_array, + 'portgroup': port_group_name_f, + 'srp': srp, + 'rdf_group_label': rdf_group_name_1, + 'mode': utils.REP_SYNC, + 'allow_extend': True, + 'sync_interval': 3, + 'sync_retries': 200} + sync_rep_config_list = [rep_config_sync] + async_rep_config_list = [rep_config_async] + metro_rep_config_list = [rep_config_metro] + multi_rep_config_list = [rep_config_sync, rep_config_async, + rep_config_metro, rep_config_sync_2] + # extra-specs vol_type_extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123'} vol_type_extra_specs_compr_disabled = { @@ -274,6 +361,18 @@ class PowerMaxData(object): vol_type_extra_specs_rep_enabled = { 'pool_name': u'Diamond+DSS+SRP_1+000197800123', 'replication_enabled': ' True'} + vol_type_extra_specs_rep_enabled_backend_id_sync = { + 'pool_name': 
u'Diamond+DSS+SRP_1+000197800123', + 'replication_enabled': ' True', + utils.REPLICATION_DEVICE_BACKEND_ID: rep_backend_id_sync} + vol_type_extra_specs_rep_enabled_backend_id_sync_2 = { + 'pool_name': u'Diamond+DSS+SRP_1+000197800123', + 'replication_enabled': ' True', + utils.REPLICATION_DEVICE_BACKEND_ID: rep_backend_id_sync_2} + vol_type_extra_specs_rep_enabled_backend_id_async = { + 'pool_name': u'Diamond+DSS+SRP_1+000197800123', + 'replication_enabled': ' True', + utils.REPLICATION_DEVICE_BACKEND_ID: rep_backend_id_async} extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123', 'slo': slo, 'workload': workload, @@ -302,21 +401,23 @@ class PowerMaxData(object): rep_extra_specs['array'] = remote_array rep_extra_specs['interval'] = 1 rep_extra_specs['retries'] = 1 - rep_extra_specs['srp'] = srp2 + rep_extra_specs['srp'] = srp rep_extra_specs['rep_mode'] = 'Synchronous' rep_extra_specs['sync_interval'] = 3 rep_extra_specs['sync_retries'] = 200 - rep_extra_specs['rdf_group_label'] = rdf_group_name - rep_extra_specs['rdf_group_no'] = rdf_group_no + rep_extra_specs['rdf_group_label'] = rdf_group_name_1 + rep_extra_specs['rdf_group_no'] = rdf_group_no_1 rep_extra_specs2 = deepcopy(rep_extra_specs) rep_extra_specs2[utils.PORTGROUPNAME] = port_group_name_f rep_extra_specs3 = deepcopy(rep_extra_specs) rep_extra_specs3['slo'] = slo rep_extra_specs3['workload'] = workload rep_extra_specs4 = deepcopy(rep_extra_specs3) - rep_extra_specs4['rdf_group_label'] = rdf_group_name + rep_extra_specs4['rdf_group_label'] = rdf_group_name_1 rep_extra_specs5 = deepcopy(rep_extra_specs2) rep_extra_specs5['target_array_model'] = 'VMAX250F' + rep_extra_specs5['sync_interval'] = 3 + rep_extra_specs5['sync_retries'] = 200 rep_extra_specs6 = deepcopy(rep_extra_specs3) rep_extra_specs6['target_array_model'] = 'PMAX2000' @@ -328,6 +429,9 @@ class PowerMaxData(object): rep_extra_specs_legacy = deepcopy(rep_extra_specs_ode) rep_extra_specs_legacy['mode'] = 'Synchronous' + 
rep_extra_specs_rep_config = deepcopy(rep_extra_specs6) + rep_extra_specs_rep_config[utils.REP_CONFIG] = rep_config_sync + extra_specs_tags = deepcopy(extra_specs) extra_specs_tags.update({utils.STORAGE_GROUP_TAGS: sg_tags}) @@ -335,8 +439,8 @@ class PowerMaxData(object): rep_extra_specs_mgmt['srp'] = srp rep_extra_specs_mgmt['mgmt_sg_name'] = rdf_managed_async_grp rep_extra_specs_mgmt['sg_name'] = default_sg_no_slo_re_enabled - rep_extra_specs_mgmt['rdf_group_no'] = rdf_group_no - rep_extra_specs_mgmt['rdf_group_label'] = rdf_group_name + rep_extra_specs_mgmt['rdf_group_no'] = rdf_group_no_1 + rep_extra_specs_mgmt['rdf_group_label'] = rdf_group_name_1 rep_extra_specs_mgmt['target_array_model'] = array_model rep_extra_specs_mgmt['slo'] = 'Diamond' rep_extra_specs_mgmt['workload'] = 'NONE' @@ -350,8 +454,8 @@ class PowerMaxData(object): rep_config = { 'array': remote_array, 'srp': srp, 'portgroup': port_group_name_i, - 'rdf_group_no': rdf_group_no, 'sync_retries': 200, - 'sync_interval': 1, 'rdf_group_label': rdf_group_name, + 'rdf_group_no': rdf_group_no_1, 'sync_retries': 200, + 'sync_interval': 1, 'rdf_group_label': rdf_group_name_1, 'allow_extend': True, 'mode': utils.REP_METRO} ex_specs_rep_config = deepcopy(rep_extra_specs_metro) @@ -593,12 +697,12 @@ class PowerMaxData(object): sg_rdf_details = [{'storageGroupName': test_vol_grp_name, 'symmetrixId': array, 'modes': ['Synchronous'], - 'rdfGroupNumber': rdf_group_no, + 'rdfGroupNumber': rdf_group_no_1, 'states': ['Synchronized']}, {'storageGroupName': test_fo_vol_group, 'symmetrixId': array, 'modes': ['Synchronous'], - 'rdfGroupNumber': rdf_group_no, + 'rdfGroupNumber': rdf_group_no_1, 'states': ['Failed Over']}] sg_rdf_group_details = { @@ -812,19 +916,25 @@ class PowerMaxData(object): {'name': 'another-target', 'percentageCopied': 90}] - rdf_group_list = {'rdfGroupID': [{'rdfgNumber': rdf_group_no, - 'label': rdf_group_name}]} + rdf_group_list = {'rdfGroupID': [{'rdfgNumber': rdf_group_no_1, + 'label': 
rdf_group_name_1}, + {'rdfgNumber': rdf_group_no_2, + 'label': rdf_group_name_2}, + {'rdfgNumber': rdf_group_no_3, + 'label': rdf_group_name_3}, + {'rdfgNumber': rdf_group_no_4, + 'label': rdf_group_name_4}]} rdf_group_details = {'modes': ['Synchronous'], 'remoteSymmetrix': remote_array, - 'label': rdf_group_name, + 'label': rdf_group_name_1, 'type': 'Dynamic', 'numDevices': 1, - 'remoteRdfgNumber': rdf_group_no, - 'rdfgNumber': rdf_group_no} - rdf_group_vol_details = {'remoteRdfGroupNumber': rdf_group_no, + 'remoteRdfgNumber': rdf_group_no_1, + 'rdfgNumber': rdf_group_no_1} + rdf_group_vol_details = {'remoteRdfGroupNumber': rdf_group_no_1, 'localSymmetrixId': array, 'volumeConfig': 'RDF1+TDEV', - 'localRdfGroupNumber': rdf_group_no, + 'localRdfGroupNumber': rdf_group_no_1, 'localVolumeName': device_id, 'rdfpairState': 'Synchronized', 'remoteVolumeName': device_id2, @@ -834,8 +944,8 @@ class PowerMaxData(object): 'remoteSymmetrixId': remote_array} rdf_group_vol_details_not_synced = { - 'remoteRdfGroupNumber': rdf_group_no, 'localSymmetrixId': array, - 'volumeConfig': 'RDF1+TDEV', 'localRdfGroupNumber': rdf_group_no, + 'remoteRdfGroupNumber': rdf_group_no_1, 'localSymmetrixId': array, + 'volumeConfig': 'RDF1+TDEV', 'localRdfGroupNumber': rdf_group_no_1, 'localVolumeName': device_id, 'rdfpairState': 'syncinprog', 'remoteVolumeName': device_id2, 'localVolumeState': 'Ready', 'rdfMode': 'Synchronous', 'remoteVolumeState': 'Write Disabled', @@ -1096,6 +1206,8 @@ class PowerMaxData(object): 'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False}, 'timeFinderInfo': {'snapVXTgt': False, 'snapVXSrc': False}}] + volume_create_info_dict = {utils.ARRAY: array, utils.DEVICE_ID: device_id} + volume_info_dict = { 'volume_id': volume_id, 'service_level': 'Diamond', @@ -1327,8 +1439,8 @@ class PowerMaxData(object): 'device_id': device_id, 'local_array': array, 'remote_array': remote_array, 'target_device_id': device_id2, 'target_name': 'test_vol', - 'rdf_group_no': rdf_group_no, 
'rep_mode': 'Metro', - 'replication_status': 'Enabled', 'rdf_group_label': rdf_group_name, + 'rdf_group_no': rdf_group_no_1, 'rep_mode': 'Metro', + 'replication_status': 'Enabled', 'rdf_group_label': rdf_group_name_1, 'target_array_model': array_model, 'rdf_mgmt_grp': rdf_managed_async_grp} diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py index 710f2400c2c..1cd0f734896 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py @@ -200,7 +200,7 @@ class FakeRequestsSession(object): elif 'rdf_group' in url: if self.data.device_id in url: return_object = self.data.rdf_group_vol_details - elif self.data.rdf_group_no in url: + elif self.data.rdf_group_no_1 in url: return_object = self.data.rdf_group_details else: return_object = self.data.rdf_group_list @@ -283,7 +283,7 @@ class FakeConfiguration(object): self.config_group = volume_backend_name self.san_is_local = False if replication_device: - self.replication_device = [replication_device] + self.replication_device = replication_device for key, value in kwargs.items(): if key == 'san_login': self.san_login = value diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py index e6f26ba6c63..53f81c81568 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py @@ -44,12 +44,14 @@ class PowerMaxCommonTest(test.TestCase): super(PowerMaxCommonTest, self).setUp() self.mock_object(volume_utils, 'get_max_over_subscription_ratio', return_value=1.0) + replication_device = self.data.sync_rep_device configuration = tpfo.FakeConfiguration( emc_file=None, volume_backend_name='CommonTests', 
interval=1, retries=1, san_ip='1.1.1.1', san_login='smc', vmax_array=self.data.array, vmax_srp='SRP_1', san_password='smc', san_api_port=8443, vmax_port_groups=[self.data.port_group_name_f], - powermax_port_group_name_template='portGroupName') + powermax_port_group_name_template='portGroupName', + replication_device=replication_device) rest.PowerMaxRest._establish_rest_session = mock.Mock( return_value=tpfo.FakeRequestsSession()) driver = fc.PowerMaxFCDriver(configuration=configuration) @@ -205,11 +207,11 @@ class PowerMaxCommonTest(test.TestCase): self.assertEqual(ref_model_update, model_update) @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata', - return_value='') + return_value=tpd.PowerMaxData.volume_metadata) def test_create_volume_qos(self, mck_meta): ref_model_update = ( {'provider_location': six.text_type(self.data.provider_location), - 'metadata': ''}) + 'metadata': self.data.volume_metadata}) extra_specs = deepcopy(self.data.extra_specs_intervals_set) extra_specs['qos'] = { 'total_iops_sec': '4000', 'DistributionType': 'Always'} @@ -240,6 +242,84 @@ class PowerMaxCommonTest(test.TestCase): ast.literal_eval(ref_model_update['provider_location']), ast.literal_eval(model_update['provider_location'])) + @mock.patch.object(common.PowerMaxCommon, 'gather_replication_updates', + return_value=(tpd.PowerMaxData.replication_update, + tpd.PowerMaxData.rep_info_dict)) + @mock.patch.object(common.PowerMaxCommon, 'srdf_protect_storage_group') + @mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg', + return_value=tpd.PowerMaxData.volume_create_info_dict) + @mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details', + return_value=(True, tpd.PowerMaxData.rep_extra_specs5, + tpd.PowerMaxData.rep_info_dict, True)) + def test_create_replication_enabled_volume_first_volume( + self, mck_prep, mck_create, mck_protect, mck_updates): + array = self.data.array + volume = self.data.test_volume + volume_name = volume.name + volume_size = 
volume.size + rep_extra_specs = self.data.rep_extra_specs + rep_extra_specs5 = self.data.rep_extra_specs5 + storagegroup_name = self.data.storagegroup_name_f + rep_info_dict = self.data.rep_info_dict + rep_vol = deepcopy(self.data.volume_create_info_dict) + rep_vol.update({'device_uuid': volume_name, + 'storage_group': storagegroup_name, + 'size': volume_size}) + vol, update, info = self.common._create_replication_enabled_volume( + array, volume, volume_name, volume_size, rep_extra_specs, + storagegroup_name, rep_extra_specs['rep_mode']) + mck_prep.assert_called_once_with(self.data.rep_extra_specs) + mck_create.assert_called_once_with( + array, volume_name, storagegroup_name, volume_size, + rep_extra_specs, rep_info_dict) + mck_protect.assert_called_once_with( + rep_extra_specs, rep_extra_specs5, rep_vol) + rep_vol.update({'remote_device_id': self.data.device_id2}) + mck_updates.assert_called_once_with( + rep_extra_specs, rep_extra_specs5, rep_vol) + self.assertEqual(self.data.volume_create_info_dict, vol) + self.assertEqual(self.data.replication_update, update) + self.assertEqual(self.data.rep_info_dict, info) + + @mock.patch.object(common.PowerMaxCommon, 'gather_replication_updates', + return_value=(tpd.PowerMaxData.replication_update, + tpd.PowerMaxData.rep_info_dict)) + @mock.patch.object(common.PowerMaxCommon, 'srdf_protect_storage_group') + @mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg', + return_value=tpd.PowerMaxData.volume_create_info_dict) + @mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details', + side_effect=((False, '', '', True), + ('', tpd.PowerMaxData.rep_extra_specs5, + tpd.PowerMaxData.rep_info_dict, ''))) + def test_create_replication_enabled_volume_not_first_volume( + self, mck_prepare, mck_create, mck_protect, mck_updates): + array = self.data.array + volume = self.data.test_volume + volume_name = volume.name + volume_size = volume.size + rep_extra_specs = self.data.rep_extra_specs + rep_extra_specs5 = 
self.data.rep_extra_specs5 + storagegroup_name = self.data.storagegroup_name_f + rep_info_dict = self.data.rep_info_dict + rep_vol = deepcopy(self.data.volume_create_info_dict) + rep_vol.update({'device_uuid': volume_name, + 'storage_group': storagegroup_name, + 'size': volume_size}) + vol, update, info = self.common._create_replication_enabled_volume( + array, volume, volume_name, volume_size, rep_extra_specs, + storagegroup_name, rep_extra_specs['rep_mode']) + self.assertEqual(2, mck_prepare.call_count) + mck_create.assert_called_once_with( + array, volume_name, storagegroup_name, volume_size, + rep_extra_specs, rep_info_dict) + mck_protect.assert_not_called() + rep_vol.update({'remote_device_id': self.data.device_id2}) + mck_updates.assert_called_once_with( + rep_extra_specs, rep_extra_specs5, rep_vol) + self.assertEqual(self.data.volume_create_info_dict, vol) + self.assertEqual(self.data.replication_update, update) + self.assertEqual(self.data.rep_info_dict, info) + @mock.patch.object(common.PowerMaxCommon, '_clone_check') @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata', return_value='') @@ -823,6 +903,7 @@ class PowerMaxCommonTest(test.TestCase): ref_mv_dict = self.data.masking_view_dict self.common.next_gen = False self.common.powermax_port_group_name_template = 'portGroupName' + extra_specs.pop(utils.IS_RE, None) masking_view_dict = self.common._populate_masking_dict( volume, connector, extra_specs) self.assertEqual(ref_mv_dict, masking_view_dict) @@ -1027,6 +1108,7 @@ class PowerMaxCommonTest(test.TestCase): mock_delete.assert_not_called() def test_create_volume_success(self): + volume = self.data.test_volume volume_name = '1' volume_size = self.data.test_volume.size extra_specs = self.data.extra_specs @@ -1035,11 +1117,12 @@ class PowerMaxCommonTest(test.TestCase): return_value=self.data.volume_details[0]): volume_dict, rep_update, rep_info_dict = ( self.common._create_volume( - volume_name, volume_size, extra_specs)) + volume, volume_name, 
volume_size, extra_specs)) self.assertEqual(ref_response, (volume_dict, rep_update, rep_info_dict)) def test_create_volume_success_next_gen(self): + volume = self.data.test_volume volume_name = '1' volume_size = self.data.test_volume.size extra_specs = self.data.extra_specs @@ -1053,38 +1136,86 @@ class PowerMaxCommonTest(test.TestCase): self.masking, 'get_or_create_default_storage_group') as mock_get: self.common._create_volume( - volume_name, volume_size, extra_specs) + volume, volume_name, volume_size, extra_specs) mock_get.assert_called_once_with( extra_specs['array'], extra_specs[utils.SRP], extra_specs[utils.SLO], 'NONE', extra_specs, True, False, None) - def test_create_volume_failed(self): + @mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg', + side_effect=exception.VolumeBackendAPIException('')) + @mock.patch.object(common.PowerMaxCommon, + '_cleanup_volume_create_post_failure') + @mock.patch.object(rest.PowerMaxRest, 'delete_storage_group') + def test_create_volume_failed(self, mck_del, mck_cleanup, mck_create): + volume = self.data.test_volume volume_name = self.data.test_volume.name volume_size = self.data.test_volume.size extra_specs = self.data.extra_specs + dev1 = self.data.device_id + dev2 = self.data.device_id2 with mock.patch.object( - self.masking, 'get_or_create_default_storage_group', - return_value=self.data.failed_resource): - with mock.patch.object( - self.rest, 'delete_storage_group') as mock_delete: - # path 1: not last vol in sg - with mock.patch.object( - self.rest, 'get_num_vols_in_sg', return_value=2): - self.assertRaises(exception.VolumeBackendAPIException, - self.common._create_volume, - volume_name, volume_size, extra_specs) - mock_delete.assert_not_called() - # path 2: last vol in sg, delete sg - with mock.patch.object(self.rest, 'get_num_vols_in_sg', - return_value=0): - self.assertRaises(exception.VolumeBackendAPIException, - self.common._create_volume, - volume_name, volume_size, extra_specs) - 
mock_delete.assert_called_once_with( - self.data.array, self.data.failed_resource) + self.rest, 'get_volumes_in_storage_group', + side_effect=[[dev1], [dev1, dev2]]): + self.assertRaises(exception.VolumeBackendAPIException, + self.common._create_volume, + volume, volume_name, volume_size, + extra_specs) + mck_cleanup.assert_called_once_with( + volume, volume_name, extra_specs, [dev2]) + # path 2: no new volumes created + with mock.patch.object( + self.rest, 'get_volumes_in_storage_group', + side_effect=[[], []]): + self.assertRaises(exception.VolumeBackendAPIException, + self.common._create_volume, + volume, volume_name, volume_size, + extra_specs) + mck_del.assert_called_once() + + @mock.patch.object(common.PowerMaxCommon, '_delete_from_srp') + @mock.patch.object(common.PowerMaxCommon, 'cleanup_rdf_device_pair') + @mock.patch.object( + rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=('', '', [ + {utils.RDF_GROUP_NO: tpd.PowerMaxData.rdf_group_no_1}])) + def test_cleanup_volume_create_post_failure_rdf_enabled( + self, mck_in, mck_clean, mck_del): + array = self.data.array + volume = self.data.test_volume + volume_name = self.data.test_volume.name + extra_specs = deepcopy(self.data.extra_specs_rep_enabled) + extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync + devices = [self.data.device_id] + self.common._cleanup_volume_create_post_failure( + volume, volume_name, extra_specs, devices) + mck_in.assert_called_once_with(array, self.data.device_id) + mck_clean.assert_called_once_with( + array, self.data.rdf_group_no_1, self.data.device_id, extra_specs) + mck_del.assert_called_once_with( + array, self.data.device_id, volume_name, extra_specs) + + @mock.patch.object(common.PowerMaxCommon, '_delete_from_srp') + @mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members') + @mock.patch.object( + rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=('', '', '')) + def test_cleanup_volume_create_post_failure_rdf_disabled( + self, mck_in, 
mck_remove, mck_del): + array = self.data.array + volume = self.data.test_volume + volume_name = self.data.test_volume.name + extra_specs = self.data.extra_specs + devices = [self.data.device_id] + self.common._cleanup_volume_create_post_failure( + volume, volume_name, extra_specs, devices) + mck_in.assert_called_once_with(array, self.data.device_id) + mck_remove.assert_called_once_with( + array, volume, self.data.device_id, volume_name, extra_specs, + False) + mck_del.assert_called_once_with( + array, self.data.device_id, volume_name, extra_specs) def test_create_volume_incorrect_slo(self): + volume = self.data.test_volume volume_name = self.data.test_volume.name volume_size = self.data.test_volume.size extra_specs = {'slo': 'Diamondz', @@ -1094,7 +1225,7 @@ class PowerMaxCommonTest(test.TestCase): self.assertRaises( exception.VolumeBackendAPIException, self.common._create_volume, - volume_name, volume_size, extra_specs) + volume, volume_name, volume_size, extra_specs) def test_set_vmax_extra_specs(self): srp_record = self.common.get_attributes_from_cinder_config() @@ -1236,8 +1367,7 @@ class PowerMaxCommonTest(test.TestCase): self.assertEqual([], metro_wwns) # Is metro volume with mock.patch.object(common.PowerMaxCommon, '_initial_setup', - return_value=self.data.rep_extra_specs_ode): - self.common.rep_config = self.data.rep_config + return_value=self.data.ex_specs_rep_config): __, metro_wwns = self.common.get_target_wwns_from_masking_view( self.data.test_volume, self.data.connector) self.assertEqual([self.data.wwnn1], metro_wwns) @@ -2416,7 +2546,7 @@ class PowerMaxCommonTest(test.TestCase): array = self.data.array device_id = self.data.device_id new_size = self.data.test_volume.size + 1 - extra_specs = deepcopy(self.data.extra_specs) + extra_specs = self.data.extra_specs self.common._extend_vol_validation_checks( array, device_id, volume.name, extra_specs, volume.size, new_size) @@ -2428,7 +2558,7 @@ class PowerMaxCommonTest(test.TestCase): array = 
self.data.array device_id = None new_size = self.data.test_volume.size + 1 - extra_specs = deepcopy(self.data.extra_specs) + extra_specs = self.data.extra_specs self.assertRaises( exception.VolumeBackendAPIException, self.common._extend_vol_validation_checks, @@ -2457,7 +2587,7 @@ class PowerMaxCommonTest(test.TestCase): array = self.data.array device_id = self.data.device_id new_size = volume.size - 1 - extra_specs = deepcopy(self.data.extra_specs) + extra_specs = self.data.extra_specs self.assertRaises( exception.VolumeBackendAPIException, self.common._extend_vol_validation_checks, @@ -2469,7 +2599,7 @@ class PowerMaxCommonTest(test.TestCase): self.common.next_gen = False (r1_ode, r1_ode_metro, r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check( - array, True) + array, self.data.rep_config_metro, True) self.assertFalse(r1_ode) self.assertFalse(r1_ode_metro) self.assertFalse(r2_ode) @@ -2487,7 +2617,7 @@ class PowerMaxCommonTest(test.TestCase): self.common.next_gen = True (r1_ode, r1_ode_metro, r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check( - array, False) + array, self.data.rep_config_metro, False) self.assertTrue(r1_ode) self.assertFalse(r1_ode_metro) self.assertFalse(r2_ode) @@ -2505,7 +2635,7 @@ class PowerMaxCommonTest(test.TestCase): self.common.next_gen = True (r1_ode, r1_ode_metro, r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check( - array, True) + array, self.data.rep_config_metro, True) self.assertTrue(r1_ode) self.assertTrue(r1_ode_metro) self.assertFalse(r2_ode) @@ -2523,7 +2653,7 @@ class PowerMaxCommonTest(test.TestCase): self.common.next_gen = True (r1_ode, r1_ode_metro, r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check( - array, True) + array, self.data.rep_config_metro, True) self.assertTrue(r1_ode) self.assertTrue(r1_ode_metro) self.assertTrue(r2_ode) @@ -2541,7 +2671,7 @@ class PowerMaxCommonTest(test.TestCase): self.common.next_gen = True (r1_ode, r1_ode_metro, r2_ode, 
r2_ode_metro) = self.common._array_ode_capabilities_check( - array, True) + array, self.data.rep_config_metro, True) self.assertTrue(r1_ode) self.assertTrue(r1_ode_metro) self.assertTrue(r2_ode) @@ -2562,7 +2692,7 @@ class PowerMaxCommonTest(test.TestCase): device_id = self.data.device_id new_size = volume.size + 1 extra_specs = self.data.extra_specs - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 self.common._extend_legacy_replicated_vol( array, volume, device_id, volume.name, new_size, extra_specs, rdf_group_no) @@ -2578,7 +2708,7 @@ class PowerMaxCommonTest(test.TestCase): device_id = self.data.device_id new_size = volume.size + 1 extra_specs = self.data.extra_specs - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 self.assertRaises( exception.VolumeBackendAPIException, self.common._extend_legacy_replicated_vol, @@ -2605,12 +2735,6 @@ class PowerMaxCommonTest(test.TestCase): port = self.common._get_unisphere_port() self.assertEqual(ref_port, port) - @mock.patch.object(utils.PowerMaxUtils, - 'get_replication_config') - def test_get_replication_info(self, mock_config): - self.common._get_replication_info() - mock_config.assert_not_called() - @mock.patch.object(common.PowerMaxCommon, '_do_sync_check') def test_sync_check_no_source_device_on_array(self, mock_check): @@ -2775,7 +2899,7 @@ class PowerMaxCommonTest(test.TestCase): 'R2-ArrayID': self.data.remote_array, 'R2-ArrayModel': self.data.array_model, 'ReplicationMode': 'Synchronized', - 'RDFG-Label': self.data.rdf_group_name, + 'RDFG-Label': self.data.rdf_group_name_1, 'R1-RDFG': 1, 'R2-RDFG': 1} array = self.data.array device_id = self.data.device_id @@ -2801,7 +2925,7 @@ class PowerMaxCommonTest(test.TestCase): 'R2-ArrayID': self.data.remote_array, 'R2-ArrayModel': self.data.array_model, 'ReplicationMode': 'Metro', - 'RDFG-Label': self.data.rdf_group_name, + 'RDFG-Label': self.data.rdf_group_name_1, 'R1-RDFG': 1, 'R2-RDFG': 1} array = 
self.data.array device_id = self.data.device_id @@ -3023,18 +3147,20 @@ class PowerMaxCommonTest(test.TestCase): device_id = self.data.device_id volume = self.data.test_volume volume_name = self.data.volume_id - extra_specs = self.data.rep_extra_specs + extra_specs = deepcopy(self.data.rep_extra_specs) target_slo = self.data.slo_silver target_workload = self.data.workload target_extra_specs = deepcopy(self.data.rep_extra_specs) target_extra_specs[utils.DISABLECOMPRESSION] = False group_name = self.data.rdf_managed_async_grp + extra_specs[utils.REP_CONFIG] = self.data.rep_config_async + target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async success, target_sg_name = self.common._retype_volume( array, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs, remote=True) - mck_get_rdf.assert_called_once_with(self.common.rep_config) + mck_get_rdf.assert_called_once_with(self.data.rep_config_async) mck_get_vol.assert_called_once_with(array, device_id) mck_get_sg.assert_called_once_with( array, srp, target_slo, target_workload, extra_specs, @@ -3084,9 +3210,10 @@ class PowerMaxCommonTest(test.TestCase): volume_name = self.data.volume_id extra_specs = self.data.rep_extra_specs target_slo = self.data.slo_silver - target_workload = deepcopy(self.data.workload) - target_extra_specs = self.data.rep_extra_specs + target_workload = self.data.workload + target_extra_specs = deepcopy(self.data.rep_extra_specs) target_extra_specs[utils.DISABLECOMPRESSION] = False + target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync success, target_sg_name = self.common._retype_volume( array, srp, device_id, volume, volume_name, extra_specs, @@ -3122,6 +3249,7 @@ class PowerMaxCommonTest(test.TestCase): target_workload = self.data.workload target_extra_specs = deepcopy(self.data.rep_extra_specs) target_extra_specs[utils.DISABLECOMPRESSION] = False + target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async success, 
target_sg_name = self.common._retype_volume( array, srp, device_id, volume, volume_name, extra_specs, @@ -3153,11 +3281,13 @@ class PowerMaxCommonTest(test.TestCase): device_id = self.data.device_id volume = self.data.test_volume volume_name = self.data.volume_id - extra_specs = self.data.rep_extra_specs + extra_specs = deepcopy(self.data.rep_extra_specs) target_slo = self.data.slo_silver target_workload = self.data.workload target_extra_specs = deepcopy(self.data.rep_extra_specs) target_extra_specs[utils.DISABLECOMPRESSION] = False + extra_specs[utils.REP_CONFIG] = self.data.rep_config_async + target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async success, target_sg_name = self.common._retype_volume( array, srp, device_id, volume, volume_name, extra_specs, diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py index ca63e1d0d2a..e663571659c 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py @@ -35,9 +35,13 @@ class PowerMaxMaskingTest(test.TestCase): self.data = tpd.PowerMaxData() super(PowerMaxMaskingTest, self).setUp() volume_utils.get_max_over_subscription_ratio = mock.Mock() - configuration = mock.Mock() - configuration.safe_get.return_value = 'MaskingTests' - configuration.config_group = 'MaskingTests' + self.replication_device = self.data.sync_rep_device + configuration = tpfo.FakeConfiguration( + None, 'MaskingTests', 1, 1, san_ip='1.1.1.1', + san_login='smc', vmax_array=self.data.array, vmax_srp='SRP_1', + san_password='smc', san_api_port=8443, + vmax_port_groups=[self.data.port_group_name_f], + replication_device=self.replication_device) self._gather_info = common.PowerMaxCommon._gather_info common.PowerMaxCommon._get_u4p_failover_info = mock.Mock() common.PowerMaxCommon._gather_info = mock.Mock() @@ -50,7 +54,7 @@ 
class PowerMaxMaskingTest(test.TestCase): self.driver = driver self.driver_fc = driver_fc self.mask = self.driver.masking - self.extra_specs = self.data.extra_specs + self.extra_specs = deepcopy(self.data.extra_specs) self.extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_i self.maskingviewdict = self.driver._populate_masking_dict( self.data.test_volume, self.data.connector, self.extra_specs) @@ -148,19 +152,30 @@ class PowerMaxMaskingTest(test.TestCase): self.data.storagegroup_name_i, self.extra_specs) self.assertIsNotNone(msg) + @mock.patch.object(rest.PowerMaxRest, 'modify_storage_group', + return_value=(200, tpfo.tpd.PowerMaxData.job_list[0])) @mock.patch.object(rest.PowerMaxRest, 'remove_child_sg_from_parent_sg') @mock.patch.object(masking.PowerMaxMasking, 'get_parent_sg_from_child', side_effect=[None, tpd.PowerMaxData.parent_sg_f]) @mock.patch.object( rest.PowerMaxRest, 'get_num_vols_in_sg', side_effect=[2, 1, 1]) def test_move_volume_between_storage_groups( - self, mock_num, mock_parent, mock_rm): + self, mock_num, mock_parent, mock_rm, mck_mod): for x in range(0, 3): self.driver.masking.move_volume_between_storage_groups( self.data.array, self.data.device_id, self.data.storagegroup_name_i, self.data.storagegroup_name_f, self.data.extra_specs) mock_rm.assert_called_once() + ref_payload = ( + {"executionOption": "ASYNCHRONOUS", + "editStorageGroupActionParam": { + "moveVolumeToStorageGroupParam": { + "volumeId": [self.data.device_id], + "storageGroupId": self.data.storagegroup_name_f, + "force": 'false'}}}) + mck_mod.assert_called_with( + self.data.array, self.data.storagegroup_name_i, ref_payload) @mock.patch.object(rest.PowerMaxRest, 'remove_child_sg_from_parent_sg') @mock.patch.object(masking.PowerMaxMasking, 'get_parent_sg_from_child', @@ -786,6 +801,8 @@ class PowerMaxMaskingTest(test.TestCase): mock_return.assert_called_once() def test_add_volume_to_default_storage_group_next_gen(self): + extra_specs = deepcopy(self.data.extra_specs) + 
extra_specs.pop(utils.IS_RE, None) with mock.patch.object(rest.PowerMaxRest, 'is_next_gen_array', return_value=True): with mock.patch.object( @@ -793,11 +810,11 @@ class PowerMaxMaskingTest(test.TestCase): 'get_or_create_default_storage_group') as mock_get: self.mask.add_volume_to_default_storage_group( self.data.array, self.device_id, self.volume_name, - self.extra_specs) + extra_specs) mock_get.assert_called_once_with( self.data.array, self.data.srp, - self.extra_specs[utils.SLO], - 'NONE', self.extra_specs, False, False, None) + extra_specs[utils.SLO], + 'NONE', extra_specs, False, False, None) @mock.patch.object(provision.PowerMaxProvision, 'create_storage_group') def test_get_or_create_default_storage_group(self, mock_create_sg): @@ -863,7 +880,8 @@ class PowerMaxMaskingTest(test.TestCase): mock_delete_ig.assert_called_once() def test_populate_masking_dict_init_check_false(self): - extra_specs = self.data.extra_specs + extra_specs = deepcopy(self.data.extra_specs) + extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f connector = self.data.connector with mock.patch.object(self.driver, '_get_initiator_check_flag', return_value=False): @@ -872,7 +890,8 @@ class PowerMaxMaskingTest(test.TestCase): self.assertFalse(masking_view_dict['initiator_check']) def test_populate_masking_dict_init_check_true(self): - extra_specs = self.data.extra_specs + extra_specs = deepcopy(self.data.extra_specs) + extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f connector = self.data.connector with mock.patch.object(self.driver, '_get_initiator_check_flag', return_value=True): diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_metadata.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_metadata.py index b9aa5652347..1f95d619e36 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_metadata.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_metadata.py @@ -112,7 +112,7 @@ 
class PowerMaxVolumeMetadataDebugTest(test.TestCase): def test_capture_failover_volume(self, mock_uvim): self.volume_metadata.capture_failover_volume( self.data.test_volume, self.data.device_id2, - self.data.remote_array, self.data.rdf_group_name, + self.data.remote_array, self.data.rdf_group_name_1, self.data.device_id, self.data.array, self.data.extra_specs, True, None, fields.ReplicationStatus.FAILED_OVER, utils.REP_SYNC) @@ -162,7 +162,7 @@ class PowerMaxVolumeMetadataDebugTest(test.TestCase): self.data.test_volume, self.data.device_id, self.data.array, self.data.srp, self.data.slo, self.data.workload, self.data.storagegroup_name_target, False, None, - False) + False, None) mock_uvim.assert_called_once() def test_update_volume_info_metadata(self): diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py index ee99a4e9cf9..d324825d957 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py @@ -285,7 +285,7 @@ class PowerMaxProvisionTest(test.TestCase): device_id = self.data.device_id new_size = '3' extra_specs = self.data.extra_specs - rdfg_num = self.data.rdf_group_no + rdfg_num = self.data.rdf_group_no_1 with mock.patch.object(self.provision.rest, 'extend_volume' ) as mock_ex: self.provision.extend_volume(array, device_id, new_size, @@ -396,7 +396,7 @@ class PowerMaxProvisionTest(test.TestCase): array = self.data.array device_id = self.data.device_id sg_name = self.data.storagegroup_name_f - rdf_group = self.data.rdf_group_no + rdf_group = self.data.rdf_group_no_1 extra_specs = self.data.rep_extra_specs # sync still in progress @@ -519,7 +519,7 @@ class PowerMaxProvisionTest(test.TestCase): def test_replicate_group(self, mock_create): self.rest.replicate_group( self.data.array, self.data.test_rep_group, - self.data.rdf_group_no, 
self.data.remote_array, + self.data.rdf_group_no_1, self.data.remote_array, self.data.extra_specs) mock_create.assert_called_once() diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py index 57efa8c73f2..ba7815d4c4e 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py @@ -41,12 +41,7 @@ class PowerMaxReplicationTest(test.TestCase): def setUp(self): self.data = tpd.PowerMaxData() super(PowerMaxReplicationTest, self).setUp() - self.replication_device = { - 'target_device_id': self.data.remote_array, - 'remote_port_group': self.data.port_group_name_f, - 'remote_pool': self.data.srp2, - 'rdf_group_label': self.data.rdf_group_name, - 'allow_extend': 'True'} + self.replication_device = self.data.sync_rep_device volume_utils.get_max_over_subscription_ratio = mock.Mock() configuration = tpfo.FakeConfiguration( None, 'CommonReplicationTests', interval=1, retries=1, @@ -78,12 +73,7 @@ class PowerMaxReplicationTest(test.TestCase): self.extra_specs['retries'] = 1 self.extra_specs['interval'] = 1 self.extra_specs['rep_mode'] = 'Synchronous' - self.async_rep_device = { - 'target_device_id': self.data.remote_array, - 'remote_port_group': self.data.port_group_name_f, - 'remote_pool': self.data.srp2, - 'rdf_group_label': self.data.rdf_group_name, - 'allow_extend': 'True', 'mode': 'async'} + self.async_rep_device = self.data.async_rep_device async_configuration = tpfo.FakeConfiguration( None, 'CommonReplicationTests', interval=1, retries=1, san_ip='1.1.1.1', san_login='smc', vmax_array=self.data.array, @@ -92,12 +82,7 @@ class PowerMaxReplicationTest(test.TestCase): replication_device=self.async_rep_device) self.async_driver = fc.PowerMaxFCDriver( configuration=async_configuration) - self.metro_rep_device = { - 'target_device_id': 
self.data.remote_array, - 'remote_port_group': self.data.port_group_name_f, - 'remote_pool': self.data.srp2, - 'rdf_group_label': self.data.rdf_group_name, - 'allow_extend': 'True', 'mode': 'metro'} + self.metro_rep_device = self.data.metro_rep_device metro_configuration = tpfo.FakeConfiguration( None, 'CommonReplicationTests', interval=1, retries=1, san_ip='1.1.1.1', san_login='smc', vmax_array=self.data.array, @@ -121,9 +106,9 @@ class PowerMaxReplicationTest(test.TestCase): extra_specs = deepcopy(self.extra_specs) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f extra_specs[utils.IS_RE] = True - rep_config = self.utils.get_replication_config( - [self.replication_device]) + rep_config = self.data.rep_config_sync rep_config[utils.RDF_CONS_EXEMPT] = False + extra_specs[utils.REP_CONFIG] = rep_config self.common._unmap_lun(self.data.test_volume, self.data.connector) mock_es.assert_called_once_with(extra_specs, rep_config) @@ -148,8 +133,8 @@ class PowerMaxReplicationTest(test.TestCase): rep_extra_specs = deepcopy(tpd.PowerMaxData.rep_extra_specs) rep_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f - rep_config = self.utils.get_replication_config( - [self.replication_device]) + rep_config = self.data.rep_config_sync + extra_specs[utils.REP_CONFIG] = rep_config rep_config[utils.RDF_CONS_EXEMPT] = False with mock.patch.object(self.common, '_get_replication_extra_specs', @@ -252,15 +237,16 @@ class PowerMaxReplicationTest(test.TestCase): def test_get_rdf_details(self): rdf_group_no, remote_array = self.common.get_rdf_details( - self.data.array) - self.assertEqual(self.data.rdf_group_no, rdf_group_no) + self.data.array, self.data.rep_config_sync) + self.assertEqual(self.data.rdf_group_no_1, rdf_group_no) self.assertEqual(self.data.remote_array, remote_array) def test_get_rdf_details_exception(self): with mock.patch.object(self.rest, 'get_rdf_group_number', return_value=None): self.assertRaises(exception.VolumeBackendAPIException, - 
self.common.get_rdf_details, self.data.array) + self.common.get_rdf_details, self.data.array, + self.data.rep_config_sync) @mock.patch.object(common.PowerMaxCommon, '_sync_check') def test_failover_host(self, mck_sync): @@ -282,17 +268,20 @@ class PowerMaxReplicationTest(test.TestCase): 'get_array_model_info', return_value=('VMAX250F', False)) def test_get_replication_extra_specs(self, mock_model): - rep_config = self.utils.get_replication_config( - [self.replication_device]) + rep_config = self.data.rep_config_sync # Path one - disable compression extra_specs1 = deepcopy(self.extra_specs) extra_specs1[utils.DISABLECOMPRESSION] = 'true' ref_specs1 = deepcopy(self.data.rep_extra_specs5) + ref_specs1['rdf_group_label'] = self.data.rdf_group_name_1 + ref_specs1['rdf_group_no'] = self.data.rdf_group_no_1 rep_extra_specs1 = self.common._get_replication_extra_specs( extra_specs1, rep_config) self.assertEqual(ref_specs1, rep_extra_specs1) # Path two - disable compression, not all flash ref_specs2 = deepcopy(self.data.rep_extra_specs5) + ref_specs2['rdf_group_label'] = self.data.rdf_group_name_1 + ref_specs2['rdf_group_no'] = self.data.rdf_group_no_1 with mock.patch.object(self.rest, 'is_compression_capable', return_value=False): rep_extra_specs2 = self.common._get_replication_extra_specs( @@ -303,15 +292,17 @@ class PowerMaxReplicationTest(test.TestCase): 'get_array_model_info', return_value=('PowerMax 2000', True)) def test_get_replication_extra_specs_powermax(self, mock_model): - rep_config = self.utils.get_replication_config( - [self.replication_device]) - rep_specs = deepcopy(self.data.rep_extra_specs2) + rep_config = self.data.rep_config_sync + rep_specs = deepcopy(self.data.rep_extra_specs5) extra_specs = deepcopy(self.extra_specs) # SLO not valid, both SLO and Workload set to NONE rep_specs['slo'] = None rep_specs['workload'] = None rep_specs['target_array_model'] = 'PowerMax 2000' + rep_specs['rdf_group_label'] = self.data.rdf_group_name_1 + 
rep_specs['rdf_group_no'] = self.data.rdf_group_no_1 + with mock.patch.object(self.provision, 'verify_slo_workload', return_value=(False, False)): rep_extra_specs = self.common._get_replication_extra_specs( @@ -328,8 +319,7 @@ class PowerMaxReplicationTest(test.TestCase): self.assertEqual(rep_specs, rep_extra_specs) def test_get_secondary_stats(self): - rep_config = self.utils.get_replication_config( - [self.replication_device]) + rep_config = self.data.rep_config_sync array_map = self.common.get_attributes_from_cinder_config() finalarrayinfolist = self.common._get_slo_workload_combinations( array_map) @@ -342,14 +332,16 @@ class PowerMaxReplicationTest(test.TestCase): self.assertEqual(ref_info, secondary_info) @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata', - return_value='') + return_value=tpd.PowerMaxData.volume_metadata) def test_replicate_group(self, mck_meta): volume_model_update = { 'id': self.data.test_volume.id, 'provider_location': self.data.test_volume.provider_location} + extra_specs = deepcopy(self.data.extra_specs_rep_enabled) + extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync vols_model_update = self.common._replicate_group( self.data.array, [volume_model_update], - self.data.test_vol_grp_name, self.extra_specs) + self.data.test_vol_grp_name, extra_specs) ref_rep_data = {'array': self.data.remote_array, 'device_id': self.data.device_id2} ref_vol_update = { @@ -357,7 +349,7 @@ class PowerMaxReplicationTest(test.TestCase): 'provider_location': self.data.test_volume.provider_location, 'replication_driver_data': ref_rep_data, 'replication_status': fields.ReplicationStatus.ENABLED, - 'metadata': ''} + 'metadata': self.data.volume_metadata} # Decode string representations of dicts into dicts, because # the string representations are randomly ordered and therefore @@ -367,19 +359,34 @@ class PowerMaxReplicationTest(test.TestCase): self.assertEqual(ref_vol_update, vols_model_update[0]) + @mock.patch.object( + utils.PowerMaxUtils, 
'validate_non_replication_group_config') + @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True) + @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) + def test_create_group(self, mock_type, mock_cg_type, mck_validate): + ref_model_update = { + 'status': fields.GroupStatus.AVAILABLE} + model_update = self.common.create_group(None, self.data.test_group_1) + self.assertEqual(ref_model_update, model_update) + extra_specs_list = [self.data.vol_type_extra_specs_rep_enabled] + mck_validate.assert_called_once_with(extra_specs_list) + + @mock.patch.object( + utils.PowerMaxUtils, 'validate_replication_group_config') @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=False) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=True) - def test_create_replicaton_group(self, mock_type, mock_cg_type): + def test_create_replicaton_group( + self, mock_type, mock_cg_type, mck_validate): ref_model_update = { 'status': fields.GroupStatus.AVAILABLE, 'replication_status': fields.ReplicationStatus.ENABLED} model_update = self.common.create_group(None, self.data.test_group_1) self.assertEqual(ref_model_update, model_update) - # Replication mode is async - self.assertRaises(exception.InvalidInput, - self.async_driver.common.create_group, - None, self.data.test_group_1) + extra_specs_list = [self.data.vol_type_extra_specs_rep_enabled] + mck_validate.assert_called_once_with( + self.common.rep_configs, extra_specs_list) def test_enable_replication(self): # Case 1: Group not replicated @@ -439,9 +446,9 @@ class PowerMaxReplicationTest(test.TestCase): self.assertEqual({}, model_update) @mock.patch.object(rest.PowerMaxRest, 'srdf_failover_group', - return_value=tpd.PowerMaxData.rdf_group_no) + return_value=tpd.PowerMaxData.rdf_group_no_1) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', - return_value=tpd.PowerMaxData.rdf_group_no) + return_value=tpd.PowerMaxData.rdf_group_no_1) 
@mock.patch.object(common.PowerMaxCommon, '_find_volume_group', return_value=tpd.PowerMaxData.test_group) def test_failover_replication_failover(self, mck_find_vol_grp, @@ -456,9 +463,9 @@ class PowerMaxReplicationTest(test.TestCase): model_update['replication_status']) @mock.patch.object(rest.PowerMaxRest, 'srdf_failover_group', - return_value=tpd.PowerMaxData.rdf_group_no) + return_value=tpd.PowerMaxData.rdf_group_no_1) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', - return_value=tpd.PowerMaxData.rdf_group_no) + return_value=tpd.PowerMaxData.rdf_group_no_1) @mock.patch.object(common.PowerMaxCommon, '_find_volume_group', return_value=tpd.PowerMaxData.test_group) def test_failover_replication_failback(self, mck_find_vol_grp, @@ -488,13 +495,18 @@ class PowerMaxReplicationTest(test.TestCase): self.assertEqual(fields.ReplicationStatus.ERROR, model_update['replication_status']) + @mock.patch.object(utils.PowerMaxUtils, 'get_volumetype_extra_specs', + return_value={utils.REPLICATION_DEVICE_BACKEND_ID: + tpd.PowerMaxData.rep_backend_id_sync}) @mock.patch.object(utils.PowerMaxUtils, 'get_volume_group_utils', return_value=(tpd.PowerMaxData.array, {})) @mock.patch.object(common.PowerMaxCommon, '_cleanup_group_replication') @mock.patch.object(volume_utils, 'is_group_a_type', return_value=True) def test_delete_replication_group(self, mock_check, - mock_cleanup, mock_utils): - self.common._delete_group(self.data.test_rep_group, []) + mock_cleanup, mock_utils, mock_get): + group = self.data.test_rep_group + group['volume_types'] = self.data.test_volume_type_list + self.common._delete_group(group, []) mock_cleanup.assert_called_once() @mock.patch.object(masking.PowerMaxMasking, @@ -531,7 +543,7 @@ class PowerMaxReplicationTest(test.TestCase): def test_cleanup_group_replication(self, mock_rm, mock_rm_reset): self.common._cleanup_group_replication( self.data.array, self.data.test_vol_grp_name, - [self.data.device_id], self.extra_specs) + [self.data.device_id], 
self.extra_specs, self.data.rep_config_sync) mock_rm.assert_called_once() @mock.patch.object(common.PowerMaxCommon, '_failover_replication', @@ -582,7 +594,7 @@ class PowerMaxReplicationTest(test.TestCase): external_ref = {u'source-name': u'00002'} volume = self.data.test_volume ref_model_update = { - 'metadata': {}, + 'metadata': {'BackendID': 'None'}, 'provider_location': six.text_type({ 'device_id': self.data.device_id, 'array': self.data.array}), @@ -640,13 +652,14 @@ class PowerMaxReplicationTest(test.TestCase): {})) @mock.patch.object( common.PowerMaxCommon, '_initial_setup', - return_value=tpd.PowerMaxData.rep_extra_specs) + return_value=tpd.PowerMaxData.rep_extra_specs_rep_config) def test_create_rep_volume(self, mck_initial, mck_create, mck_meta): ref_model_update = ( {'provider_location': six.text_type(self.data.provider_location), 'replication_driver_data': ( tpd.PowerMaxData.provider_location2), - 'metadata': {'device-meta-key-1': 'device-meta-value-1', + 'metadata': {'BackendID': self.data.rep_backend_id_sync, + 'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2', 'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'}}) @@ -657,7 +670,7 @@ class PowerMaxReplicationTest(test.TestCase): self.assertEqual(ref_model_update, model_update) @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata', - return_value='') + return_value={}) @mock.patch.object( common.PowerMaxCommon, '_create_cloned_volume', return_value=( @@ -666,14 +679,15 @@ class PowerMaxReplicationTest(test.TestCase): def test_create_rep_volume_from_snapshot(self, mck_meta, mck_clone_chk): ref_model_update = ( {'provider_location': six.text_type(self.data.provider_location), - 'metadata': ''}) + 'metadata': {'BackendID': self.data.rep_backend_id_sync}}) ref_model_update.update(self.data.replication_update) model_update = self.common.create_volume_from_snapshot( self.data.test_clone_volume, self.data.test_snapshot) 
self.assertEqual(ref_model_update, model_update) @mock.patch.object( - common.PowerMaxCommon, 'get_volume_metadata', return_value='') + common.PowerMaxCommon, 'get_volume_metadata', + return_value=tpd.PowerMaxData.volume_metadata) @mock.patch.object( common.PowerMaxCommon, '_create_cloned_volume', return_value=( @@ -681,9 +695,12 @@ class PowerMaxReplicationTest(test.TestCase): tpd.PowerMaxData.replication_update, {})) @mock.patch.object(common.PowerMaxCommon, '_clone_check') def test_cloned_rep_volume(self, mck_clone, mck_meta, mck_clone_chk): + metadata = deepcopy(self.data.volume_metadata) + metadata['BackendID'] = self.data.rep_backend_id_sync ref_model_update = { 'provider_location': six.text_type( - self.data.provider_location_clone), 'metadata': ''} + self.data.provider_location_clone), + 'metadata': metadata} ref_model_update.update(self.data.replication_update) model_update = self.common.create_cloned_volume( self.data.test_clone_volume, self.data.test_volume) @@ -703,36 +720,37 @@ class PowerMaxReplicationTest(test.TestCase): return_value=tpd.PowerMaxData.default_sg_re_enabled) @mock.patch.object( common.PowerMaxCommon, 'prepare_replication_details', - return_value=(True, tpd.PowerMaxData.rep_extra_specs_mgmt, {})) + return_value=(True, tpd.PowerMaxData.rep_extra_specs_rep_config, + {}, True)) @mock.patch.object( provision.PowerMaxProvision, 'verify_slo_workload', return_value=(True, True)) def test_create_volume_rep_enabled( self, mck_slo, mck_prep, mck_get, mck_create, mck_protect, mck_set, mck_add): + volume = self.data.test_volume volume_name = self.data.volume_id volume_size = 1 extra_specs = deepcopy(self.data.rep_extra_specs) - extra_specs[utils.SLO] = utils.REP_ASYNC - self.common.rep_config = {'rdf_group_label': self.data.rdf_group_name} - + extra_specs['mode'] = utils.REP_ASYNC volume_dict, rep_update, rep_info_dict = self.common._create_volume( - volume_name, volume_size, extra_specs) + volume, volume_name, volume_size, extra_specs) 
self.assertEqual(self.data.provider_location, volume_dict) self.assertEqual(self.data.replication_update, rep_update) self.assertIsNone(rep_info_dict) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', - return_value=(tpd.PowerMaxData.rdf_group_no, None)) + return_value=(tpd.PowerMaxData.rdf_group_no_1, None)) @mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled', side_effect=[False, True]) def test_remove_vol_and_cleanup_replication(self, mck_rep, mck_get): array = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 device_id = self.data.device_id volume = self.data.test_volume volume_name = self.data.test_volume.name - extra_specs = self.data.extra_specs + extra_specs = deepcopy(self.data.extra_specs) + extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync with mock.patch.object( self.masking, 'remove_and_reset_members') as mock_rm: @@ -747,6 +765,30 @@ class PowerMaxReplicationTest(test.TestCase): mock_clean.assert_called_once_with( array, rdf_group_no, device_id, extra_specs) + @mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled', + return_value=False) + def test_remove_vol_and_cleanup_replication_host_assisted_migration( + self, mck_rep): + array = self.data.array + device_id = self.data.device_id + volume = deepcopy(self.data.test_volume) + volume.migration_status = 'deleting' + metadata = deepcopy(self.data.volume_metadata) + metadata[utils.IS_RE_CAMEL] = 'False' + volume.metadata = metadata + volume_name = self.data.test_volume.name + extra_specs = deepcopy(self.data.rep_extra_specs) + extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync + ref_extra_specs = deepcopy(extra_specs) + ref_extra_specs.pop(utils.IS_RE) + + with mock.patch.object( + self.masking, 'remove_and_reset_members') as mock_rm: + self.common._remove_vol_and_cleanup_replication( + array, device_id, volume_name, extra_specs, volume) + mock_rm.assert_called_once_with( + array, volume, device_id, 
volume_name, ref_extra_specs, False) + @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value='') @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') @@ -756,7 +798,7 @@ class PowerMaxReplicationTest(test.TestCase): @mock.patch.object( common.PowerMaxCommon, 'break_rdf_device_pair_session', return_value=({'mgmt_sg_name': tpd.PowerMaxData.rdf_managed_async_grp, - 'rdf_group_no': tpd.PowerMaxData.rdf_group_no}, True)) + 'rdf_group_no': tpd.PowerMaxData.rdf_group_no_1}, True)) def test_migrate_volume_success_rep_to_no_rep( self, mck_break, mck_retype, mck_resume, mck_get): array_id = self.data.array @@ -786,6 +828,7 @@ class PowerMaxReplicationTest(test.TestCase): target_slo, target_workload, target_extra_specs) self.assertTrue(success) + @mock.patch.object(common.PowerMaxCommon, '_sync_check') @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value='') @mock.patch.object( @@ -802,7 +845,7 @@ class PowerMaxReplicationTest(test.TestCase): 'remote_array': tpd.PowerMaxData.remote_array}, tpd.PowerMaxData.rep_extra_specs, False)) def test_migrate_volume_success_no_rep_to_rep( - self, mck_configure, mck_retype, mck_protect, mck_get): + self, mck_configure, mck_retype, mck_protect, mck_get, mck_check): self.common.rep_config = {'mode': utils.REP_SYNC, 'array': self.data.array} array_id = self.data.array @@ -812,7 +855,9 @@ class PowerMaxReplicationTest(test.TestCase): target_slo = self.data.slo_silver target_workload = self.data.workload volume_name = volume.name - extra_specs = self.data.extra_specs + extra_specs = deepcopy(self.data.extra_specs) + rep_config_sync = deepcopy(self.data.rep_config_sync) + rep_config_sync[utils.RDF_CONS_EXEMPT] = False new_type = {'extra_specs': self.data.rep_extra_specs} target_extra_specs = deepcopy(new_type['extra_specs']) @@ -821,7 +866,8 @@ class PowerMaxReplicationTest(test.TestCase): utils.WORKLOAD: target_workload, utils.INTERVAL: extra_specs[utils.INTERVAL], utils.RETRIES: 
extra_specs[utils.RETRIES], - utils.DISABLECOMPRESSION: False, utils.REP_MODE: utils.REP_SYNC}) + utils.DISABLECOMPRESSION: False, utils.REP_MODE: utils.REP_SYNC, + utils.REP_CONFIG: rep_config_sync}) success, model_update = self.common._migrate_volume( array_id, volume, device_id, srp, target_slo, target_workload, @@ -851,6 +897,8 @@ class PowerMaxReplicationTest(test.TestCase): target_slo = self.data.slo_silver target_workload = self.data.workload volume_name = volume.name + rep_config_sync = deepcopy(self.data.rep_config_sync) + rep_config_sync[utils.RDF_CONS_EXEMPT] = False extra_specs = deepcopy(self.data.rep_extra_specs) extra_specs[utils.SLO] = self.data.slo_diamond new_type = {'extra_specs': self.data.rep_extra_specs} @@ -861,7 +909,8 @@ class PowerMaxReplicationTest(test.TestCase): utils.WORKLOAD: target_workload, utils.INTERVAL: extra_specs[utils.INTERVAL], utils.RETRIES: extra_specs[utils.RETRIES], - utils.DISABLECOMPRESSION: False, utils.REP_MODE: utils.REP_SYNC}) + utils.DISABLECOMPRESSION: False, utils.REP_MODE: utils.REP_SYNC, + utils.REP_CONFIG: rep_config_sync}) success, model_update = self.common._migrate_volume( array_id, volume, device_id, srp, target_slo, target_workload, @@ -885,7 +934,8 @@ class PowerMaxReplicationTest(test.TestCase): volume_name = self.data.volume_id remote_array = self.data.remote_array target_device_id = self.data.device_id2 - extra_specs = self.data.rep_extra_specs + extra_specs = deepcopy(self.data.rep_extra_specs) + extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync group_name = self.data.rdf_managed_async_grp get_create_grp_calls = [ @@ -931,10 +981,11 @@ class PowerMaxReplicationTest(test.TestCase): remote_array = self.data.remote_array device_id = self.data.device_id target_device_id = self.data.device_id2 - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 volume_name = self.data.volume_id rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs[utils.REP_MODE] = 
utils.REP_METRO + rep_extra_specs[utils.REP_CONFIG] = self.data.rep_config_metro sg_name = self.data.default_sg_re_enabled async_grp = self.data.rdf_managed_async_grp pair_state = utils.RDF_SYNC_STATE @@ -949,7 +1000,7 @@ class PowerMaxReplicationTest(test.TestCase): rdf_group_no, volume_name, rep_extra_specs) mck_paired.assert_called_once_with( array, remote_array, device_id, target_device_id) - mck_get_rdf.assert_called_once_with(self.common.rep_config) + mck_get_rdf.assert_called_once_with(self.data.rep_config_metro) mck_get_sg.assert_called_once_with(array, device_id) mck_break.assert_called_once_with( array, device_id, sg_name, rdf_group_no, rep_extra_specs, @@ -986,7 +1037,7 @@ class PowerMaxReplicationTest(test.TestCase): remote_array = self.data.remote_array device_id = self.data.device_id target_device_id = self.data.device_id2 - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 volume_name = self.data.volume_id rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs[utils.REP_MODE] = utils.REP_SYNC @@ -1022,15 +1073,16 @@ class PowerMaxReplicationTest(test.TestCase): return_value=tpd.PowerMaxData.rep_extra_specs) @mock.patch.object( common.PowerMaxCommon, 'get_rdf_details', - return_value=(tpd.PowerMaxData.rdf_group_no, + return_value=(tpd.PowerMaxData.rdf_group_no_1, tpd.PowerMaxData.remote_array)) def test_cleanup_rdf_device_pair_vol_cnt_exception( self, mck_get_rdf, mck_get_rep, mck_get_rdf_pair, mck_get_sg_list): array = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 device_id = self.data.device_id extra_specs = deepcopy(self.data.rep_extra_specs) extra_specs[utils.REP_MODE] = utils.REP_SYNC + extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync self.assertRaises( exception.VolumeBackendAPIException, self.common.cleanup_rdf_device_pair, array, rdf_group_no, @@ -1066,19 +1118,20 @@ class PowerMaxReplicationTest(test.TestCase): 
return_value=tpd.PowerMaxData.rep_extra_specs_mgmt) @mock.patch.object( common.PowerMaxCommon, 'get_rdf_details', - return_value=(tpd.PowerMaxData.rdf_group_no, + return_value=(tpd.PowerMaxData.rdf_group_no_1, tpd.PowerMaxData.remote_array)) def test_cleanup_rdf_device_pair( self, mck_get_rdf, mck_get_rep, mck_get_rdf_pair, mck_get_sg_list, mck_wait, mck_get_mgmt_grp, mck_get_num_vols, mck_suspend, mck_srdf_remove, mck_remove, mck_delete, mck_cleanup, mck_resume): array = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 device_id = self.data.device_id target_device_id = self.data.device_id2 extra_specs = deepcopy(self.data.rep_extra_specs) extra_specs[utils.REP_MODE] = utils.REP_METRO - rep_extra_specs = self.data.rep_extra_specs_mgmt + extra_specs[utils.REP_CONFIG] = self.data.rep_config_metro + rep_extra_specs = deepcopy(self.data.rep_extra_specs_mgmt) rdf_mgmt_grp = self.data.rdf_managed_async_grp self.common.cleanup_rdf_device_pair( @@ -1101,24 +1154,25 @@ class PowerMaxReplicationTest(test.TestCase): return_value=tpd.PowerMaxData.rep_extra_specs_mgmt) def test_prepare_replication_details(self, mck_get_rep, mck_get_vols): extra_specs = deepcopy(self.data.extra_specs_rep_enabled) - extra_specs['rep_mode'] = 'Synchronous' extra_specs['workload'] = 'NONE' - + extra_specs['rep_mode'] = utils.REP_SYNC + extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync rep_extra_specs = self.data.rep_extra_specs_mgmt ref_info_dict = { 'initial_device_list': ['00001', '00002'], 'local_array': self.data.array, - 'rdf_group_no': self.data.rdf_group_no, + 'rdf_group_no': self.data.rdf_group_no_1, 'remote_array': self.data.remote_array, 'rep_mode': utils.REP_SYNC, 'service_level': self.data.slo_diamond, 'sg_name': self.data.default_sg_no_slo_re_enabled, 'sync_interval': 2, 'sync_retries': 200} - rep_first_vol, resp_extra_specs, rep_info_dict = ( + rep_first_vol, resp_extra_specs, rep_info_dict, rdfg_empty = ( 
self.common.prepare_replication_details(extra_specs)) self.assertFalse(rep_first_vol) self.assertEqual(rep_extra_specs, resp_extra_specs) self.assertEqual(ref_info_dict, rep_info_dict) + self.assertFalse(rdfg_empty) @mock.patch.object( rest.PowerMaxRest, 'srdf_protect_storage_group') @@ -1135,9 +1189,11 @@ class PowerMaxReplicationTest(test.TestCase): volume_dict['storage_group'], rep_extra_specs['slo'], extra_specs) def test_gather_replication_updates(self): - self.common.rep_config = {'rdf_group_label': self.data.rdf_group_name} + self.common.rep_config = { + 'rdf_group_label': self.data.rdf_group_name_1} extra_specs = self.data.rep_extra_specs - rep_extra_specs = self.data.rep_extra_specs_mgmt + rep_extra_specs = deepcopy(self.data.rep_extra_specs_mgmt) + rep_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async volume_dict = {'storage_group': self.data.rdf_managed_async_grp, 'remote_device_id': self.data.device_id2, 'device_uuid': self.data.volume_id} diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py index ec1a4d0c67f..f658c3d7921 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py @@ -506,6 +506,50 @@ class PowerMaxRestTest(test.TestCase): self.data.failed_resource, device_id, self.data.extra_specs) + @mock.patch.object(rest.PowerMaxRest, 'wait_for_job') + def test_remove_vol_from_sg_force_true(self, mck_wait): + device_id = self.data.device_id + extra_specs = deepcopy(self.data.extra_specs) + extra_specs[utils.FORCE_VOL_REMOVE] = True + expected_payload = ( + {"executionOption": "ASYNCHRONOUS", + "editStorageGroupActionParam": { + "removeVolumeParam": { + "volumeId": [device_id], + "remoteSymmSGInfoParam": { + "force": "true"}}}}) + with mock.patch.object( + self.rest, 'modify_storage_group', return_value=( + 200, 
tpd.PowerMaxData.job_list)) as mck_mod: + self.rest.remove_vol_from_sg( + self.data.array, self.data.storagegroup_name_f, device_id, + extra_specs) + mck_mod.assert_called_with( + self.data.array, self.data.storagegroup_name_f, + expected_payload) + + @mock.patch.object(rest.PowerMaxRest, 'wait_for_job') + def test_remove_vol_from_sg_force_false(self, mck_wait): + device_id = self.data.device_id + extra_specs = deepcopy(self.data.extra_specs) + extra_specs.pop(utils.FORCE_VOL_REMOVE, None) + expected_payload = ( + {"executionOption": "ASYNCHRONOUS", + "editStorageGroupActionParam": { + "removeVolumeParam": { + "volumeId": [device_id], + "remoteSymmSGInfoParam": { + "force": "false"}}}}) + with mock.patch.object( + self.rest, 'modify_storage_group', return_value=( + 200, tpd.PowerMaxData.job_list)) as mck_mod: + self.rest.remove_vol_from_sg( + self.data.array, self.data.storagegroup_name_f, device_id, + extra_specs) + mck_mod.assert_called_with( + self.data.array, self.data.storagegroup_name_f, + expected_payload) + def test_get_vmax_default_storage_group(self): ref_storage_group = self.data.sg_details[0] ref_sg_name = self.data.defaultstoragegroup_name @@ -620,8 +664,8 @@ class PowerMaxRestTest(test.TestCase): array = self.data.array device_id = self.data.device_id new_size = '3' - extra_specs = self.data.extra_specs, - rdfg_num = self.data.rdf_group_no + extra_specs = self.data.extra_specs + rdfg_num = self.data.rdf_group_no_1 extend_vol_payload = {'executionOption': 'ASYNCHRONOUS', 'editVolumeActionParam': { @@ -1364,15 +1408,15 @@ class PowerMaxRestTest(test.TestCase): def test_set_storagegroup_srp(self, mock_mod): self.rest.set_storagegroup_srp( self.data.array, self.data.test_vol_grp_name, - self.data.srp2, self.data.extra_specs) + self.data.srp, self.data.extra_specs) mock_mod.assert_called_once() def test_get_rdf_group(self): with mock.patch.object(self.rest, 'get_resource') as mock_get: - self.rest.get_rdf_group(self.data.array, self.data.rdf_group_no) + 
self.rest.get_rdf_group(self.data.array, self.data.rdf_group_no_1) mock_get.assert_called_once_with( self.data.array, 'replication', 'rdf_group', - self.data.rdf_group_no) + self.data.rdf_group_no_1) def test_get_rdf_group_list(self): rdf_list = self.rest.get_rdf_group_list(self.data.array) @@ -1405,17 +1449,17 @@ class PowerMaxRestTest(test.TestCase): def test_get_rdf_group_number(self): rdfg_num = self.rest.get_rdf_group_number( - self.data.array, self.data.rdf_group_name) - self.assertEqual(self.data.rdf_group_no, rdfg_num) + self.data.array, self.data.rdf_group_name_1) + self.assertEqual(self.data.rdf_group_no_1, rdfg_num) with mock.patch.object(self.rest, 'get_rdf_group_list', return_value=None): rdfg_num2 = self.rest.get_rdf_group_number( - self.data.array, self.data.rdf_group_name) + self.data.array, self.data.rdf_group_name_1) self.assertIsNone(rdfg_num2) with mock.patch.object(self.rest, 'get_rdf_group', return_value=None): rdfg_num3 = self.rest.get_rdf_group_number( - self.data.array, self.data.rdf_group_name) + self.data.array, self.data.rdf_group_name_1) self.assertIsNone(rdfg_num3) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_group', @@ -1427,26 +1471,26 @@ class PowerMaxRestTest(test.TestCase): # First volume out, Metro use bias not set act_payload_1 = self.rest.get_metro_payload_info( - self.data.array, payload_in.copy(), self.data.rdf_group_no, {}, + self.data.array, payload_in.copy(), self.data.rdf_group_no_1, {}, True) self.assertEqual(payload_in, act_payload_1) # First volume out, Metro use bias set act_payload_2 = self.rest.get_metro_payload_info( - self.data.array, payload_in.copy(), self.data.rdf_group_no, + self.data.array, payload_in.copy(), self.data.rdf_group_no_1, {'metro_bias': True}, True) self.assertEqual('true', act_payload_2['metroBias']) # Not first vol in RDFG, consistency exempt not set act_payload_3 = self.rest.get_metro_payload_info( - self.data.array, payload_in.copy(), self.data.rdf_group_no, + self.data.array, 
payload_in.copy(), self.data.rdf_group_no_1, {'exempt': False}, False) ref_payload_3 = {'rdfMode': 'Active', 'rdfType': 'RDF1'} self.assertEqual(ref_payload_3, act_payload_3) # Not first vol in RDFG, consistency exempt set act_payload_4 = self.rest.get_metro_payload_info( - self.data.array, payload_in.copy(), self.data.rdf_group_no, + self.data.array, payload_in.copy(), self.data.rdf_group_no_1, {'exempt': True}, True) ref_payload_4 = {'rdfType': 'RDF1', 'exempt': 'true', 'rdfMode': 'Active'} @@ -1504,17 +1548,17 @@ class PowerMaxRestTest(test.TestCase): def test_get_storagegroup_rdf_details(self): details = self.rest.get_storagegroup_rdf_details( self.data.array, self.data.test_vol_grp_name, - self.data.rdf_group_no) + self.data.rdf_group_no_1) self.assertEqual(self.data.sg_rdf_details[0], details) def test_verify_rdf_state(self): verify1 = self.rest._verify_rdf_state( self.data.array, self.data.test_vol_grp_name, - self.data.rdf_group_no, 'Failover') + self.data.rdf_group_no_1, 'Failover') self.assertTrue(verify1) verify2 = self.rest._verify_rdf_state( self.data.array, self.data.test_fo_vol_group, - self.data.rdf_group_no, 'Establish') + self.data.rdf_group_no_1, 'Establish') self.assertTrue(verify2) def test_delete_storagegroup_rdf(self): @@ -1522,7 +1566,7 @@ class PowerMaxRestTest(test.TestCase): self.rest, 'delete_resource') as mock_del: self.rest.delete_storagegroup_rdf( self.data.array, self.data.test_vol_grp_name, - self.data.rdf_group_no) + self.data.rdf_group_no_1) mock_del.assert_called_once() def test_is_next_gen_array(self): @@ -1731,17 +1775,18 @@ class PowerMaxRestTest(test.TestCase): return_value=tpd.PowerMaxData.sg_rdf_group_details) def test_get_storage_group_rdf_group_state(self, mck_get): ref_get_resource = ('storagegroup/%(sg)s/rdf_group/%(rdfg)s' % { - 'sg': self.data.test_vol_grp_name, 'rdfg': self.data.rdf_group_no}) + 'sg': self.data.test_vol_grp_name, + 'rdfg': self.data.rdf_group_no_1}) states = 
self.rest.get_storage_group_rdf_group_state( self.data.array, self.data.test_vol_grp_name, - self.data.rdf_group_no) + self.data.rdf_group_no_1) mck_get.assert_called_once_with( self.data.array, 'replication', ref_get_resource) self.assertEqual(states, [utils.RDF_SUSPENDED_STATE]) @mock.patch.object(rest.PowerMaxRest, 'get_resource') def test_get_rdf_pair_volume(self, mck_get): - rdf_grp_no = self.data.rdf_group_no + rdf_grp_no = self.data.rdf_group_no_1 device_id = self.data.device_id array = self.data.array ref_get_resource = ('rdf_group/%(rdf_group)s/volume/%(device)s' % { @@ -1755,11 +1800,12 @@ class PowerMaxRestTest(test.TestCase): def test_srdf_protect_storage_group(self, mck_create, mck_wait): array_id = self.data.array remote_array_id = self.data.remote_array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 replication_mode = utils.REP_METRO sg_name = self.data.default_sg_re_enabled service_level = 'Diamond' - extra_specs = self.data.rep_extra_specs_metro + extra_specs = deepcopy(self.data.rep_extra_specs) + extra_specs[utils.METROBIAS] = True remote_sg = self.data.rdf_managed_async_grp ref_payload = { @@ -1767,8 +1813,8 @@ class PowerMaxRestTest(test.TestCase): 'replicationMode': 'Active', 'remoteSLO': service_level, 'remoteSymmId': remote_array_id, 'rdfgNumber': rdf_group_no, 'remoteStorageGroupName': remote_sg, 'establish': 'true'} - ref_resource = ('storagegroup/%(sg_name)s/rdf_group' % { - 'sg_name': sg_name}) + ref_resource = ('storagegroup/%(sg_name)s/rdf_group' % + {'sg_name': sg_name}) self.rest.srdf_protect_storage_group( array_id, remote_array_id, rdf_group_no, replication_mode, @@ -1781,7 +1827,7 @@ class PowerMaxRestTest(test.TestCase): return_value=(200, 'job')) def test_srdf_modify_group(self, mck_modify, mck_wait): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled payload = {'executionOption': 'ASYNCHRONOUS', 
'action': 'Suspend'} extra_specs = self.data.rep_extra_specs @@ -1800,7 +1846,7 @@ class PowerMaxRestTest(test.TestCase): return_value=(200, 'job')) def test_srdf_modify_group_async_call_false(self, mck_modify, mck_wait): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled payload = {'action': 'Suspend'} extra_specs = self.data.rep_extra_specs @@ -1819,7 +1865,7 @@ class PowerMaxRestTest(test.TestCase): return_value=[utils.RDF_CONSISTENT_STATE]) def test_srdf_suspend_replication(self, mck_get, mck_modify): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs @@ -1836,7 +1882,7 @@ class PowerMaxRestTest(test.TestCase): def test_srdf_suspend_replication_already_suspended(self, mck_get, mck_modify): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs @@ -1849,9 +1895,11 @@ class PowerMaxRestTest(test.TestCase): return_value=[utils.RDF_SUSPENDED_STATE]) def test_srdf_resume_replication(self, mck_get, mck_modify): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs + rep_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async + rep_extra_specs[utils.REP_MODE] = utils.REP_ASYNC self.rest.srdf_resume_replication( array_id, sg_name, rdf_group_no, rep_extra_specs) @@ -1864,9 +1912,10 @@ class PowerMaxRestTest(test.TestCase): return_value=[utils.RDF_SUSPENDED_STATE]) def test_srdf_resume_replication_metro(self, mck_get, mck_modify): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = 
self.data.default_sg_re_enabled - rep_extra_specs = self.data.rep_extra_specs_metro + rep_extra_specs = deepcopy(self.data.rep_extra_specs_metro) + rep_extra_specs[utils.REP_MODE] = utils.REP_METRO self.rest.srdf_resume_replication( array_id, sg_name, rdf_group_no, rep_extra_specs) @@ -1881,7 +1930,7 @@ class PowerMaxRestTest(test.TestCase): def test_srdf_resume_replication_already_resumed(self, mck_get, mck_modify): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs @@ -1894,7 +1943,7 @@ class PowerMaxRestTest(test.TestCase): return_value=[utils.RDF_CONSISTENT_STATE]) def test_srdf_establish_replication(self, mck_get, mck_modify): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs @@ -1912,7 +1961,7 @@ class PowerMaxRestTest(test.TestCase): @mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group') def test_srdf_failover_group(self, mck_modify): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs @@ -1925,7 +1974,7 @@ class PowerMaxRestTest(test.TestCase): @mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group') def test_srdf_failback_group(self, mck_modify): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs @@ -1961,7 +2010,7 @@ class PowerMaxRestTest(test.TestCase): @mock.patch.object(rest.PowerMaxRest, 'delete_resource') def test_srdf_delete_device_pair(self, mck_del): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 device_id = 
self.data.device_id ref_resource = ('%(rdfg)s/volume/%(dev)s' % { 'rdfg': rdf_group_no, 'dev': device_id}) @@ -1981,11 +2030,12 @@ class PowerMaxRestTest(test.TestCase): self, mck_create, mck_wait, mck_get): array_id = self.data.array remote_array = self.data.remote_array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 mode = utils.REP_ASYNC device_id = self.data.device_id tgt_device_id = self.data.device_id2 rep_extra_specs = self.data.rep_extra_specs + rep_extra_specs['array'] = remote_array ref_payload = { 'executionOption': 'ASYNCHRONOUS', 'rdfMode': mode, @@ -2015,11 +2065,12 @@ class PowerMaxRestTest(test.TestCase): self, mck_create, mck_wait, mck_get): array_id = self.data.array remote_array = self.data.remote_array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 mode = utils.REP_SYNC device_id = self.data.device_id tgt_device_id = self.data.device_id2 rep_extra_specs = self.data.rep_extra_specs + rep_extra_specs[utils.ARRAY] = remote_array ref_payload = { 'executionOption': 'ASYNCHRONOUS', 'rdfMode': mode, @@ -2043,7 +2094,7 @@ class PowerMaxRestTest(test.TestCase): return_value=[utils.RDF_CONSISTENT_STATE]) def test_wait_for_rdf_group_sync(self, mck_get): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs['sync_retries'] = 2 @@ -2057,7 +2108,7 @@ class PowerMaxRestTest(test.TestCase): return_value=[utils.RDF_SYNCINPROG_STATE]) def test_wait_for_rdf_group_sync_fail(self, mck_get): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs['sync_retries'] = 1 @@ -2071,7 +2122,7 @@ class PowerMaxRestTest(test.TestCase): 
return_value=tpd.PowerMaxData.rdf_group_vol_details) def test_wait_for_rdf_pair_sync(self, mck_get): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs['sync_retries'] = 2 @@ -2086,7 +2137,7 @@ class PowerMaxRestTest(test.TestCase): return_value=tpd.PowerMaxData.rdf_group_vol_details_not_synced) def test_wait_for_rdf_pair_sync_fail(self, mck_get): array_id = self.data.array - rdf_group_no = self.data.rdf_group_no + rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs['sync_retries'] = 1 diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_utils.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_utils.py index 80efa709f9b..dc1c8c90678 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_utils.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_utils.py @@ -41,10 +41,12 @@ class PowerMaxUtilsTest(test.TestCase): self.data = tpd.PowerMaxData() volume_utils.get_max_over_subscription_ratio = mock.Mock() super(PowerMaxUtilsTest, self).setUp() + self.replication_device = self.data.sync_rep_device configuration = tpfo.FakeConfiguration( None, 'UtilsTests', 1, 1, san_ip='1.1.1.1', san_login='smc', vmax_array=self.data.array, vmax_srp='SRP_1', san_password='smc', - san_api_port=8443, vmax_port_groups=[self.data.port_group_name_i]) + san_api_port=8443, vmax_port_groups=[self.data.port_group_name_i], + replication_device=self.replication_device) rest.PowerMaxRest._establish_rest_session = mock.Mock( return_value=tpfo.FakeRequestsSession()) driver = iscsi.PowerMaxISCSIDriver(configuration=configuration) @@ -268,13 +270,13 @@ class PowerMaxUtilsTest(test.TestCase): rep_device_list1 = [{'target_device_id': 
self.data.remote_array, 'remote_pool': self.data.srp, 'remote_port_group': self.data.port_group_name_f, - 'rdf_group_label': self.data.rdf_group_name}] - rep_config1 = self.utils.get_replication_config(rep_device_list1) + 'rdf_group_label': self.data.rdf_group_name_1}] + rep_config1 = self.utils.get_replication_config(rep_device_list1)[0] self.assertEqual(self.data.remote_array, rep_config1['array']) # Success, allow_extend true rep_device_list2 = rep_device_list1 rep_device_list2[0]['allow_extend'] = 'true' - rep_config2 = self.utils.get_replication_config(rep_device_list2) + rep_config2 = self.utils.get_replication_config(rep_device_list2)[0] self.assertTrue(rep_config2['allow_extend']) # No rep_device_list rep_device_list3 = [] @@ -288,34 +290,50 @@ class PowerMaxUtilsTest(test.TestCase): # Success, mode is async rep_device_list5 = rep_device_list2 rep_device_list5[0]['mode'] = 'async' - rep_config5 = self.utils.get_replication_config(rep_device_list5) + rep_config5 = self.utils.get_replication_config(rep_device_list5)[0] self.assertEqual(utils.REP_ASYNC, rep_config5['mode']) # Success, mode is metro - no other options set rep_device_list6 = rep_device_list5 rep_device_list6[0]['mode'] = 'metro' - rep_config6 = self.utils.get_replication_config(rep_device_list6) + rep_config6 = self.utils.get_replication_config(rep_device_list6)[0] self.assertFalse(rep_config6['metro_bias']) # Success, mode is metro - metro options true rep_device_list7 = rep_device_list6 - rep_device_list6[0].update({'metro_use_bias': 'true'}) - rep_config7 = self.utils.get_replication_config(rep_device_list7) + rep_device_list7[0].update({'metro_use_bias': 'true'}) + rep_config7 = self.utils.get_replication_config(rep_device_list7)[0] self.assertTrue(rep_config7['metro_bias']) + # Success, no backend id + self.assertIsNone(rep_config7.get(utils.BACKEND_ID)) + # Success, backend id + rep_device_list8 = rep_device_list6 + rep_device_list8[0].update( + {utils.BACKEND_ID: 
self.data.rep_backend_id_sync}) + rep_config8 = self.utils.get_replication_config(rep_device_list8)[0] + self.assertEqual( + self.data.rep_backend_id_sync, rep_config8[utils.BACKEND_ID]) + # Success, multi-rep + multi_rep_device_list = self.data.multi_rep_device + multi_rep_config = self.utils.get_replication_config( + multi_rep_device_list) + self.assertTrue(len(multi_rep_config) > 1) + for rep_config in multi_rep_config: + self.assertEqual(rep_config['array'], self.data.remote_array) def test_get_replication_config_sync_retries_intervals(self): # Default sync interval & retry values rep_device_list1 = [{'target_device_id': self.data.remote_array, 'remote_pool': self.data.srp, 'remote_port_group': self.data.port_group_name_f, - 'rdf_group_label': self.data.rdf_group_name}] + 'rdf_group_label': self.data.rdf_group_name_1}] - rep_config1 = self.utils.get_replication_config(rep_device_list1) + rep_config1 = self.utils.get_replication_config(rep_device_list1)[0] self.assertEqual(200, rep_config1['sync_retries']) self.assertEqual(3, rep_config1['sync_interval']) # User set interval & retry values rep_device_list2 = deepcopy(rep_device_list1) rep_device_list2[0].update({'sync_retries': 300, 'sync_interval': 1}) - rep_config2 = self.utils.get_replication_config(rep_device_list2) + rep_config2 = self.utils.get_replication_config(rep_device_list2)[0] self.assertEqual(300, rep_config2['sync_retries']) self.assertEqual(1, rep_config2['sync_interval']) @@ -421,7 +439,7 @@ class PowerMaxUtilsTest(test.TestCase): self.assertEqual('-RM', metro_prefix) def test_get_rdf_management_group_name(self): - rep_config = {'rdf_group_label': self.data.rdf_group_name, + rep_config = {'rdf_group_label': self.data.rdf_group_name_1, 'mode': utils.REP_ASYNC} grp_name = self.utils.get_rdf_management_group_name(rep_config) self.assertEqual(self.data.rdf_managed_async_grp, grp_name) @@ -438,6 +456,7 @@ class PowerMaxUtilsTest(test.TestCase): def test_does_vol_need_rdf_management_group(self): 
extra_specs = deepcopy(self.data.rep_extra_specs) + extra_specs['rep_mode'] = utils.REP_SYNC self.assertFalse(self.utils.does_vol_need_rdf_management_group( extra_specs)) extra_specs[utils.REP_MODE] = utils.REP_ASYNC @@ -459,9 +478,36 @@ class PowerMaxUtilsTest(test.TestCase): self.assertEqual(expected_snap_name, updated_name) def test_change_replication(self): - new_type = {'extra_specs': self.data.extra_specs_rep_enabled} - self.assertFalse(self.utils.change_replication(True, new_type)) - self.assertTrue(self.utils.change_replication(False, new_type)) + non_rep_extra_specs = self.data.extra_specs + rep_extra_specs = self.data.extra_specs_rep_enabled + change_rep = self.utils.change_replication( + non_rep_extra_specs, rep_extra_specs) + self.assertTrue(change_rep) + + def test_change_replication_different_backend_id(self): + rep_extra_specs_a = deepcopy(self.data.extra_specs_rep_enabled) + rep_extra_specs_a[utils.REPLICATION_DEVICE_BACKEND_ID] = 'A' + rep_extra_specs_b = deepcopy(self.data.extra_specs_rep_enabled) + rep_extra_specs_b[utils.REPLICATION_DEVICE_BACKEND_ID] = 'B' + change_rep = self.utils.change_replication( + rep_extra_specs_a, rep_extra_specs_b) + self.assertTrue(change_rep) + + def test_change_replication_no_change(self): + non_rep_extra_specs_a = self.data.extra_specs + non_rep_extra_specs_b = self.data.extra_specs + change_rep = self.utils.change_replication( + non_rep_extra_specs_a, non_rep_extra_specs_b) + self.assertFalse(change_rep) + + def test_change_replication_no_change_same_backend_id(self): + rep_extra_specs_a = deepcopy(self.data.extra_specs_rep_enabled) + rep_extra_specs_a[utils.REPLICATION_DEVICE_BACKEND_ID] = 'A' + rep_extra_specs_b = deepcopy(self.data.extra_specs_rep_enabled) + rep_extra_specs_b[utils.REPLICATION_DEVICE_BACKEND_ID] = 'A' + change_rep = self.utils.change_replication( + rep_extra_specs_a, rep_extra_specs_b) + self.assertFalse(change_rep) def test_get_child_sg_name(self): host_name = 'HostX' @@ -1158,8 +1204,8 @@ 
class PowerMaxUtilsTest(test.TestCase): def test_get_unique_device_ids_from_lists(self): list_a = ['00001', '00002', '00003'] list_b = ['00002', '00003', '00004'] - unique_ids = self.utils.get_unique_device_ids_from_lists(list_a, - list_b) + unique_ids = self.utils.get_unique_device_ids_from_lists( + list_a, list_b) self.assertEqual(['00004'], unique_ids) def test_update_payload_for_rdf_vol_create(self): @@ -1195,23 +1241,305 @@ class PowerMaxUtilsTest(test.TestCase): tgt_extra_specs = deepcopy(self.data.rep_extra_specs) tgt_extra_specs['rep_mode'] = utils.REP_METRO - self.assertTrue(self.utils.is_retype_supported(volume, src_extra_specs, - tgt_extra_specs)) + rep_configs = self.data.multi_rep_config_list + src_extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = ( + self.data.rep_backend_id_sync) + tgt_extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = ( + self.data.rep_backend_id_metro) + + self.assertTrue(self.utils.is_retype_supported( + volume, src_extra_specs, tgt_extra_specs, rep_configs)) # Volume source type not replicated, target type Metro replicated, # volume is attached, host-assisted retype not supported volume.attach_status = 'attached' self.assertFalse(self.utils.is_retype_supported( - volume, src_extra_specs, tgt_extra_specs)) + volume, src_extra_specs, tgt_extra_specs, rep_configs)) # Volume source type Async replicated, target type Metro replicated, # volume is attached, host-assisted retype not supported src_extra_specs['rep_mode'] = utils.REP_ASYNC self.assertFalse(self.utils.is_retype_supported( - volume, src_extra_specs, tgt_extra_specs)) + volume, src_extra_specs, tgt_extra_specs, rep_configs)) # Volume source type Metro replicated, target type Metro replicated, # volume is attached, host-assisted retype supported src_extra_specs['rep_mode'] = utils.REP_METRO self.assertTrue(self.utils.is_retype_supported( - volume, src_extra_specs, tgt_extra_specs)) + volume, src_extra_specs, tgt_extra_specs, rep_configs)) + + def 
test_validate_multiple_rep_device(self): + self.utils.validate_multiple_rep_device(self.data.multi_rep_device) + + def test_validate_multiple_rep_device_non_unique_backend_id(self): + rep_devices = self.data.multi_rep_device + rep_devices[0][utils.BACKEND_ID] = rep_devices[1][utils.BACKEND_ID] + self.assertRaises( + exception.InvalidConfigurationValue, + self.utils.validate_multiple_rep_device, + self.data.multi_rep_device) + + def test_validate_multiple_rep_device_missing_backend_id(self): + rep_devices = self.data.multi_rep_device + rep_devices[0].pop(utils.BACKEND_ID) + self.assertRaises( + exception.InvalidConfigurationValue, + self.utils.validate_multiple_rep_device, + self.data.multi_rep_device) + + def test_validate_multiple_rep_device_non_unique_rdf_label(self): + rep_devices = self.data.multi_rep_device + rep_devices[0]['rdf_group_label'] = rep_devices[1]['rdf_group_label'] + self.assertRaises( + exception.InvalidConfigurationValue, + self.utils.validate_multiple_rep_device, + self.data.multi_rep_device) + + def test_validate_multiple_rep_device_non_unique_rdf_modes(self): + rep_devices = [self.data.rep_dev_1, self.data.rep_dev_2] + rep_devices[1]['mode'] = rep_devices[0]['mode'] + self.assertRaises( + exception.InvalidConfigurationValue, + self.utils.validate_multiple_rep_device, + rep_devices) + + def test_validate_multiple_rep_device_multiple_targets(self): + rep_devices = [self.data.rep_dev_1, self.data.rep_dev_2] + rep_devices[1]['target_device_id'] = 1234 + self.assertRaises( + exception.InvalidConfigurationValue, + self.utils.validate_multiple_rep_device, + rep_devices) + + def test_get_rep_config_single_rep(self): + rep_configs = self.data.sync_rep_config_list + rep_config = self.utils.get_rep_config('test', rep_configs) + self.assertEqual(rep_config, rep_configs[0]) + + def test_get_rep_config_multi_rep(self): + rep_configs = self.data.multi_rep_config_list + backend_id = rep_configs[0][utils.BACKEND_ID] + rep_device = 
self.utils.get_rep_config(backend_id, rep_configs) + self.assertEqual(rep_configs[0], rep_device) + + def test_get_rep_config_fail(self): + rep_configs = self.data.multi_rep_config_list + backend_id = 'invalid key' + self.assertRaises(exception.InvalidInput, self.utils.get_rep_config, + backend_id, rep_configs) + + def test_get_replication_targets(self): + rep_targets_expected = [self.data.remote_array] + rep_configs = self.data.multi_rep_config_list + rep_targets_actual = self.utils.get_replication_targets(rep_configs) + self.assertEqual(rep_targets_expected, rep_targets_actual) + + def test_validate_failover_request_success(self): + is_failed_over = False + failover_backend_id = self.data.rep_backend_id_sync + rep_configs = self.data.multi_rep_config_list + is_valid, msg = self.utils.validate_failover_request( + is_failed_over, failover_backend_id, rep_configs) + self.assertTrue(is_valid) + self.assertEqual("", msg) + + def test_validate_failover_request_already_failed_over(self): + is_failed_over = True + failover_backend_id = self.data.rep_backend_id_sync + rep_configs = self.data.multi_rep_config_list + is_valid, msg = self.utils.validate_failover_request( + is_failed_over, failover_backend_id, rep_configs) + self.assertFalse(is_valid) + expected_msg = ('Cannot failover, the backend is already in a failed ' + 'over state, if you meant to failback, please add ' + '--backend_id default to the command.') + self.assertEqual(expected_msg, msg) + + def test_validate_failover_request_invalid_failback(self): + is_failed_over = False + failover_backend_id = 'default' + rep_configs = self.data.multi_rep_config_list + is_valid, msg = self.utils.validate_failover_request( + is_failed_over, failover_backend_id, rep_configs) + self.assertFalse(is_valid) + expected_msg = ('Cannot failback, backend is not in a failed over ' + 'state. 
If you meant to failover, please either omit ' + 'the --backend_id parameter or use the --backend_id ' + 'parameter with a valid backend id.') + self.assertEqual(expected_msg, msg) + + def test_validate_failover_request_no_backend_id_multi_rep(self): + is_failed_over = False + failover_backend_id = None + rep_configs = self.data.multi_rep_config_list + is_valid, msg = self.utils.validate_failover_request( + is_failed_over, failover_backend_id, rep_configs) + self.assertFalse(is_valid) + expected_msg = ('Cannot failover, no backend_id provided while ' + 'multiple replication devices are defined in ' + 'cinder.conf, please provide a backend_id ' + 'which will act as new primary array by ' + 'appending --backend_id to your command.') + self.assertEqual(expected_msg, msg) + + def test_validate_failover_request_incorrect_backend_id_multi_rep(self): + is_failed_over = False + failover_backend_id = 'invalid_id' + rep_configs = self.data.multi_rep_config_list + self.assertRaises(exception.InvalidInput, + self.utils.validate_failover_request, + is_failed_over, failover_backend_id, rep_configs) + + def test_validate_replication_group_config_success(self): + rep_configs = deepcopy(self.data.multi_rep_config_list) + extra_specs = deepcopy( + self.data.vol_type_extra_specs_rep_enabled_backend_id_sync) + extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = ( + self.data.rep_backend_id_sync) + self.utils.validate_replication_group_config( + rep_configs, [extra_specs]) + + def test_validate_replication_group_config_no_rep_configured(self): + rep_configs = None + extra_specs_list = [ + self.data.vol_type_extra_specs_rep_enabled_backend_id_sync] + self.assertRaises(exception.InvalidInput, + self.utils.validate_replication_group_config, + rep_configs, extra_specs_list) + try: + self.utils.validate_replication_group_config( + rep_configs, extra_specs_list) + except exception.InvalidInput as e: + expected_msg = ( + 'Invalid input received: No replication devices are defined ' + 'in 
cinder.conf, can not enable volume group replication.') + self.assertEqual(expected_msg, e.msg) + + def test_validate_replication_group_config_vol_type_not_rep_enabled(self): + rep_configs = self.data.multi_rep_config_list + extra_specs_list = [self.data.vol_type_extra_specs] + self.assertRaises(exception.InvalidInput, + self.utils.validate_replication_group_config, + rep_configs, extra_specs_list) + try: + self.utils.validate_replication_group_config( + rep_configs, extra_specs_list) + except exception.InvalidInput as e: + expected_msg = ( + 'Invalid input received: Replication is not enabled for a ' + 'Volume Type, all Volume Types in a replication enabled ' + 'Volume Group must have replication enabled.') + self.assertEqual(expected_msg, e.msg) + + def test_validate_replication_group_config_cant_get_rep_config(self): + rep_configs = self.data.multi_rep_config_list + vt_extra_specs = ( + self.data.vol_type_extra_specs_rep_enabled_backend_id_sync) + vt_extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = 'invalid' + extra_specs_list = [vt_extra_specs] + self.assertRaises(exception.InvalidInput, + self.utils.validate_replication_group_config, + rep_configs, extra_specs_list) + try: + self.utils.validate_replication_group_config( + rep_configs, extra_specs_list) + except exception.InvalidInput as e: + expected_msg = ( + 'Invalid input received: Unable to determine which ' + 'rep_device to use from cinder.conf. 
Could not validate ' + 'volume types being added to group.') + self.assertEqual(expected_msg, e.msg) + + def test_validate_replication_group_config_non_sync_mode(self): + rep_configs = self.data.multi_rep_config_list + extra_specs_list = [ + self.data.vol_type_extra_specs_rep_enabled_backend_id_async] + self.assertRaises(exception.InvalidInput, + self.utils.validate_replication_group_config, + rep_configs, extra_specs_list) + try: + self.utils.validate_replication_group_config( + rep_configs, extra_specs_list) + except exception.InvalidInput as e: + expected_msg = ( + 'Invalid input received: Replication for Volume Type is not ' + 'set to Synchronous. Only Synchronous can be used with ' + 'replication groups') + self.assertEqual(expected_msg, e.msg) + + @mock.patch.object(utils.PowerMaxUtils, 'get_rep_config') + def test_validate_replication_group_config_multiple_rep_backend_ids( + self, mck_get): + side_effect_list = [ + self.data.rep_config_sync, self.data.rep_config_sync_2] + mck_get.side_effect = side_effect_list + rep_configs = self.data.multi_rep_config_list + ex_specs_1 = deepcopy( + self.data.vol_type_extra_specs_rep_enabled_backend_id_sync) + ex_specs_2 = deepcopy( + self.data.vol_type_extra_specs_rep_enabled_backend_id_sync_2) + extra_specs_list = [ex_specs_1, ex_specs_2] + self.assertRaises(exception.InvalidInput, + self.utils.validate_replication_group_config, + rep_configs, extra_specs_list) + mck_get.side_effect = side_effect_list + try: + self.utils.validate_replication_group_config( + rep_configs, extra_specs_list) + except exception.InvalidInput as e: + expected_msg = ( + 'Invalid input received: Multiple replication backend ids ' + 'detected please ensure only a single replication device ' + '(backend_id) is used for all Volume Types in a Volume ' + 'Group.') + self.assertEqual(expected_msg, e.msg) + + def test_validate_non_replication_group_config_success(self): + extra_specs_list = [ + self.data.vol_type_extra_specs] + 
    def test_get_migration_delete_extra_specs_replicated(self):
        """Replication extra specs are rebuilt from the volume's metadata.

        For a migrating replicated volume, get_migration_delete_extra_specs
        must re-derive IS_RE, the rep mode, the matching rep_config and the
        replication device backend id from the metadata stamped on the
        volume at create time.
        """
        volume = self.data.test_volume
        metadata = deepcopy(self.data.volume_metadata)
        # Mark the volume as replicated (sync mode) via its metadata
        metadata[utils.IS_RE_CAMEL] = 'True'
        metadata['ReplicationMode'] = utils.REP_SYNC
        metadata['RDFG-Label'] = self.data.rdf_group_name_1
        volume.metadata = metadata
        extra_specs = deepcopy(self.data.extra_specs)
        rep_configs = self.data.multi_rep_config_list
        updated_extra_specs = self.utils.get_migration_delete_extra_specs(
            volume, extra_specs, rep_configs)
        # Expected: specs identify the sync rep_config and its backend id
        ref_extra_specs = deepcopy(self.data.extra_specs)
        ref_extra_specs[utils.IS_RE] = True
        ref_extra_specs[utils.REP_MODE] = utils.REP_SYNC
        ref_extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
        ref_extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = (
            self.data.rep_backend_id_sync)
        self.assertEqual(ref_extra_specs, updated_extra_specs)
self.assertEqual(self.data.extra_specs, updated_extra_specs) diff --git a/cinder/volume/drivers/dell_emc/powermax/common.py b/cinder/volume/drivers/dell_emc/powermax/common.py index e7a21eee599..80d6d0da1e6 100644 --- a/cinder/volume/drivers/dell_emc/powermax/common.py +++ b/cinder/volume/drivers/dell_emc/powermax/common.py @@ -187,9 +187,8 @@ class PowerMaxCommon(object): self.ucode_level = None self.next_gen = False self.replication_enabled = False - self.extend_replicated_vol = False self.rep_devices = [] - self.failover = False + self.failover = True if active_backend_id else False self.powermax_array_tag_list = None self.powermax_short_host_name_template = None self.powermax_port_group_name_template = None @@ -330,32 +329,29 @@ class PowerMaxCommon(object): def _get_replication_info(self): """Gather replication information, if provided.""" - self.rep_config = None + self.rep_configs = None self.replication_targets = [] if hasattr(self.configuration, 'replication_device'): self.rep_devices = self.configuration.safe_get( 'replication_device') - if self.rep_devices and len(self.rep_devices) == 1: - self.rep_config = self.utils.get_replication_config( + if self.rep_devices: + if len(self.rep_devices) > 1: + self.utils.validate_multiple_rep_device(self.rep_devices) + self.rep_configs = self.utils.get_replication_config( self.rep_devices) - if self.rep_config: - self.replication_targets = [self.rep_config['array']] - if self.active_backend_id == self.rep_config['array']: - self.failover = True - self.extend_replicated_vol = self.rep_config['allow_extend'] - self.replication_enabled = True - LOG.debug("The replication configuration is %(rep_config)s.", - {'rep_config': self.rep_config}) + # use self.replication_enabled for update_volume_stats + self.replication_enabled = True + self.replication_targets = self.utils.get_replication_targets( + self.rep_configs) + LOG.debug("The replication configuration is %(rep_configs)s.", + {'rep_configs': self.rep_configs}) if 
self.next_gen: - self.rep_config[utils.RDF_CONS_EXEMPT] = True + for rc in self.rep_configs: + rc[utils.RDF_CONS_EXEMPT] = True else: - self.rep_config[utils.RDF_CONS_EXEMPT] = False - - elif self.rep_devices and len(self.rep_devices) > 1: - LOG.error("More than one replication target is configured. " - "Dell EMC PowerMax/VMAX only suppports a single " - "replication target. Replication will not be enabled.") + for rc in self.rep_configs: + rc[utils.RDF_CONS_EXEMPT] = False def _get_slo_workload_combinations(self, array_info): """Method to query the array for SLO and Workloads. @@ -448,7 +444,7 @@ class PowerMaxCommon(object): volume_size = volume.size volume_dict, rep_update, rep_info_dict = self._create_volume( - volume_name, volume_size, extra_specs) + volume, volume_name, volume_size, extra_specs) if rep_update: rep_driver_data = rep_update['replication_driver_data'] @@ -469,6 +465,9 @@ class PowerMaxCommon(object): model_update = self.update_metadata( model_update, volume.metadata, self.get_volume_metadata( volume_dict['array'], volume_dict['device_id'])) + if rep_update: + model_update['metadata']['BackendID'] = extra_specs[ + utils.REP_CONFIG].get(utils.BACKEND_ID, 'None') array_tag_list = self.get_tags_of_storage_array( extra_specs[utils.ARRAY]) @@ -535,6 +534,9 @@ class PowerMaxCommon(object): model_update = self.update_metadata( model_update, volume.metadata, self.get_volume_metadata( clone_dict['array'], clone_dict['device_id'])) + if rep_update: + model_update['metadata']['BackendID'] = extra_specs[ + utils.REP_CONFIG].get(utils.BACKEND_ID, 'None') array_tag_list = self.get_tags_of_storage_array( extra_specs[utils.ARRAY]) self.volume_metadata.capture_create_volume( @@ -574,6 +576,9 @@ class PowerMaxCommon(object): model_update = self.update_metadata( model_update, clone_volume.metadata, self.get_volume_metadata( clone_dict['array'], clone_dict['device_id'])) + if rep_update: + model_update['metadata']['BackendID'] = extra_specs[ + 
utils.REP_CONFIG].get(utils.BACKEND_ID, 'None') array_tag_list = self.get_tags_of_storage_array( extra_specs[utils.ARRAY]) self.volume_metadata.capture_create_volume( @@ -686,12 +691,18 @@ class PowerMaxCommon(object): """ mv_list, sg_list = None, None extra_specs = self._initial_setup(volume) + rep_config = None + rep_extra_specs = None if 'qos' in extra_specs: del extra_specs['qos'] - rep_extra_specs = self._get_replication_extra_specs( - extra_specs, self.rep_config) - if self.utils.is_volume_failed_over(volume): - extra_specs = rep_extra_specs + if self.utils.is_replication_enabled(extra_specs): + backend_id = self._get_replicated_volume_backend_id(volume) + rep_config = self.utils.get_rep_config( + backend_id, self.rep_configs) + rep_extra_specs = self._get_replication_extra_specs( + extra_specs, rep_config) + if self.utils.is_volume_failed_over(volume): + extra_specs = rep_extra_specs volume_name = volume.name mgmt_sg_name = None LOG.info("Unmap volume: %(volume)s.", {'volume': volume}) @@ -727,13 +738,12 @@ class PowerMaxCommon(object): return array = extra_specs[utils.ARRAY] if self.utils.does_vol_need_rdf_management_group(extra_specs): - mgmt_sg_name = self.utils.get_rdf_management_group_name( - self.rep_config) + mgmt_sg_name = self.utils.get_rdf_management_group_name(rep_config) self._remove_members( array, volume, device_info['device_id'], extra_specs, connector, is_multiattach, async_grp=mgmt_sg_name, host_template=self.powermax_short_host_name_template) - if self.utils.is_metro_device(self.rep_config, extra_specs): + if self.utils.is_metro_device(rep_config, extra_specs): # Need to remove from remote masking view device_info, __ = (self.find_host_lun_id( volume, host_name, extra_specs, rep_extra_specs)) @@ -787,13 +797,14 @@ class PowerMaxCommon(object): """ extra_specs = self._initial_setup(volume) is_multipath = connector.get('multipath', False) + rep_config = extra_specs.get(utils.REP_CONFIG) rep_extra_specs = self._get_replication_extra_specs( - 
extra_specs, self.rep_config) + extra_specs, rep_config) remote_port_group = None volume_name = volume.name LOG.info("Initialize connection: %(volume)s.", {'volume': volume_name}) - if (self.utils.is_metro_device(self.rep_config, extra_specs) + if (self.utils.is_metro_device(rep_config, extra_specs) and not is_multipath and self.protocol.lower() == 'iscsi'): LOG.warning("Multipathing is not correctly enabled " "on your system.") @@ -826,7 +837,7 @@ class PowerMaxCommon(object): self.get_port_group_from_masking_view( extra_specs[utils.ARRAY], device_info_dict['maskingview'])) - if self.utils.is_metro_device(self.rep_config, extra_specs): + if self.utils.is_metro_device(rep_config, extra_specs): remote_info_dict, is_multiattach = ( self.find_host_lun_id(volume, connector.get('host'), extra_specs, rep_extra_specs)) @@ -854,7 +865,7 @@ class PowerMaxCommon(object): device_info_dict, port_group_name = ( self._attach_volume( volume, connector, extra_specs, masking_view_dict)) - if self.utils.is_metro_device(self.rep_config, extra_specs): + if self.utils.is_metro_device(rep_config, extra_specs): # Need to attach on remote side metro_host_lun, remote_port_group = self._attach_metro_volume( volume, connector, is_multiattach, extra_specs, @@ -864,7 +875,7 @@ class PowerMaxCommon(object): device_info_dict['ip_and_iqn'] = ( self._find_ip_and_iqns( extra_specs[utils.ARRAY], port_group_name)) - if self.utils.is_metro_device(self.rep_config, extra_specs): + if self.utils.is_metro_device(rep_config, extra_specs): device_info_dict['metro_ip_and_iqn'] = ( self._find_ip_and_iqns( rep_extra_specs[utils.ARRAY], remote_port_group)) @@ -1020,11 +1031,13 @@ class PowerMaxCommon(object): # Get extend workflow dependent on array gen and replication status if rep_enabled: - rdf_grp_no, _ = self.get_rdf_details(array) + rep_config = ex_specs[utils.REP_CONFIG] + rdf_grp_no, _ = self.get_rdf_details(array, rep_config) r1_ode, r1_ode_metro, r2_ode, r2_ode_metro = ( 
self._array_ode_capabilities_check(array, True)) + if self.next_gen: - if self.utils.is_metro_device(self.rep_config, ex_specs): + if self.utils.is_metro_device(rep_config, ex_specs): if not r1_ode_metro or not r2_ode or not r2_ode_metro: legacy_extend = True else: @@ -1032,7 +1045,8 @@ class PowerMaxCommon(object): # Handle the extend process using workflow info from previous steps if legacy_extend: - if self.rep_config.get('allow_extend', False): + rep_config = ex_specs[utils.REP_CONFIG] + if rep_config.get('allow_extend', False): LOG.info("Legacy extend volume %(volume)s to %(new_size)d GBs", {'volume': vol_name, 'new_size': int(new_size)}) self._extend_legacy_replicated_vol( @@ -1106,10 +1120,12 @@ class PowerMaxCommon(object): raise exception.VolumeBackendAPIException( message=exception_message) - def _array_ode_capabilities_check(self, array, rep_enabled=False): + def _array_ode_capabilities_check(self, array, rep_config, + rep_enabled=False): """Given an array, check Online Device Expansion (ODE) support. 
:param array: the array serial number + :param rep_config: the replication configuration :param rep_enabled: if replication is enabled for backend :returns: r1_ode: (bool) If R1 array supports ODE :returns: r1_ode_metro: (bool) If R1 array supports ODE with Metro vols @@ -1123,7 +1139,7 @@ class PowerMaxCommon(object): if self.next_gen: r1_ode = True if rep_enabled: - __, r2_array = self.get_rdf_details(array) + __, r2_array = self.get_rdf_details(array, rep_config) r2_ucode = self.rest.get_array_ucode_version(r2_array) if int(r1_ucode[2]) > utils.UCODE_5978_ELMSR: r1_ode_metro = True @@ -1205,8 +1221,10 @@ class PowerMaxCommon(object): already_queried = False for array_info in array_info_list: if self.failover: + rep_config = self.utils.get_rep_config( + self.active_backend_id, self.rep_configs) array_info = self.get_secondary_stats_info( - self.rep_config, array_info) + rep_config, array_info) # Add both SLO & Workload name in the pool name # Only insert the array details in the dict once if array_info['SerialNumber'] not in arrays: @@ -1402,15 +1420,37 @@ class PowerMaxCommon(object): if extra_specs: if extra_specs.get('replication_enabled') == ' True': extra_specs[utils.IS_RE] = True - if self.rep_config: - if self.rep_config.get('mode'): - extra_specs[utils.REP_MODE] = self.rep_config['mode'] - if self.rep_config.get(utils.METROBIAS): - extra_specs[utils.METROBIAS] = self.rep_config[ - utils.METROBIAS] + backend_id = self._get_replicated_volume_backend_id(volume) + rep_config = self.utils.get_rep_config( + backend_id, self.rep_configs) + if rep_config is None: + msg = _('Could not determine which rep_device to use ' + 'from cinder.conf') + raise exception.VolumeBackendAPIException(msg) + extra_specs[utils.REP_CONFIG] = rep_config + if rep_config.get('mode'): + extra_specs[utils.REP_MODE] = rep_config['mode'] + if rep_config.get(utils.METROBIAS): + extra_specs[utils.METROBIAS] = ( + rep_config[utils.METROBIAS]) return extra_specs, qos_specs + def 
_get_replicated_volume_backend_id(self, volume): + """Given a volume, return its rep device backend id. + + :param volume: volume used to retrieve backend id -- volume + :returns: backend id -- str + """ + backend_id = utils.BACKEND_ID_LEGACY_REP + volume_extra_specs = self.utils.get_volumetype_extra_specs(volume) + if volume_extra_specs: + volume_backend_id = volume_extra_specs.get( + utils.REPLICATION_DEVICE_BACKEND_ID) + if volume_backend_id: + backend_id = volume_backend_id + return backend_id + def _find_device_on_array(self, volume, extra_specs): """Given the volume get the PowerMax/VMAX device Id. @@ -1544,7 +1584,8 @@ class PowerMaxCommon(object): extra_specs = self._initial_setup(volume) mv_list, __ = self._get_masking_views_from_volume(array, device_id, host) - if self.utils.is_metro_device(self.rep_config, extra_specs): + if self.utils.is_metro_device( + extra_specs.get(utils.REP_CONFIG), extra_specs): is_metro = True return mv_list, is_metro @@ -1891,20 +1932,19 @@ class PowerMaxCommon(object): array, device_id, volume_name, extra_specs) return volume_name - def _create_volume( - self, volume_name, volume_size, extra_specs): + def _create_volume(self, volume, volume_name, volume_size, extra_specs): """Create a volume. 
+ :param volume_name: the volume :param volume_name: the volume name :param volume_size: the volume size :param extra_specs: extra specifications - :param in_use: if the volume is in 'in-use' state - :returns: volume_dict --dict + :returns: volume_dict, rep_update, rep_info_dict --dict :raises: VolumeBackendAPIException: """ # Set Create Volume options - is_re, rep_mode, rep_first_vol = False, None, False - rep_extra_specs, rep_info_dict, rep_update = dict(), dict(), dict() + is_re, rep_mode, storagegroup_name = False, None, None + rep_info_dict, rep_update = dict(), dict() # Get Array details array = extra_specs[utils.ARRAY] array_model, next_gen = self.rest.get_array_model_info(array) @@ -1937,58 +1977,106 @@ class PowerMaxCommon(object): if self.utils.is_replication_enabled(extra_specs): is_re, rep_mode = True, extra_specs['rep_mode'] - rep_first_vol, rep_extra_specs, rep_info_dict = ( - self.prepare_replication_details(extra_specs)) storagegroup_name = self.masking.get_or_create_default_storage_group( array, extra_specs[utils.SRP], extra_specs[utils.SLO], extra_specs[utils.WORKLOAD], extra_specs, do_disable_compression, is_re, rep_mode) + existing_devices = self.rest.get_volumes_in_storage_group( + array, storagegroup_name) + try: - volume_dict = self.provision.create_volume_from_sg( - array, volume_name, storagegroup_name, - volume_size, extra_specs, rep_info_dict) - if is_re: - rep_vol_dict = deepcopy(volume_dict) - rep_vol_dict.update({'device_uuid': volume_name, - 'storage_group': rep_extra_specs[ - 'sg_name'], - 'size': volume_size}) - if rep_first_vol: - self.srdf_protect_storage_group( - extra_specs, rep_extra_specs, rep_vol_dict) - - remote_device_id = self.get_and_set_remote_device_uuid( - extra_specs, rep_extra_specs, rep_vol_dict) - rep_vol_dict.update({'remote_device_id': remote_device_id}) - rep_update, rep_info_dict = self.gather_replication_updates( - extra_specs, rep_extra_specs, rep_vol_dict) - - if rep_mode in [utils.REP_ASYNC, 
    def _create_replication_enabled_volume(
            self, array, volume, volume_name, volume_size, extra_specs,
            storagegroup_name, rep_mode):
        """Create a volume with replication enabled

        :param array: the primary array
        :param volume: the volume
        :param volume_name: the volume name
        :param volume_size: the volume size
        :param extra_specs: extra specifications
        :param storagegroup_name: the storage group name
        :param rep_mode: the replication mode
        :returns: volume_dict, rep_update, rep_info_dict --dict
        """
        @coordination.synchronized('emc-first-rdf-vol-sg')
        def _is_first_vol_in_replicated_sg():
            # Lock-guarded so only one request can observe an empty SG/RDFG
            # and perform the initial create + SRDF protection setup.
            vol_dict = dict()
            first_vol, rep_ex_specs, rep_info, rdfg_empty = (
                self.prepare_replication_details(extra_specs))
            if first_vol:
                vol_dict = self.provision.create_volume_from_sg(
                    array, volume_name, storagegroup_name,
                    volume_size, extra_specs, rep_info)
                rep_vol = deepcopy(vol_dict)
                rep_vol.update({'device_uuid': volume_name,
                                'storage_group': storagegroup_name,
                                'size': volume_size})
                if first_vol and rdfg_empty:
                    # First volume in SG, first volume in RDFG
                    self.srdf_protect_storage_group(
                        extra_specs, rep_ex_specs, rep_vol)
                elif not rdfg_empty and not rep_info:
                    # First volume in SG, not first in RDFG
                    self.configure_volume_replication(
                        array, volume, vol_dict['device_id'], extra_specs)
            return first_vol, rep_ex_specs, vol_dict

        is_first_volume, rep_extra_specs, volume_info_dict = (
            _is_first_vol_in_replicated_sg())

        if not is_first_volume:
            # SG is already replicated: create the device directly into the
            # protected SG using the existing RDF pair information.
            __, rep_extra_specs, rep_info_dict, _ = (
                self.prepare_replication_details(extra_specs))
            volume_info_dict = self.provision.create_volume_from_sg(
                array, volume_name, storagegroup_name,
                volume_size, extra_specs, rep_info_dict)

        rep_vol_dict = deepcopy(volume_info_dict)
        rep_vol_dict.update({'device_uuid': volume_name,
                             'storage_group': storagegroup_name,
                             'size': volume_size})

        remote_device_id = self.get_and_set_remote_device_uuid(
            extra_specs, rep_extra_specs, rep_vol_dict)
        rep_vol_dict.update({'remote_device_id': remote_device_id})
        rep_update, rep_info_dict = self.gather_replication_updates(
            extra_specs, rep_extra_specs, rep_vol_dict)

        # Async/Metro volumes must also join the RDF management storage group
        # on both the local and remote arrays.
        if rep_mode in [utils.REP_ASYNC, utils.REP_METRO]:
            self._add_volume_to_rdf_management_group(
                array, volume_info_dict['device_id'], volume_name,
                rep_extra_specs['array'], remote_device_id,
                extra_specs)

        return volume_info_dict, rep_update, rep_info_dict
PowerMax/VMAX extra specs. @@ -2173,9 +2261,13 @@ class PowerMaxCommon(object): :param extra_specs: the extra specifications :param volume: the volume object """ - # Cleanup replication + if volume and volume.migration_status == 'deleting': + extra_specs = self.utils.get_migration_delete_extra_specs( + volume, extra_specs, self.rep_configs) + # Cleanup remote replication if self.utils.is_replication_enabled(extra_specs): - rdf_group_no, __ = self.get_rdf_details(array) + rdf_group_no, __ = self.get_rdf_details( + array, extra_specs[utils.REP_CONFIG]) self.cleanup_rdf_device_pair(array, rdf_group_no, device_id, extra_specs) else: @@ -2195,12 +2287,13 @@ class PowerMaxCommon(object): """ resume_replication, rdf_mgmt_cleanup = False, False rdf_mgmt_sg, vols_in_mgmt_sg = None, None + rep_config = extra_specs[utils.REP_CONFIG] rep_mode = extra_specs['rep_mode'] if rep_mode in [utils.REP_METRO, utils.REP_ASYNC]: extra_specs['force_vol_remove'] = True - rdf_group_no, remote_array = self.get_rdf_details(array) + rdf_group_no, remote_array = self.get_rdf_details(array, rep_config) rep_extra_specs = self._get_replication_extra_specs( - extra_specs, self.rep_config) + extra_specs, rep_config) # 1. Get the remote device ID so it can be deleted later remote_device = self.rest.get_rdf_pair_volume( @@ -2215,8 +2308,7 @@ class PowerMaxCommon(object): # Make sure devices are in a valid state before continuing self.rest.wait_for_rdf_pair_sync( array, rdf_group_no, device_id, rep_extra_specs) - rdf_mgmt_sg = self.utils.get_rdf_management_group_name( - self.rep_config) + rdf_mgmt_sg = self.utils.get_rdf_management_group_name(rep_config) vols_in_mgmt_sg = self.rest.get_num_vols_in_sg(array, rdf_mgmt_sg) if vols_in_mgmt_sg > 1: @@ -2235,7 +2327,7 @@ class PowerMaxCommon(object): "There is more than one storage group associated with device " "%(dev)s not including RDF management groups. 
Please check " "device is not member of non-OpenStack managed storage " - "groups" % {'dev': device_id})) + "groups") % {'dev': device_id}) LOG.error(exception_message) raise exception.VolumeBackendAPIException(exception_message) else: @@ -2281,11 +2373,13 @@ class PowerMaxCommon(object): :param rdf_mgmt_cleanup: is RDF management group cleanup required """ vols_in_sg = self.rest.get_num_vols_in_sg(array, sg_name) + vols_in_remote_sg = self.rest.get_num_vols_in_sg(remote_array, sg_name) if not vols_in_sg: parent_sg = self.masking.get_parent_sg_from_child( array, sg_name) self.rest.delete_storage_group(array, sg_name) - self.rest.delete_storage_group(remote_array, sg_name) + if not vols_in_remote_sg: + self.rest.delete_storage_group(remote_array, sg_name) if parent_sg: vols_in_parent = self.rest.get_num_vols_in_sg( array, parent_sg) @@ -2315,27 +2409,27 @@ class PowerMaxCommon(object): short_host_name = self.utils.get_host_name_label( host, self.powermax_short_host_name_template) if host else None extra_specs = self._initial_setup(volume) - rep_extra_specs = self._get_replication_extra_specs( - extra_specs, self.rep_config) if self.utils.is_volume_failed_over(volume): + rep_extra_specs = self._get_replication_extra_specs( + extra_specs, extra_specs[utils.REP_CONFIG]) extra_specs = rep_extra_specs device_id = self._find_device_on_array(volume, extra_specs) target_wwns = self._get_target_wwns_from_masking_view( device_id, short_host_name, extra_specs) - if self.utils.is_metro_device(self.rep_config, extra_specs): + if extra_specs.get(utils.REP_CONFIG) and self.utils.is_metro_device( + extra_specs[utils.REP_CONFIG], extra_specs): rdf_group_no, __ = self.get_rdf_details( - extra_specs[utils.ARRAY]) - + extra_specs[utils.ARRAY], extra_specs[utils.REP_CONFIG]) rdf_pair_info = self.rest.get_rdf_pair_volume( extra_specs[utils.ARRAY], rdf_group_no, device_id) remote_device_id = rdf_pair_info.get('remoteVolumeName', None) - rep_extra_specs = self._get_replication_extra_specs( 
- extra_specs, self.rep_config) + extra_specs, extra_specs[utils.REP_CONFIG]) metro_wwns = self._get_target_wwns_from_masking_view( remote_device_id, short_host_name, rep_extra_specs) + return target_wwns, metro_wwns def _get_target_wwns_from_masking_view( @@ -2447,7 +2541,7 @@ class PowerMaxCommon(object): replication_enabled = self.utils.is_replication_enabled(extra_specs) if replication_enabled: copy_mode = True - __, rep_extra_specs, __ = ( + __, rep_extra_specs, __, __ = ( self.prepare_replication_details(extra_specs)) # PowerMax/VMAX supports using a target volume that is bigger than @@ -2455,7 +2549,7 @@ class PowerMaxCommon(object): # size at this point to avoid having to extend later try: clone_dict, rep_update, rep_info_dict = self._create_volume( - clone_name, clone_volume.size, extra_specs) + clone_volume, clone_name, clone_volume.size, extra_specs) target_device_id = clone_dict['device_id'] if target_device_id: @@ -2783,6 +2877,12 @@ class PowerMaxCommon(object): model_update = self.update_metadata( model_update, volume.metadata, self.get_volume_metadata( array, device_id)) + + if rep_model_update: + target_backend_id = extra_specs.get( + utils.REPLICATION_DEVICE_BACKEND_ID, 'None') + model_update['metadata']['BackendID'] = target_backend_id + self.volume_metadata.capture_manage_existing( volume, rep_info_dict, device_id, extra_specs) @@ -2796,17 +2896,18 @@ class PowerMaxCommon(object): :param device_id: the device id :param volume: the volume object :param volume_name: the volume name - :param rep_info_dict: replication information dictionary + :param rep_extra_specs: replication information dictionary :returns: replication status, device pair info, replication info -- str, dict, dict """ + rdf_group_no = rep_extra_specs['rdf_group_no'] remote_array = rep_extra_specs['array'] rep_mode = rep_extra_specs['rep_mode'] - + rep_config = rep_extra_specs[utils.REP_CONFIG] if rep_mode in [utils.REP_ASYNC, utils.REP_METRO]: rep_extra_specs['mgmt_sg_name'] = ( 
- self.utils.get_rdf_management_group_name(self.rep_config)) + self.utils.get_rdf_management_group_name(rep_config)) else: rep_extra_specs['mgmt_sg_name'] = None @@ -2817,7 +2918,7 @@ class PowerMaxCommon(object): exception_message = (_( "Unable to RDF protect device %(dev)s in OpenStack managed " "storage group because it currently exists in one or more " - "user managed storage groups." % {'dev': device_id})) + "user managed storage groups.") % {'dev': device_id}) LOG.error(exception_message) raise exception.VolumeBackendAPIException(exception_message) @@ -2831,7 +2932,7 @@ class PowerMaxCommon(object): rdf_group_no=rdf_group_no, target_name=target_name, remote_array=remote_array, target_device_id=r2_device_id, replication_status=rep_status, rep_mode=rep_mode, - rdf_group_label=self.rep_config['rdf_group_label'], + rdf_group_label=rep_config['rdf_group_label'], target_array_model=rep_extra_specs['target_array_model'], mgmt_sg_name=rep_extra_specs['mgmt_sg_name']) @@ -3396,7 +3497,8 @@ class PowerMaxCommon(object): extra_specs = self._initial_setup(volume) if not self.utils.is_retype_supported(volume, extra_specs, - new_type['extra_specs']): + new_type['extra_specs'], + self.rep_configs): src_mode = extra_specs.get('rep_mode', 'non-replicated') LOG.error("It is not possible to perform host-assisted retype " "from %(src_mode)s to Metro replication type whilst the " @@ -3427,10 +3529,9 @@ class PowerMaxCommon(object): :param extra_specs: extra specifications :returns: boolean -- True if migration succeeded, False if error. 
""" - vol_is_replicated = self.utils.is_replication_enabled(extra_specs) # Check if old type and new type have different replication types do_change_replication = self.utils.change_replication( - vol_is_replicated, new_type) + extra_specs, new_type[utils.EXTRA_SPECS]) is_compression_disabled = self.utils.is_compression_disabled( extra_specs) # Check if old type and new type have different compression types @@ -3506,14 +3607,31 @@ class PowerMaxCommon(object): was_rep_enabled = self.utils.is_replication_enabled(extra_specs) if self.utils.is_replication_enabled(target_extra_specs): - rep_mode = self.rep_config['mode'] + target_backend_id = target_extra_specs.get( + utils.REPLICATION_DEVICE_BACKEND_ID, + utils.BACKEND_ID_LEGACY_REP) + target_rep_config = self.utils.get_rep_config( + target_backend_id, self.rep_configs) + rep_mode = target_rep_config['mode'] target_extra_specs[utils.REP_MODE] = rep_mode + target_extra_specs[utils.REP_CONFIG] = target_rep_config is_rep_enabled = True else: is_rep_enabled = False - # Scenario: Rep -> Non-Rep - if was_rep_enabled and not is_rep_enabled: + backend_ids_differ = False + if was_rep_enabled and is_rep_enabled: + curr_backend_id = extra_specs.get( + utils.REPLICATION_DEVICE_BACKEND_ID, + utils.BACKEND_ID_LEGACY_REP) + tgt_backend_id = target_extra_specs.get( + utils.REPLICATION_DEVICE_BACKEND_ID, + utils.BACKEND_ID_LEGACY_REP) + backend_ids_differ = curr_backend_id != tgt_backend_id + + # Scenario 1: Rep -> Non-Rep + # Scenario 2: Cleanup for Rep -> Diff Rep type + if (was_rep_enabled and not is_rep_enabled) or backend_ids_differ: rep_extra_specs, resume_rdf = ( self.break_rdf_device_pair_session( array, device_id, volume_name, extra_specs)) @@ -3521,8 +3639,10 @@ class PowerMaxCommon(object): 'replication_status': REPLICATION_DISABLED, 'replication_driver_data': None} - # Scenario: Non-Rep -> Rep - elif not was_rep_enabled and is_rep_enabled: + # Scenario 1: Non-Rep -> Rep + # Scenario 2: Rep -> Diff Rep type + if (not 
was_rep_enabled and is_rep_enabled) or backend_ids_differ: + self._sync_check(array, device_id, extra_specs) (rep_status, rep_driver_data, rep_info_dict, rep_extra_specs, resume_rdf) = ( self.configure_volume_replication( @@ -3533,7 +3653,6 @@ class PowerMaxCommon(object): {'device_id': rep_info_dict['target_device_id'], 'array': rep_info_dict['remote_array']})} - # Retype Volume success, target_sg_name = self._retype_volume( array, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs) @@ -3550,8 +3669,9 @@ class PowerMaxCommon(object): {'device_id': tgt_device_id, 'array': rdf_pair_info['remoteSymmetrixId']})} - # Scenario: Rep -> Rep - if was_rep_enabled and is_rep_enabled: + # Scenario: Rep -> Same Rep + if was_rep_enabled and is_rep_enabled and not backend_ids_differ: + # No change in replication config, retype remote device success = self._retype_remote_volume( array, volume, device_id, volume_name, rep_mode, is_rep_enabled, target_extra_specs) @@ -3566,10 +3686,17 @@ class PowerMaxCommon(object): model_update, volume.metadata, self.get_volume_metadata(array, device_id)) + target_backend_id = None + if is_rep_enabled: + target_backend_id = target_extra_specs.get( + utils.REPLICATION_DEVICE_BACKEND_ID, 'None') + model_update['metadata']['BackendID'] = target_backend_id + self.volume_metadata.capture_retype_info( volume, device_id, array, srp, target_slo, target_workload, target_sg_name, is_rep_enabled, rep_mode, - target_extra_specs[utils.DISABLECOMPRESSION]) + target_extra_specs[utils.DISABLECOMPRESSION], + target_backend_id) return success, model_update @@ -3599,7 +3726,7 @@ class PowerMaxCommon(object): if self.utils.is_replication_enabled(target_extra_specs): is_re, rep_mode = True, target_extra_specs['rep_mode'] mgmt_sg_name = self.utils.get_rdf_management_group_name( - self.rep_config) + target_extra_specs[utils.REP_CONFIG]) device_info = self.rest.get_volume(array, device_id) @@ -3723,8 +3850,9 @@ class 
PowerMaxCommon(object): :returns: bool """ success = True + rep_config = extra_specs[utils.REP_CONFIG] rep_extra_specs = self._get_replication_extra_specs( - extra_specs, self.rep_config) + extra_specs, rep_config) target_device = self.rest.get_rdf_pair_volume( array, rep_extra_specs['rdf_group_no'], device_id) target_device_id = target_device['remoteVolumeName'] @@ -3747,7 +3875,7 @@ class PowerMaxCommon(object): move_rqd = False break if move_rqd: - success = self._retype_volume( + success, __ = self._retype_volume( remote_array, rep_extra_specs[utils.SRP], target_device_id, volume, volume_name, rep_extra_specs, extra_specs[utils.SLO], extra_specs[utils.WORKLOAD], @@ -3868,9 +3996,11 @@ class PowerMaxCommon(object): resume_rdf, mgmt_sg_name = False, None disable_compression = self.utils.is_compression_disabled( extra_specs) - rdf_group_no, remote_array = self.get_rdf_details(array) + rep_config = extra_specs[utils.REP_CONFIG] + rdf_group_no, remote_array = self.get_rdf_details( + array, rep_config) rep_extra_specs = self._get_replication_extra_specs( - extra_specs, self.rep_config) + extra_specs, rep_config) rep_mode = rep_extra_specs['rep_mode'] rep_extra_specs['mgmt_sg_name'] = None group_details = self.rest.get_rdf_group(array, rdf_group_no) @@ -3886,7 +4016,7 @@ class PowerMaxCommon(object): if group_details['numDevices'] > 0 and ( rep_mode in [utils.REP_ASYNC, utils.REP_METRO]): mgmt_sg_name = self.utils.get_rdf_management_group_name( - self.rep_config) + rep_config) self.rest.srdf_suspend_replication( array, mgmt_sg_name, rdf_group_no, rep_extra_specs) rep_extra_specs['mgmt_sg_name'] = mgmt_sg_name @@ -3919,7 +4049,7 @@ class PowerMaxCommon(object): rdf_group_no=rdf_group_no, target_name=target_name, remote_array=remote_array, target_device_id=r2_device_id, replication_status=rep_status, rep_mode=rep_mode, - rdf_group_label=self.rep_config['rdf_group_label'], + rdf_group_label=rep_config['rdf_group_label'], 
target_array_model=rep_extra_specs['target_array_model'], mgmt_sg_name=rep_extra_specs['mgmt_sg_name']) @@ -3929,7 +4059,7 @@ class PowerMaxCommon(object): def _add_volume_to_rdf_management_group( self, array, device_id, volume_name, remote_array, target_device_id, extra_specs): - """Add an volume to its rdf management group. + """Add a volume to its rdf management group. :param array: the array serial number :param device_id: the device id @@ -3939,22 +4069,22 @@ class PowerMaxCommon(object): :param extra_specs: the extra specifications :raises: VolumeBackendAPIException """ - group_name = self.utils.get_rdf_management_group_name( - self.rep_config) + grp_name = self.utils.get_rdf_management_group_name( + extra_specs[utils.REP_CONFIG]) try: - self.provision.get_or_create_group(array, group_name, extra_specs) + self.provision.get_or_create_group(array, grp_name, extra_specs) self.masking.add_volume_to_storage_group( - array, device_id, group_name, volume_name, extra_specs, + array, device_id, grp_name, volume_name, extra_specs, force=True) # Add remote volume self.provision.get_or_create_group( - remote_array, group_name, extra_specs) + remote_array, grp_name, extra_specs) self.masking.add_volume_to_storage_group( - remote_array, target_device_id, group_name, volume_name, + remote_array, target_device_id, grp_name, volume_name, extra_specs, force=True) except Exception as e: exception_message = ( - _('Exception occurred adding volume %(vol)s to its async ' + _('Exception occurred adding volume %(vol)s to its ' 'rdf management group - the exception received was: %(e)s') % {'vol': volume_name, 'e': six.text_type(e)}) LOG.error(exception_message) @@ -3976,8 +4106,12 @@ class PowerMaxCommon(object): # Set session attributes resume_rdf, mgmt_sg_name = True, None + rep_mode = extra_specs['rep_mode'] + rep_config = extra_specs[utils.REP_CONFIG] + if rep_mode in [utils.REP_METRO, utils.REP_ASYNC]: + extra_specs['force_vol_remove'] = True rep_extra_specs = 
self._get_replication_extra_specs( - extra_specs, self.rep_config) + extra_specs, rep_config) extra_specs['force_vol_remove'] = True rep_extra_specs['force_vol_remove'] = True remote_array = rep_extra_specs['array'] @@ -3990,13 +4124,13 @@ class PowerMaxCommon(object): # before any operations are carried out - this will be used later for # remove vol operations r1_sg_names = self.rest.get_storage_groups_from_volume( - remote_array, remote_device_id) + array, device_id) r2_sg_names = self.rest.get_storage_groups_from_volume( remote_array, remote_device_id) if rep_extra_specs['rep_mode'] in [utils.REP_ASYNC, utils.REP_METRO]: mgmt_sg_name = self.utils.get_rdf_management_group_name( - self.rep_config) + rep_config) sg_name = mgmt_sg_name else: sg_name = r1_sg_names[0] @@ -4021,9 +4155,9 @@ class PowerMaxCommon(object): array, device_id, volume_name, mgmt_sg_name, extra_specs) # Remove volume from R2 replication SGs - for sg_name in r2_sg_names: + for r2_sg_name in r2_sg_names: self.masking.remove_volume_from_sg( - remote_array, remote_device_id, volume_name, sg_name, + remote_array, remote_device_id, volume_name, r2_sg_name, rep_extra_specs) if mgmt_sg_name: @@ -4034,7 +4168,6 @@ class PowerMaxCommon(object): if not self.rest.get_volumes_in_storage_group(array, sg_name): resume_rdf = False - # self.rest.delete_volume(remote_array, remote_device_id) self._delete_from_srp(remote_array, remote_device_id, volume_name, extra_specs) @@ -4063,7 +4196,7 @@ class PowerMaxCommon(object): rep_mode = rep_extra_specs['rep_mode'] if rep_mode in [utils.REP_ASYNC, utils.REP_METRO]: async_grp = self.utils.get_rdf_management_group_name( - self.rep_config) + rep_extra_specs[utils.REP_CONFIG]) sg_name = self.rest.get_storage_groups_from_volume( array, device_id) @@ -4082,7 +4215,6 @@ class PowerMaxCommon(object): if rdfg_details and int(rdfg_details.get('numDevices', 0)): self.rest.srdf_resume_replication( array, sg_name, rdf_group, rep_extra_specs) - self._delete_from_srp( remote_array, 
target_device, volume_name, rep_extra_specs) @@ -4113,13 +4245,14 @@ class PowerMaxCommon(object): self._delete_from_srp( array, device_id, volume_name, extra_specs) - def get_rdf_details(self, array): + def get_rdf_details(self, array, rep_config): """Retrieves an SRDF group instance. :param array: the array serial number + :param rep_config: rep config to get details of :returns: rdf_group_no, remote_array """ - if not self.rep_config: + if not self.rep_configs: exception_message = (_("Replication is not configured on " "backend: %(backend)s.") % {'backend': self.configuration.safe_get( @@ -4128,8 +4261,8 @@ class PowerMaxCommon(object): raise exception.VolumeBackendAPIException( message=exception_message) - remote_array = self.rep_config['array'] - rdf_group_label = self.rep_config['rdf_group_label'] + remote_array = rep_config['array'] + rdf_group_label = rep_config['rdf_group_label'] LOG.info("Replication group: %(RDFGroup)s.", {'RDFGroup': rdf_group_label}) rdf_group_no = self.rest.get_rdf_group_number(array, rdf_group_label) @@ -4159,35 +4292,19 @@ class PowerMaxCommon(object): :returns: secondary_id, volume_update_list, group_update_list :raises: VolumeBackendAPIException """ + is_valid, msg = self.utils.validate_failover_request( + self.failover, secondary_id, self.rep_configs) + if not is_valid: + LOG.error(msg) + raise exception.InvalidReplicationTarget(msg) + group_fo = None - if secondary_id != 'default': - if not self.failover: - self.failover = True - if self.rep_config: - secondary_id = self.rep_config['array'] - else: - exception_message = (_( - "Backend %(backend)s is already failed over. 
" - "If you wish to failback, please append " - "'--backend_id default' to your command.") - % {'backend': self.configuration.safe_get( - 'volume_backend_name')}) - LOG.error(exception_message) - return + if not self.failover: + self.failover = True + self.active_backend_id = secondary_id if secondary_id else None else: - if self.failover: - self.failover = False - secondary_id = None - group_fo = 'default' - else: - exception_message = (_( - "Cannot failback backend %(backend)s- backend not " - "in failed over state. If you meant to failover, please " - "omit the '--backend_id default' from the command") - % {'backend': self.configuration.safe_get( - 'volume_backend_name')}) - LOG.error(exception_message) - return + self.failover = False + group_fo = 'default' volume_update_list, group_update_list = ( self._populate_volume_and_group_update_lists( @@ -4207,7 +4324,7 @@ class PowerMaxCommon(object): """ volume_update_list = [] group_update_list = [] - rep_mode = self.rep_config['mode'] + if groups: for group in groups: vol_list = [] @@ -4221,43 +4338,53 @@ class PowerMaxCommon(object): group_update_list.append({'group_id': group.id, 'updates': grp_update}) volume_update_list += vol_updates - sync_vol_list, non_rep_vol_list, async_vol_list, metro_list = ( - [], [], [], []) + + non_rep_vol_list, sync_vol_dict, async_vol_dict, metro_vol_list = ( + [], {}, {}, []) for volume in volumes: array = ast.literal_eval(volume.provider_location)['array'] extra_specs = self._initial_setup(volume) extra_specs[utils.ARRAY] = array if self.utils.is_replication_enabled(extra_specs): - device_id = self._find_device_on_array( - volume, extra_specs) - self._sync_check( - array, device_id, extra_specs) + device_id = self._find_device_on_array(volume, extra_specs) + self._sync_check(array, device_id, extra_specs) + + rep_mode = extra_specs.get(utils.REP_MODE, utils.REP_SYNC) + backend_id = self._get_replicated_volume_backend_id( + volume) + rep_config = self.utils.get_rep_config( + 
backend_id, self.rep_configs) + if rep_mode == utils.REP_SYNC: - sync_vol_list.append(volume) + key = rep_config['rdf_group_label'] + sync_vol_dict.setdefault(key, []).append(volume) elif rep_mode == utils.REP_ASYNC: - async_vol_list.append(volume) + vol_grp_name = self.utils.get_rdf_management_group_name( + rep_config) + async_vol_dict.setdefault(vol_grp_name, []).append(volume) else: - metro_list.append(volume) + metro_vol_list.append(volume) else: non_rep_vol_list.append(volume) - if len(async_vol_list) > 0: - vol_grp_name = self.utils.get_rdf_management_group_name( - self.rep_config) - __, vol_updates = ( - self._failover_replication( + if len(sync_vol_dict) > 0: + for key, sync_vol_list in sync_vol_dict.items(): + vol_updates = ( + self._update_volume_list_from_sync_vol_list( + sync_vol_list, group_fo)) + volume_update_list += vol_updates + + if len(async_vol_dict) > 0: + for vol_grp_name, async_vol_list in async_vol_dict.items(): + __, vol_updates = self._failover_replication( async_vol_list, None, vol_grp_name, - secondary_backend_id=group_fo, host=True)) - volume_update_list += vol_updates + secondary_backend_id=group_fo, host=True) + volume_update_list += vol_updates - if len(sync_vol_list) > 0: - volume_update_list = self. 
_update_volume_list_from_sync_vol_list( - sync_vol_list, volume_update_list, group_fo) - - if len(metro_list) > 0: + if len(metro_vol_list) > 0: __, vol_updates = ( self._failover_replication( - sync_vol_list, None, None, secondary_backend_id=group_fo, + metro_vol_list, None, None, secondary_backend_id=group_fo, host=True, is_metro=True)) volume_update_list += vol_updates @@ -4272,20 +4399,19 @@ class PowerMaxCommon(object): return volume_update_list, group_update_list def _update_volume_list_from_sync_vol_list( - self, sync_vol_list, volume_update_list, group_fo): + self, sync_vol_list, group_fo): """Update the volume update list from the synced volume list :param sync_vol_list: synced volume list - :param volume_update_list: volume update list :param group_fo: group fail over - :returns: volume_update_list + :returns: vol_updates """ extra_specs = self._initial_setup(sync_vol_list[0]) array = ast.literal_eval( sync_vol_list[0].provider_location)['array'] extra_specs[utils.ARRAY] = array temp_grp_name = self.utils.get_temp_failover_grp_name( - self.rep_config) + extra_specs[utils.REP_CONFIG]) self.provision.create_volume_group( array, temp_grp_name, extra_specs) device_ids = self._get_volume_device_ids(sync_vol_list, array) @@ -4295,9 +4421,8 @@ class PowerMaxCommon(object): self._failover_replication( sync_vol_list, None, temp_grp_name, secondary_backend_id=group_fo, host=True)) - volume_update_list += vol_updates self.rest.delete_storage_group(array, temp_grp_name) - return volume_update_list + return vol_updates def _get_replication_extra_specs(self, extra_specs, rep_config): """Get replication extra specifications. 
@@ -4318,7 +4443,8 @@ class PowerMaxCommon(object): # Get the RDF Group label & number rep_extra_specs['rdf_group_label'] = rep_config['rdf_group_label'] - rdf_group_no, __ = self.get_rdf_details(extra_specs['array']) + rdf_group_no, __ = self.get_rdf_details( + extra_specs['array'], rep_config) rep_extra_specs['rdf_group_no'] = rdf_group_no # Get the SRDF wait/retries settings rep_extra_specs['sync_retries'] = rep_config['sync_retries'] @@ -4327,7 +4453,7 @@ class PowerMaxCommon(object): if rep_config['mode'] == utils.REP_METRO: exempt = True if self.next_gen else False rep_extra_specs[utils.RDF_CONS_EXEMPT] = exempt - bias = True if rep_config[utils.METROBIAS] else False + bias = True if rep_config.get(utils.METROBIAS) else False rep_extra_specs[utils.METROBIAS] = bias # If disable compression is set, check if target array is all flash @@ -4386,13 +4512,20 @@ class PowerMaxCommon(object): if (not volume_utils.is_group_a_cg_snapshot_type(group) and not group.is_replicated): raise NotImplementedError() + + # If volume types are added during creation, validate replication + # extra_spec consistency across volume types. 
+ extra_specs_list = list() + for volume_type_id in group.get('volume_type_ids'): + vt_extra_specs = self.utils.get_volumetype_extra_specs( + None, volume_type_id) + extra_specs_list.append(vt_extra_specs) + if group.is_replicated: - if (self.rep_config and self.rep_config.get('mode') - and self.rep_config['mode'] - in [utils.REP_ASYNC, utils.REP_METRO]): - msg = _('Replication groups are not supported ' - 'for use with Asynchronous replication or Metro.') - raise exception.InvalidInput(reason=msg) + self.utils.validate_replication_group_config( + self.rep_configs, extra_specs_list) + else: + self.utils.validate_non_replication_group_config(extra_specs_list) model_update = {'status': fields.GroupStatus.AVAILABLE} @@ -4409,8 +4542,14 @@ class PowerMaxCommon(object): if group.is_replicated: LOG.debug("Group: %(group)s is a replication group.", {'group': group.id}) + target_backend_id = extra_specs_list[0].get( + utils.REPLICATION_DEVICE_BACKEND_ID, + utils.BACKEND_ID_LEGACY_REP) + target_rep_config = self.utils.get_rep_config( + target_backend_id, self.rep_configs) # Create remote group - __, remote_array = self.get_rdf_details(array) + __, remote_array = self.get_rdf_details( + array, target_rep_config) self.provision.create_volume_group( remote_array, vol_grp_name, interval_retries_dict) model_update.update({ @@ -4476,9 +4615,16 @@ class PowerMaxCommon(object): # Remove replication for group, if applicable if group.is_replicated: + vt_extra_specs = self.utils.get_volumetype_extra_specs( + None, group.get('volume_types')[0]['id']) + target_backend_id = vt_extra_specs.get( + utils.REPLICATION_DEVICE_BACKEND_ID, + utils.BACKEND_ID_LEGACY_REP) + target_rep_config = self.utils.get_rep_config( + target_backend_id, self.rep_configs) self._cleanup_group_replication( array, vol_grp_name, volume_device_ids, - interval_retries_dict) + interval_retries_dict, target_rep_config) try: if volume_device_ids: # First remove all the volumes from the SG @@ -4558,7 +4704,8 @@ class 
PowerMaxCommon(object): return volumes_model_update def _cleanup_group_replication( - self, array, vol_grp_name, volume_device_ids, extra_specs): + self, array, vol_grp_name, volume_device_ids, extra_specs, + rep_config): """Cleanup remote replication. Break and delete the rdf replication relationship and @@ -4567,8 +4714,9 @@ class PowerMaxCommon(object): :param vol_grp_name: the volume group name :param volume_device_ids: the device ids of the local volumes :param extra_specs: the extra specifications + :param rep_config: the rep config to use for rdf operations """ - rdf_group_no, remote_array = self.get_rdf_details(array) + rdf_group_no, remote_array = self.get_rdf_details(array, rep_config) # Delete replication for group, if applicable group_details = self.rest.get_storage_group_rep( array, vol_grp_name) @@ -4840,6 +4988,9 @@ class PowerMaxCommon(object): add_vols, group, interval_retries_dict) # Remove volume(s) from the group if remove_device_ids: + if group.is_replicated: + # Need force flag when manipulating RDF enabled SGs + interval_retries_dict[utils.FORCE_VOL_REMOVE] = True self.masking.remove_volumes_from_storage_group( array, remove_device_ids, vol_grp_name, interval_retries_dict) @@ -4873,7 +5024,9 @@ class PowerMaxCommon(object): :param extra_specs: the extra specifications """ remote_device_list = [] - __, remote_array = self.get_rdf_details(array) + backend_id = self._get_replicated_volume_backend_id(volumes[0]) + rep_config = self.utils.get_rep_config(backend_id, self.rep_configs) + __, remote_array = self.get_rdf_details(array, rep_config) for vol in volumes: remote_loc = ast.literal_eval(vol.replication_driver_data) founddevice_id = self.rest.check_volume_device_id( @@ -4988,6 +5141,10 @@ class PowerMaxCommon(object): # Update the replication status if group.is_replicated: + backend = self._get_replicated_volume_backend_id(volumes[0]) + rep_config = self.utils.get_rep_config( + backend, self.rep_configs) + 
interval_retries_dict[utils.REP_CONFIG] = rep_config volumes_model_update = self._replicate_group( array, volumes_model_update, tgt_name, interval_retries_dict) @@ -5058,8 +5215,8 @@ class PowerMaxCommon(object): src_dev_id, extra_specs, vol_size, tgt_vol_name = ( self._get_clone_vol_info( volume, source_vols, snapshots)) - volume_dict, __, __ = self._create_volume( - tgt_vol_name, vol_size, extra_specs) + volume_dict, __, __, = self._create_volume( + volume, tgt_vol_name, vol_size, extra_specs) device_id = volume_dict['device_id'] # Add the volume to the volume group SG self.masking.add_volume_to_storage_group( @@ -5157,7 +5314,8 @@ class PowerMaxCommon(object): :returns: volumes_model_update """ ret_volumes_model_update = [] - rdf_group_no, remote_array = self.get_rdf_details(array) + rdf_group_no, remote_array = self.get_rdf_details( + array, extra_specs[utils.REP_CONFIG]) self.rest.replicate_group( array, group_name, rdf_group_no, remote_array, extra_specs) # Need to set SRP to None for remote generic volume group - Not set @@ -5212,7 +5370,8 @@ class PowerMaxCommon(object): if vol_grp_name is None: raise exception.GroupNotFound(group_id=group.id) - rdf_group_no, _ = self.get_rdf_details(array) + rdf_group_no, _ = self.get_rdf_details( + array, extra_specs[utils.REP_CONFIG]) self.rest.srdf_resume_replication( array, vol_grp_name, rdf_group_no, extra_specs) model_update.update({ @@ -5253,7 +5412,8 @@ class PowerMaxCommon(object): if vol_grp_name is None: raise exception.GroupNotFound(group_id=group.id) - rdf_group_no, _ = self.get_rdf_details(array) + rdf_group_no, _ = self.get_rdf_details( + array, extra_specs[utils.REP_CONFIG]) self.rest.srdf_suspend_replication( array, vol_grp_name, rdf_group_no, extra_specs) model_update.update({ @@ -5295,15 +5455,18 @@ class PowerMaxCommon(object): :returns: model_update, vol_model_updates """ model_update, vol_model_updates = dict(), list() - rdf_group_no, extra_specs, failover = None, dict(), False if not volumes: # 
Return if empty group return model_update, vol_model_updates + extra_specs = self._initial_setup(volumes[0]) + array = ast.literal_eval(volumes[0].provider_location)['array'] + extra_specs[utils.ARRAY] = array + failover = False if secondary_backend_id == 'default' else True + try: - extra_specs = self._initial_setup(volumes[0]) - array = ast.literal_eval(volumes[0].provider_location)['array'] - extra_specs[utils.ARRAY] = array + rdf_group_no, _ = self.get_rdf_details( + array, extra_specs[utils.REP_CONFIG]) if group: volume_group = self._find_volume_group(array, group) if volume_group: @@ -5312,11 +5475,7 @@ class PowerMaxCommon(object): if vol_grp_name is None: raise exception.GroupNotFound(group_id=group.id) - # As we only support a single replication target, ignore - # any secondary_backend_id which is not 'default' - failover = False if secondary_backend_id == 'default' else True if not is_metro: - rdf_group_no, _ = self.get_rdf_details(array) if failover: self.rest.srdf_failover_group( array, vol_grp_name, rdf_group_no, extra_specs) @@ -5523,21 +5682,24 @@ class PowerMaxCommon(object): :param new_metadata: new object metadata :returns: dict -- updated model """ - if new_metadata: - self._is_dict(new_metadata, 'new object metadata') + if existing_metadata: + self._is_dict(existing_metadata, 'existing metadata') + else: + existing_metadata = dict() + if model_update: self._is_dict(model_update, 'existing model') if 'metadata' in model_update: - model_update['metadata'].update(new_metadata) + model_update['metadata'].update(existing_metadata) else: - model_update.update({'metadata': new_metadata}) + model_update.update({'metadata': existing_metadata}) else: model_update = {} - model_update.update({'metadata': new_metadata}) + model_update.update({'metadata': existing_metadata}) - if existing_metadata: - self._is_dict(existing_metadata, 'existing metadata') - model_update['metadata'].update(existing_metadata) + if new_metadata: + self._is_dict(new_metadata, 'new 
object metadata') + model_update['metadata'].update(new_metadata) return model_update @@ -5662,17 +5824,24 @@ class PowerMaxCommon(object): :returns: first volume in SG, replication extra specs, replication info dict -- bool, dict, dict """ - rep_info_dict, rep_first_vol = dict(), True + rep_info_dict, rep_first_vol, rdfg_empty = dict(), True, True # Get volume type replication extra specs rep_extra_specs = self._get_replication_extra_specs( - extra_specs, self.rep_config) + extra_specs, extra_specs[utils.REP_CONFIG]) # Get the target SG name for the current volume create op sg_name = self.utils.derive_default_sg_from_extra_specs( extra_specs, rep_mode=extra_specs['rep_mode']) rep_extra_specs['sg_name'] = sg_name + # Check if the RDFG has volume in it regardless of target SG state + rdf_group_details = self.rest.get_rdf_group( + extra_specs['array'], rep_extra_specs['rdf_group_no']) + rdfg_device_count = rdf_group_details['numDevices'] + if rdfg_device_count > 0: + rdfg_empty = False + # Check if there are any volumes in the SG, will return 0 if the SG # does not exist if self.rest.get_num_vols_in_sg(extra_specs['array'], sg_name): @@ -5684,8 +5853,7 @@ class PowerMaxCommon(object): # returned local_device_list = self.rest.get_volume_list( extra_specs['array'], - {'storageGroupId': sg_name, - 'rdf_group_number': rep_extra_specs['rdf_group_no']}) + {'storageGroupId': sg_name}) # Set replication info that we will need for creating volume in # existing SG, these are not required for new SGs as the only @@ -5701,7 +5869,7 @@ class PowerMaxCommon(object): 'sync_interval': rep_extra_specs['sync_interval'], 'sync_retries': rep_extra_specs['sync_retries']}) - return rep_first_vol, rep_extra_specs, rep_info_dict + return rep_first_vol, rep_extra_specs, rep_info_dict, rdfg_empty def srdf_protect_storage_group(self, extra_specs, rep_extra_specs, volume_dict): @@ -5760,7 +5928,32 @@ class PowerMaxCommon(object): rdf_group_no=rep_extra_specs['rdf_group_no'], 
rep_mode=extra_specs['rep_mode'], replication_status=REPLICATION_ENABLED, - rdf_group_label=self.rep_config['rdf_group_label'], - target_array_model=rep_extra_specs['target_array_model']) + rdf_group_label=rep_extra_specs['rdf_group_label'], + target_array_model=rep_extra_specs['target_array_model'], + backend_id=rep_extra_specs[ + utils.REP_CONFIG].get(utils.BACKEND_ID, None)) return replication_update, rep_info_dict + + def _cleanup_volume_create_post_failure( + self, volume, volume_name, extra_specs, device_ids): + """Delete lingering volumes that exist in an SG post exception. + + :param volume: Cinder volume -- Volume + :param volume_name: Volume name -- str + :param extra_specs: Volume extra specs -- dict + :param device_ids: Devices ids to be deleted -- list + """ + array = extra_specs[utils.ARRAY] + for device_id in device_ids: + __, __, rdf_group = self.rest.is_vol_in_rep_session( + array, device_id) + if rdf_group: + rdf_group_no = rdf_group[0][utils.RDF_GROUP_NO] + self.cleanup_rdf_device_pair(array, rdf_group_no, device_id, + extra_specs) + else: + self.masking.remove_and_reset_members( + array, volume, device_id, volume_name, extra_specs, False) + self._delete_from_srp( + array, device_id, volume_name, extra_specs) diff --git a/cinder/volume/drivers/dell_emc/powermax/fc.py b/cinder/volume/drivers/dell_emc/powermax/fc.py index de60f3f7ce7..5d4ed7ee014 100644 --- a/cinder/volume/drivers/dell_emc/powermax/fc.py +++ b/cinder/volume/drivers/dell_emc/powermax/fc.py @@ -120,6 +120,7 @@ class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver): - User defined override for short host name and port group name (bp powermax-user-defined-hostname-portgroup) - Switch to Unisphere REST API public replication endpoints + - Support for multiple replication devices """ VERSION = "4.2.0" diff --git a/cinder/volume/drivers/dell_emc/powermax/iscsi.py b/cinder/volume/drivers/dell_emc/powermax/iscsi.py index 8124181e39c..f4183d2ef75 100644 --- 
a/cinder/volume/drivers/dell_emc/powermax/iscsi.py +++ b/cinder/volume/drivers/dell_emc/powermax/iscsi.py @@ -125,6 +125,7 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver): - User defined override for short host name and port group name (bp powermax-user-defined-hostname-portgroup) - Switch to Unisphere REST API public replication endpoints + - Support for multiple replication devices """ VERSION = "4.2.0" diff --git a/cinder/volume/drivers/dell_emc/powermax/masking.py b/cinder/volume/drivers/dell_emc/powermax/masking.py index 7d10c7ba8ef..bf339b67396 100644 --- a/cinder/volume/drivers/dell_emc/powermax/masking.py +++ b/cinder/volume/drivers/dell_emc/powermax/masking.py @@ -671,9 +671,10 @@ class PowerMaxMasking(object): 'sg_name': storagegroup_name}) else: try: + force = True if extra_specs.get(utils.IS_RE) else False self.add_volume_to_storage_group( serial_number, device_id, storagegroup_name, - volume_name, extra_specs) + volume_name, extra_specs, force) except Exception as e: msg = ("Exception adding volume %(vol)s to %(sg)s. " "Exception received was %(e)s." 
diff --git a/cinder/volume/drivers/dell_emc/powermax/metadata.py b/cinder/volume/drivers/dell_emc/powermax/metadata.py index 879e81ce33d..6d6e8e2a279 100644 --- a/cinder/volume/drivers/dell_emc/powermax/metadata.py +++ b/cinder/volume/drivers/dell_emc/powermax/metadata.py @@ -474,7 +474,7 @@ class PowerMaxVolumeMetadata(object): None, None, None, None) rep_mode, replication_status, rdf_group_label, use_bias = ( None, None, None, None) - target_array_model = None + target_array_model, backend_id = None, None if rep_info_dict: rdf_group_no = rep_info_dict['rdf_group_no'] target_name = rep_info_dict['target_name'] @@ -483,6 +483,8 @@ class PowerMaxVolumeMetadata(object): rep_mode = rep_info_dict['rep_mode'] replication_status = rep_info_dict['replication_status'] rdf_group_label = rep_info_dict['rdf_group_label'] + backend_id = rep_info_dict['backend_id'] + if utils.METROBIAS in extra_specs: use_bias = extra_specs[utils.METROBIAS] target_array_model = rep_info_dict['target_array_model'] @@ -501,7 +503,7 @@ class PowerMaxVolumeMetadata(object): openstack_name=volume.display_name, source_volid=volume.source_volid, group_name=group_name, group_id=group_id, - rdf_group_no=rdf_group_no, + rdf_group_no=rdf_group_no, backend_id=backend_id, target_name=target_name, remote_array=remote_array, target_device_id=target_device_id, source_snapshot_id=source_snapshot_id, @@ -585,8 +587,8 @@ class PowerMaxVolumeMetadata(object): successful_operation = "manage_existing_volume" rdf_group_no, target_name, remote_array, target_device_id = ( None, None, None, None) - rep_mode, replication_status, rdf_group_label = ( - None, None, None) + rep_mode, replication_status, rdf_group_label, backend_id = ( + None, None, None, None) if rep_info_dict: rdf_group_no = rep_info_dict['rdf_group_no'] target_name = rep_info_dict['target_name'] @@ -595,6 +597,7 @@ class PowerMaxVolumeMetadata(object): rep_mode = rep_info_dict['rep_mode'] replication_status = rep_info_dict['replication_status'] 
rdf_group_label = rep_info_dict['rdf_group_label'] + backend_id = rep_info_dict['backend_id'] default_sg = self.utils.derive_default_sg_from_extra_specs( extra_specs, rep_mode) @@ -609,7 +612,7 @@ class PowerMaxVolumeMetadata(object): identifier_name=self.utils.get_volume_element_name(volume.id), openstack_name=volume.display_name, source_volid=volume.source_volid, - rdf_group_no=rdf_group_no, + rdf_group_no=rdf_group_no, backend_id=backend_id, target_name=target_name, remote_array=remote_array, target_device_id=target_device_id, rep_mode=rep_mode, replication_status=replication_status, @@ -623,7 +626,7 @@ class PowerMaxVolumeMetadata(object): def capture_retype_info( self, volume, device_id, array, srp, target_slo, target_workload, target_sg_name, is_rep_enabled, rep_mode, - is_compression_disabled): + is_compression_disabled, target_backend_id): """Captures manage existing info in volume metadata :param volume_id: volume identifier @@ -636,6 +639,7 @@ class PowerMaxVolumeMetadata(object): :param is_rep_enabled: replication enabled flag :param rep_mode: replication mode :param is_compression_disabled: compression disabled flag + :param target_backend_id: target replication backend id """ successful_operation = "retype" if not target_slo: @@ -651,12 +655,14 @@ class PowerMaxVolumeMetadata(object): identifier_name=self.utils.get_volume_element_name(volume.id), openstack_name=volume.display_name, is_rep_enabled=('yes' if is_rep_enabled else 'no'), - rep_mode=rep_mode, is_compression_disabled=( + backend_id=target_backend_id, rep_mode=rep_mode, + is_compression_disabled=( True if is_compression_disabled else False)) if not is_rep_enabled: delete_list = ['rdf_group_no', 'rep_mode', 'target_array_model', 'service_level', 'remote_array', 'target_device_id', - 'replication_status', 'rdf_group_label'] + 'replication_status', 'rdf_group_label', + 'backend_id'] self.utils.delete_values_from_dict(datadict, delete_list) update_list = [('default_sg_name', 'source_sg_name'), 
('service_level', 'source_service_level')] diff --git a/cinder/volume/drivers/dell_emc/powermax/provision.py b/cinder/volume/drivers/dell_emc/powermax/provision.py index 207a35988ba..dbe3f4da490 100644 --- a/cinder/volume/drivers/dell_emc/powermax/provision.py +++ b/cinder/volume/drivers/dell_emc/powermax/provision.py @@ -94,6 +94,12 @@ class PowerMaxProvision(object): def do_create_volume_from_sg(storage_group, array): start_time = time.time() + if rep_info and rep_info.get('initial_device_list', False): + local_device_list = self.rest.get_volume_list( + extra_specs['array'], + {'storageGroupId': storagegroup_name}) + rep_info['initial_device_list'] = local_device_list + volume_dict = self.rest.create_volume_from_sg( array, volume_name, storage_group, volume_size, extra_specs, rep_info) @@ -560,7 +566,7 @@ class PowerMaxProvision(object): :param array: the array serial number :param device_id: the source device id - :param sg_name: storage grto + :param sg_name: storage group :param rdf_group: the rdf group number :param rep_extra_specs: replication extra specs :param state: the state of the rdf pair diff --git a/cinder/volume/drivers/dell_emc/powermax/rest.py b/cinder/volume/drivers/dell_emc/powermax/rest.py index eaeaefc78be..a6171e8d9ea 100644 --- a/cinder/volume/drivers/dell_emc/powermax/rest.py +++ b/cinder/volume/drivers/dell_emc/powermax/rest.py @@ -924,6 +924,7 @@ class PowerMaxRest(object): task = self.wait_for_job('Create volume', status_code, job, extra_specs) + # Find the newly created volume. 
device_id = None if rep_info: updated_device_list = self.get_volume_list( diff --git a/cinder/volume/drivers/dell_emc/powermax/utils.py b/cinder/volume/drivers/dell_emc/powermax/utils.py index 1d7d9919307..db362b2d361 100644 --- a/cinder/volume/drivers/dell_emc/powermax/utils.py +++ b/cinder/volume/drivers/dell_emc/powermax/utils.py @@ -65,6 +65,7 @@ VOL_NAME = 'volume_name' EXTRA_SPECS = 'extra_specs' HOST_NAME = 'short_host_name' IS_RE = 'replication_enabled' +IS_RE_CAMEL = 'ReplicationEnabled' DISABLECOMPRESSION = 'storagetype:disablecompression' REP_SYNC = 'Synchronous' REP_ASYNC = 'Asynchronous' @@ -82,6 +83,10 @@ RDF_CONS_EXEMPT = 'exempt' RDF_ALLOW_METRO_DELETE = 'allow_delete_metro' RDF_GROUP_NO = 'rdf_group_number' METROBIAS = 'metro_bias' +BACKEND_ID = 'backend_id' +BACKEND_ID_LEGACY_REP = 'backend_id_legacy_rep' +REPLICATION_DEVICE_BACKEND_ID = 'storagetype:replication_device_backend_id' +REP_CONFIG = 'rep_config' DEFAULT_PORT = 8443 CLONE_SNAPSHOT_NAME = "snapshot_for_clone" STORAGE_GROUP_TAGS = 'storagetype:storagegrouptags' @@ -436,15 +441,31 @@ class PowerMaxUtils(object): else: return True - def change_replication(self, vol_is_replicated, new_type): + def change_replication(self, curr_type_extra_specs, tgt_type_extra_specs): """Check if volume types have different replication status. 
- :param vol_is_replicated: from source - :param new_type: from target + :param curr_type_extra_specs: extra specs from source volume type + :param tgt_type_extra_specs: extra specs from target volume type :returns: bool """ - is_tgt_rep = self.is_replication_enabled(new_type['extra_specs']) - return vol_is_replicated != is_tgt_rep + change_replication = False + # Compare non-rep & rep enabled changes + is_cur_rep = self.is_replication_enabled(curr_type_extra_specs) + is_tgt_rep = self.is_replication_enabled(tgt_type_extra_specs) + rep_enabled_diff = is_cur_rep != is_tgt_rep + + if rep_enabled_diff: + change_replication = True + elif is_cur_rep: + # Both types are rep enabled, check for backend id differences + rdbid = REPLICATION_DEVICE_BACKEND_ID + curr_rep_backend_id = curr_type_extra_specs.get(rdbid, None) + tgt_rep_backend_id = tgt_type_extra_specs.get(rdbid, None) + rdbid_diff = curr_rep_backend_id != tgt_rep_backend_id + if rdbid_diff: + change_replication = True + + return change_replication @staticmethod def is_replication_enabled(extra_specs): @@ -463,57 +484,70 @@ class PowerMaxUtils(object): """Gather necessary replication configuration info. 
def get_replication_config(rep_device_list):
    """Gather necessary replication configuration info.

    Builds one configuration dict per replication_device entry found
    in cinder.conf.

    :param rep_device_list: the replication device list from cinder.conf
    :returns: rep_configs, replication configuration list, or None when
              no replication devices are configured
    :raises VolumeBackendAPIException: if a mandatory SRDF key is missing
    """
    if not rep_device_list:
        return None

    rep_configs = []
    for device in rep_device_list:
        # Mandatory SRDF settings -- a missing key is fatal.
        try:
            element = {
                'array': device['target_device_id'],
                'srp': device['remote_pool'],
                'rdf_group_label': device['rdf_group_label'],
                'portgroup': device['remote_port_group']}
        except KeyError as ke:
            error_message = (_("Failed to retrieve all necessary SRDF "
                               "information. Error received: %(ke)s.") %
                             {'ke': six.text_type(ke)})
            LOG.exception(error_message)
            raise exception.VolumeBackendAPIException(
                message=error_message)

        # Optional sync wait/retry settings, defaulted when absent or
        # unparseable.
        try:
            element['sync_retries'] = int(device['sync_retries'])
            element['sync_interval'] = int(device['sync_interval'])
        except (KeyError, ValueError) as ke:
            LOG.debug(
                "SRDF Sync wait/retries options not set or set "
                "incorrectly, defaulting to 200 retries with a 3 "
                "second wait. Configuration load warning: %(ke)s.",
                {'ke': six.text_type(ke)})
            element['sync_retries'] = 200
            element['sync_interval'] = 3

        element['allow_extend'] = strutils.bool_from_string(
            device.get('allow_extend', 'false'))

        # Normalise the user supplied mode string; anything
        # unrecognised falls back to synchronous replication.
        mode = device.get('mode', '').lower()
        if mode in ['async', 'asynchronous']:
            element['mode'] = REP_ASYNC
        elif mode == 'metro':
            element['mode'] = REP_METRO
            element[METROBIAS] = strutils.bool_from_string(
                device.get('metro_use_bias', 'false'))
        else:
            element['mode'] = REP_SYNC

        backend_id = device.get(BACKEND_ID, '')
        if backend_id:
            element[BACKEND_ID] = backend_id

        rep_configs.append(element)
    return rep_configs
+ + :param rep_devices: rep_devices imported from cinder.conf --list + """ + rdf_group_labels = set() + backend_ids = set() + rep_modes = set() + target_arrays = set() + for rep_device in rep_devices: + backend_id = rep_device.get(BACKEND_ID) + if backend_id: + if backend_id in backend_ids: + msg = (_('Backend IDs must be unique across all ' + 'rep_device when multiple replication devices ' + 'are defined in cinder.conf, backend_id %s is ' + 'defined more than once.') % backend_id) + raise exception.InvalidConfigurationValue(msg) + else: + msg = _('Backend IDs must be assigned for each rep_device ' + 'when multiple replication devices are defined in ' + 'cinder.conf.') + raise exception.InvalidConfigurationValue(msg) + backend_ids.add(backend_id) + + rdf_group_label = rep_device.get('rdf_group_label') + if rdf_group_label in rdf_group_labels: + msg = (_('RDF Group Labels must be unique across all ' + 'rep_device when multiple replication devices are ' + 'defined in cinder.conf. RDF Group Label %s is ' + 'defined more than once.') % rdf_group_label) + raise exception.InvalidConfigurationValue(msg) + rdf_group_labels.add(rdf_group_label) + + rep_mode = rep_device.get('mode', REP_SYNC) + if rep_mode in rep_modes: + msg = (_('RDF Modes must be unique across all ' + 'replication_device. Found multiple instances of %s ' + 'mode defined in cinder.conf.') % rep_mode) + raise exception.InvalidConfigurationValue(msg) + rep_modes.add(rep_mode) + + target_device_id = rep_device.get('target_device_id') + target_arrays.add(target_device_id) + + target_arrays.discard(None) + if len(target_arrays) > 1: + msg = _('Found multiple target_device_id set in cinder.conf. A ' + 'single target_device_id value must be used across all ' + 'replication device when defining using multiple ' + 'replication devices.') + raise exception.InvalidConfigurationValue(msg) + @staticmethod def compare_cylinders(cylinders_source, cylinder_target): """Compare number of cylinders of source and target. 
def get_rep_config(backend_id, rep_configs):
    """Get rep_config for given backend_id.

    With a single replication_device configured it is returned
    regardless of backend_id (legacy behaviour). With multiple devices
    configured, the device whose backend_id matches is returned.

    :param backend_id: rep config search key -- str
    :param rep_configs: backend rep_configs -- list
    :returns: rep_config -- dict
    :raises InvalidInput: if no rep_config matches backend_id
    """
    if len(rep_configs) == 1:
        return rep_configs[0]

    rep_device = None
    for rep_config in rep_configs:
        if rep_config[BACKEND_ID] == backend_id:
            rep_device = rep_config
            # Backend ids are validated unique, first match is the
            # only match.
            break
    if rep_device is None:
        msg = _('Could not find a rep_device with a backend_id of '
                '%s. Please confirm that the '
                'replication_device_backend_id extra spec for this '
                'volume type matches the backend_id of the intended '
                'rep_device in cinder.conf') % backend_id
        LOG.error(msg)
        # Raise with the detailed reason (file convention is
        # InvalidInput(reason=...)) so the caller sees why the lookup
        # failed instead of a generic string.
        raise exception.InvalidInput(reason=msg)
    return rep_device
If you meant to failover, please either omit ' + 'the --backend_id parameter or use the --backend_id ' + 'parameter with a valid backend id.') + elif len(rep_configs) > 1: + if failover_backend_id is None: + is_valid = False + msg = _('Cannot failover, no backend_id provided while ' + 'multiple replication devices are defined in ' + 'cinder.conf, please provide a backend_id ' + 'which will act as new primary array by ' + 'appending --backend_id to your command.') + else: + rc = self.get_rep_config(failover_backend_id, rep_configs) + if rc is None: + is_valid = False + msg = _('Can not find replication device with ' + 'backend_id of %s') % failover_backend_id + return is_valid, msg + + def validate_replication_group_config(self, rep_configs, extra_specs_list): + """Validate replication group configuration + + Validate the extra specs of volume types being added to + a volume group against rep_config imported from cinder.conf + + :param rep_configs: list of replication_device dicts from cinder.conf + :param extra_specs_list: extra_specs of volume types added to group + :raises InvalidInput: If any of the validation check fail + """ + if not rep_configs: + LOG.error('No replication devices set in cinder.conf please ' + 'disable replication in Volume Group extra specs ' + 'or add replication device to cinder.conf.') + msg = _('No replication devices are defined in cinder.conf, ' + 'can not enable volume group replication.') + raise exception.InvalidInput(reason=msg) + + rep_group_backend_ids = set() + for extra_specs in extra_specs_list: + target_backend_id = extra_specs.get( + REPLICATION_DEVICE_BACKEND_ID, + BACKEND_ID_LEGACY_REP) + try: + target_rep_config = self.get_rep_config( + target_backend_id, rep_configs) + rep_group_backend_ids.add(target_backend_id) + except exception.InvalidInput: + target_rep_config = None + + if not (extra_specs.get(IS_RE) == ' True'): + # Replication is disabled or not set to correct value + # in the Volume Type being added + msg = 
_('Replication is not enabled for a Volume Type, ' + 'all Volume Types in a replication enabled ' + 'Volume Group must have replication enabled.') + raise exception.InvalidInput(reason=msg) + + if not target_rep_config: + # Unable to determine rep_configs to use. + msg = _('Unable to determine which rep_device to use from ' + 'cinder.conf. Could not validate volume types being ' + 'added to group.') + raise exception.InvalidInput(reason=msg) + + # Verify that replication is Synchronous mode + if not target_rep_config.get('mode'): + LOG.warning('Unable to verify the replication mode ' + 'of Volume Type, please ensure only ' + 'Synchronous replication is used.') + elif not target_rep_config['mode'] == REP_SYNC: + msg = _('Replication for Volume Type is not set ' + 'to Synchronous. Only Synchronous ' + 'can be used with replication groups') + raise exception.InvalidInput(reason=msg) + + if len(rep_group_backend_ids) > 1: + # We should only have a single backend_id + # (replication type) across all the Volume Types + msg = _('Multiple replication backend ids detected ' + 'please ensure only a single replication device ' + '(backend_id) is used for all Volume Types in a ' + 'Volume Group.') + raise exception.InvalidInput(reason=msg) + + @staticmethod + def validate_non_replication_group_config(extra_specs_list): + """Validate volume group configuration + + Validate that none of the Volume Type extra specs are + replication enabled. + + :param extra_specs_list: list of Volume Type extra specs + :return: bool replication enabled found in any extra specs + """ + for extra_specs in extra_specs_list: + if extra_specs.get(IS_RE) == ' True': + msg = _('Replication is enabled in one or more of the ' + 'Volume Types being added to new Volume Group but ' + 'the Volume Group is not replication enabled. 
Please ' + 'enable replication in the Volume Group or select ' + 'only non-replicated Volume Types.') + raise exception.InvalidInput(reason=msg) + + @staticmethod + def get_migration_delete_extra_specs(volume, extra_specs, rep_configs): + """Get previous extra specs rep details during migration delete + + :param volume: volume object -- volume + :param extra_specs: volumes extra specs -- dict + :param rep_configs: imported cinder.conf replication devices -- dict + :returns: updated extra specs -- dict + """ + metadata = volume.metadata + replication_enabled = strutils.bool_from_string( + metadata.get(IS_RE_CAMEL, 'False')) + if replication_enabled: + rdfg_label = metadata['RDFG-Label'] + rep_config = next( + (r_c for r_c in rep_configs if r_c[ + 'rdf_group_label'] == rdfg_label), None) + + extra_specs[IS_RE] = replication_enabled + extra_specs[REP_MODE] = metadata['ReplicationMode'] + extra_specs[REP_CONFIG] = rep_config + extra_specs[REPLICATION_DEVICE_BACKEND_ID] = rep_config[BACKEND_ID] + else: + extra_specs.pop(IS_RE, None) + return extra_specs diff --git a/releasenotes/notes/powermax-multiple-replication-devices-0cc532ae621ea9a5.yaml b/releasenotes/notes/powermax-multiple-replication-devices-0cc532ae621ea9a5.yaml new file mode 100644 index 00000000000..c838777ec43 --- /dev/null +++ b/releasenotes/notes/powermax-multiple-replication-devices-0cc532ae621ea9a5.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + PowerMax Driver - Support to allow the use of multiple replication modes on + one backend array.