diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py
index 89dca0c5aa1..e7d48bc8cd1 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py
@@ -2877,6 +2877,16 @@ class VMAXProvisionTest(test.TestCase):
             self.data.group_snapshot_name, self.data.extra_specs,
             unlink=True)
 
+    @mock.patch.object(rest.VMAXRest, 'get_storage_group',
+                       side_effect=[None, VMAXCommonData.sg_details[1]])
+    @mock.patch.object(provision.VMAXProvision, 'create_volume_group')
+    def test_get_or_create_volume_group(self, mock_create, mock_sg):
+        for x in range(0, 2):
+            self.provision.get_or_create_volume_group(
+                self.data.array, self.data.test_group, self.data.extra_specs)
+        self.assertEqual(2, mock_sg.call_count)
+        self.assertEqual(1, mock_create.call_count)
+
 
 class VMAXCommonTest(test.TestCase):
     def setUp(self):
@@ -2999,7 +3009,7 @@ class VMAXCommonTest(test.TestCase):
         self.common._remove_members(array, volume, device_id,
                                     extra_specs, self.data.connector)
         mock_rm.assert_called_once_with(
-            array, device_id, volume_name,
+            array, volume, device_id, volume_name,
             extra_specs, True, self.data.connector)
 
     def test_unmap_lun(self):
@@ -3567,9 +3577,9 @@ class VMAXCommonTest(test.TestCase):
         with mock.patch.object(
                 self.common, 'cleanup_lun_replication') as mock_clean:
             self.common._remove_vol_and_cleanup_replication(
-                array, device_id, volume_name, extra_specs)
+                array, device_id, volume_name, extra_specs, volume)
             mock_rm.assert_called_once_with(
-                array, device_id, volume_name, extra_specs, False)
+                array, volume, device_id, volume_name, extra_specs, False)
             mock_clean.assert_not_called()
             self.common._remove_vol_and_cleanup_replication(
                 array, device_id, volume_name, extra_specs, volume)
@@ -3929,7 +3939,7 @@ class VMAXCommonTest(test.TestCase):
             self.common._slo_workload_migration(
                 device_id, volume, host, volume_name, new_type, extra_specs)
             self.common._migrate_volume.assert_called_once_with(
-                extra_specs[utils.ARRAY], device_id,
+                extra_specs[utils.ARRAY], volume, device_id,
                 extra_specs[utils.SRP], 'Silver', 'OLTP', volume_name,
                 new_type, extra_specs)
 
@@ -3974,7 +3984,7 @@ class VMAXCommonTest(test.TestCase):
                 extra_specs)
             self.assertTrue(bool(migrate_status))
             self.common._migrate_volume.assert_called_once_with(
-                extra_specs[utils.ARRAY], device_id,
+                extra_specs[utils.ARRAY], volume, device_id,
                 extra_specs[utils.SRP], self.data.slo,
                 self.data.workload, volume_name, new_type, extra_specs)
 
@@ -3985,25 +3995,28 @@ class VMAXCommonTest(test.TestCase):
         device_id = self.data.device_id
         volume_name = self.data.test_volume.name
         extra_specs = self.data.extra_specs
+        volume = self.data.test_volume
         new_type = {'extra_specs': {}}
         migrate_status = self.common._migrate_volume(
-            self.data.array, device_id, self.data.srp, self.data.slo,
-            self.data.workload, volume_name, new_type, extra_specs)
+            self.data.array, volume, device_id, self.data.srp,
+            self.data.slo, self.data.workload, volume_name,
+            new_type, extra_specs)
        self.assertTrue(migrate_status)
         target_extra_specs = {
             'array': self.data.array, 'interval': 3,
             'retries': 120, 'slo': self.data.slo,
             'srp': self.data.srp, 'workload': self.data.workload}
         mock_remove.assert_called_once_with(
-            self.data.array, device_id, volume_name,
+            self.data.array, volume, device_id, volume_name,
             target_extra_specs, reset=True)
         mock_remove.reset_mock()
         with mock.patch.object(
                 self.rest, 'get_storage_groups_from_volume',
                 return_value=[]):
             migrate_status = self.common._migrate_volume(
-                self.data.array, device_id, self.data.srp, self.data.slo,
-                self.data.workload, volume_name, new_type, extra_specs)
+                self.data.array, volume, device_id, self.data.srp,
+                self.data.slo, self.data.workload, volume_name,
+                new_type, extra_specs)
             self.assertTrue(migrate_status)
             mock_remove.assert_not_called()
 
@@ -4017,7 +4030,8 @@ class VMAXCommonTest(test.TestCase):
                 self.masking, 'get_or_create_default_storage_group',
                 side_effect=exception.VolumeBackendAPIException):
             migrate_status = self.common._migrate_volume(
-                self.data.array, device_id, self.data.srp, self.data.slo,
+                self.data.array, self.data.test_volume, device_id,
+                self.data.srp, self.data.slo,
                 self.data.workload, volume_name, new_type, extra_specs)
             self.assertFalse(migrate_status)
 
@@ -4030,7 +4044,8 @@ class VMAXCommonTest(test.TestCase):
                 self.rest, 'is_volume_in_storagegroup',
                 return_value=False):
             migrate_status = self.common._migrate_volume(
-                self.data.array, device_id, self.data.srp, self.data.slo,
+                self.data.array, self.data.test_volume, device_id,
+                self.data.srp, self.data.slo,
                 self.data.workload, volume_name, new_type, extra_specs)
             self.assertFalse(migrate_status)
 
@@ -4080,26 +4095,6 @@ class VMAXCommonTest(test.TestCase):
             self.data.srp, volume_name, False)
         self.assertEqual(ref_return, return_val)
 
-    def test_find_volume_group_name_from_id(self):
-        array = self.data.array
-        group_id = 'GrpId'
-        group_name = None
-        ref_group_name = self.data.storagegroup_name_with_id
-        with mock.patch.object(
-                self.rest, 'get_storage_group_list',
-                return_value=self.data.sg_list_rep):
-            group_name = self.common._find_volume_group_name_from_id(
-                array, group_id)
-        self.assertEqual(ref_group_name, group_name)
-
-    def test_find_volume_group_name_from_id_not_found(self):
-        array = self.data.array
-        group_id = 'GrpId'
-        group_name = None
-        group_name = self.common._find_volume_group_name_from_id(
-            array, group_id)
-        self.assertIsNone(group_name)
-
     def test_find_volume_group(self):
         group = self.data.test_group_1
         array = self.data.array
@@ -4845,7 +4840,8 @@ class VMAXMaskingTest(test.TestCase):
         'get_or_create_masking_view_and_map_lun')
     def test_setup_masking_view(self, mock_get_or_create_mv):
         self.driver.masking.setup_masking_view(
-            self.data.array, self.maskingviewdict, self.extra_specs)
+            self.data.array, self.data.test_volume,
+            self.maskingviewdict, self.extra_specs)
         mock_get_or_create_mv.assert_called_once()
 
     @mock.patch.object(
@@ -4869,19 +4865,22 @@ class VMAXMaskingTest(test.TestCase):
             mock_add_volume):
         rollback_dict = (
             self.driver.masking.get_or_create_masking_view_and_map_lun(
-                self.data.array, self.maskingviewdict['maskingview_name'],
+                self.data.array, self.data.test_volume,
+                self.maskingviewdict['maskingview_name'],
                 self.maskingviewdict, self.extra_specs))
         self.assertEqual(self.maskingviewdict, rollback_dict)
         self.assertRaises(
             exception.VolumeBackendAPIException,
             self.driver.masking.get_or_create_masking_view_and_map_lun,
-            self.data.array, self.maskingviewdict['maskingview_name'],
+            self.data.array, self.data.test_volume,
+            self.maskingviewdict['maskingview_name'],
             self.maskingviewdict, self.extra_specs)
         self.maskingviewdict['slo'] = None
         self.assertRaises(
             exception.VolumeBackendAPIException,
             self.driver.masking.get_or_create_masking_view_and_map_lun,
-            self.data.array, self.maskingviewdict['maskingview_name'],
+            self.data.array, self.data.test_volume,
+            self.maskingviewdict['maskingview_name'],
             self.maskingviewdict, self.extra_specs)
 
     @mock.patch.object(
@@ -5246,14 +5245,16 @@ class VMAXMaskingTest(test.TestCase):
         self.assertRaises(
             exception.VolumeBackendAPIException,
             self.mask.check_if_rollback_action_for_masking_required,
-            self.data.array, self.device_id, self.maskingviewdict)
+            self.data.array, self.data.test_volume,
+            self.device_id, self.maskingviewdict)
         with mock.patch.object(masking.VMAXMasking,
                                'remove_and_reset_members'):
             self.maskingviewdict[
                 'default_sg_name'] = self.data.defaultstoragegroup_name
             error_message = (
                 self.mask.check_if_rollback_action_for_masking_required(
-                    self.data.array, self.device_id, self.maskingviewdict))
+                    self.data.array, self.data.test_volume,
+                    self.device_id, self.maskingviewdict))
             self.assertIsNone(error_message)
 
     @mock.patch.object(rest.VMAXRest, 'delete_masking_view')
@@ -5328,13 +5329,14 @@ class VMAXMaskingTest(test.TestCase):
 
     @mock.patch.object(masking.VMAXMasking, '_cleanup_deletion')
     def test_remove_and_reset_members(self, mock_cleanup):
-        self.mask.remove_and_reset_members(self.data.array, self.device_id,
-                                           self.volume_name, self.extra_specs,
-                                           reset=False)
+        self.mask.remove_and_reset_members(
+            self.data.array, self.device_id, self.data.test_volume,
+            self.volume_name, self.extra_specs, reset=False)
         mock_cleanup.assert_called_once()
 
     @mock.patch.object(rest.VMAXRest, 'get_storage_groups_from_volume',
                        side_effect=[[VMAXCommonData.storagegroup_name_i],
+                                    [VMAXCommonData.storagegroup_name_i],
                                     [VMAXCommonData.storagegroup_name_i,
                                      VMAXCommonData.storagegroup_name_f]])
     @mock.patch.object(masking.VMAXMasking, 'remove_volume_from_sg')
@@ -5342,14 +5344,19 @@ class VMAXMaskingTest(test.TestCase):
                        'add_volume_to_default_storage_group')
     def test_cleanup_deletion(self, mock_add, mock_remove_vol, mock_get_sg):
         self.mask._cleanup_deletion(
-            self.data.array, self.device_id, self.volume_name,
-            self.extra_specs, None, True)
+            self.data.array, self.data.test_volume, self.device_id,
+            self.volume_name, self.extra_specs, None, True)
         mock_add.assert_not_called()
         self.mask._cleanup_deletion(
-            self.data.array, self.device_id, self.volume_name,
-            self.extra_specs, None, True)
-        mock_add.assert_called_once_with(self.data.array, self.device_id,
-                                         self.volume_name, self.extra_specs)
+            self.data.array, self.data.test_volume, self.device_id,
+            self.volume_name, self.extra_specs, self.data.connector, True)
+        mock_add.assert_not_called()
+        self.mask._cleanup_deletion(
+            self.data.array, self.data.test_volume, self.device_id,
+            self.volume_name, self.extra_specs, None, True)
+        mock_add.assert_called_once_with(
+            self.data.array, self.device_id,
+            self.volume_name, self.extra_specs, volume=self.data.test_volume)
 
     @mock.patch.object(masking.VMAXMasking, '_last_vol_in_sg')
     @mock.patch.object(masking.VMAXMasking, '_multiple_vols_in_sg')
@@ -5491,6 +5498,14 @@ class VMAXMaskingTest(test.TestCase):
             self.data.array, self.device_id, self.volume_name,
             self.extra_specs, src_sg=self.data.storagegroup_name_i)
         mock_move.assert_called_once()
+        mock_add_sg.reset_mock()
+        vol_grp_member = deepcopy(self.data.test_volume)
+        vol_grp_member.group_id = self.data.test_vol_grp_name_id_only
+        vol_grp_member.group = self.data.test_group
+        self.mask.add_volume_to_default_storage_group(
+            self.data.array, self.device_id, self.volume_name,
+            self.extra_specs, volume=vol_grp_member)
+        self.assertEqual(2, mock_add_sg.call_count)
 
     @mock.patch.object(provision.VMAXProvision, 'create_storage_group')
     def test_get_or_create_default_storage_group(self, mock_create_sg):
@@ -5917,8 +5932,8 @@ class VMAXCommonReplicationTest(test.TestCase):
             self.data.device_id2,
             self.data.rdf_group_no, "1", rep_extra_specs)
         mock_rm.assert_called_once_with(
-            self.data.remote_array, self.data.device_id2, "1",
-            rep_extra_specs, False)
+            self.data.remote_array, self.data.test_volume,
+            self.data.device_id2, "1", rep_extra_specs, False)
         # Cleanup legacy replication
         self.common.cleanup_lun_replication(
             self.data.test_legacy_vol, "1", self.data.device_id,
@@ -6122,9 +6137,9 @@ class VMAXCommonReplicationTest(test.TestCase):
         rep_config = self.utils.get_replication_config(
             [self.replication_device])
         self.common.enable_rdf(
-            self.data.array, self.data.device_id, self.data.rdf_group_no,
-            rep_config, 'OS-1', self.data.remote_array, self.data.device_id2,
-            self.extra_specs)
+            self.data.array, self.data.test_volume, self.data.device_id,
+            self.data.rdf_group_no, rep_config, 'OS-1',
+            self.data.remote_array, self.data.device_id2, self.extra_specs)
         self.assertEqual(2, mock_remove.call_count)
         self.assertEqual(2, mock_add.call_count)
 
@@ -6135,7 +6150,7 @@ class VMAXCommonReplicationTest(test.TestCase):
             [self.replication_device])
         self.assertRaises(
             exception.VolumeBackendAPIException, self.common.enable_rdf,
-            self.data.array, self.data.device_id,
+            self.data.array, self.data.test_volume, self.data.device_id,
             self.data.failed_resource, rep_config, 'OS-1',
             self.data.remote_array, self.data.device_id2, self.extra_specs)
         self.assertEqual(1, mock_cleanup.call_count)
diff --git a/cinder/volume/drivers/dell_emc/vmax/common.py b/cinder/volume/drivers/dell_emc/vmax/common.py
index 81e66efcb1e..9e9ca1a4fb4 100644
--- a/cinder/volume/drivers/dell_emc/vmax/common.py
+++ b/cinder/volume/drivers/dell_emc/vmax/common.py
@@ -254,19 +254,17 @@ class VMAXCommon(object):
                           volume_name, volume_size, extra_specs))
 
         if volume.group_id is not None:
-            group_name = self._find_volume_group_name_from_id(
-                extra_specs[utils.ARRAY], volume.group_id)
-            if group_name is not None:
-                self.masking.add_volume_to_storage_group(
-                    extra_specs[utils.ARRAY], volume_dict['device_id'],
-                    group_name, volume_name, extra_specs)
+            group_name = self.provision.get_or_create_volume_group(
+                extra_specs[utils.ARRAY], volume.group, extra_specs)
+            self.masking.add_volume_to_storage_group(
+                extra_specs[utils.ARRAY], volume_dict['device_id'],
+                group_name, volume_name, extra_specs)
 
         # Set-up volume replication, if enabled
         if self.utils.is_replication_enabled(extra_specs):
             rep_update = self._replicate_volume(volume, volume_name,
                                                 volume_dict, extra_specs)
             model_update.update(rep_update)
-
         LOG.info("Leaving create_volume: %(name)s. Volume dict: %(dict)s.",
                  {'name': volume_name, 'dict': volume_dict})
         model_update.update(
@@ -424,7 +422,8 @@ class VMAXCommon(object):
         volume_name = volume.name
         LOG.debug("Detaching volume %s.", volume_name)
         return self.masking.remove_and_reset_members(
-            array, device_id, volume_name, extra_specs, True, connector)
+            array, volume, device_id, volume_name,
+            extra_specs, True, connector)
 
     def _unmap_lun(self, volume, connector):
         """Unmaps a volume from the host.
@@ -587,7 +586,7 @@ class VMAXCommon(object):
         else:
             masking_view_dict['isLiveMigration'] = False
         rollback_dict = self.masking.setup_masking_view(
-            masking_view_dict[utils.ARRAY],
+            masking_view_dict[utils.ARRAY], volume,
             masking_view_dict, extra_specs)
 
         # Find host lun id again after the volume is exported to the host.
@@ -1495,7 +1494,7 @@ class VMAXCommon(object):
             raise exception.VolumeBackendAPIException(data=error_message)
 
     def _remove_vol_and_cleanup_replication(
-            self, array, device_id, volume_name, extra_specs, volume=None):
+            self, array, device_id, volume_name, extra_specs, volume):
         """Remove a volume from its storage groups and cleanup replication.
 
         :param array: the array serial number
@@ -1506,7 +1505,7 @@ class VMAXCommon(object):
         """
         # Remove from any storage groups
         self.masking.remove_and_reset_members(
-            array, device_id, volume_name, extra_specs, False)
+            array, volume, device_id, volume_name, extra_specs, False)
         # Cleanup remote replication
         if self.utils.is_replication_enabled(extra_specs):
             self.cleanup_lun_replication(volume, volume_name,
@@ -1953,20 +1952,21 @@ class VMAXCommon(object):
                       'targetHost': host['host'],
                       'cc': do_change_compression})
             return self._migrate_volume(
-                extra_specs[utils.ARRAY], device_id,
+                extra_specs[utils.ARRAY], volume, device_id,
                 extra_specs[utils.SRP], target_slo,
                 target_workload, volume_name, new_type, extra_specs)
 
         return False
 
     def _migrate_volume(
-            self, array, device_id, srp, target_slo,
+            self, array, volume, device_id, srp, target_slo,
             target_workload, volume_name, new_type, extra_specs):
         """Migrate from one slo/workload combination to another.
 
         This requires moving the volume from its current SG to a
         new or existing SG that has the target attributes.
         :param array: the array serial number
+        :param volume: the volume object
         :param device_id: the device number
         :param srp: the storage resource pool
         :param target_slo: the target service level
@@ -2005,7 +2005,7 @@ class VMAXCommon(object):
                 array, device_id, target_sg_name, volume_name, extra_specs)
         else:
             self.masking.remove_and_reset_members(
-                array, device_id, volume_name, target_extra_specs,
+                array, volume, device_id, volume_name, target_extra_specs,
                 reset=True)
 
         # Check that it has been added.
@@ -2145,7 +2145,7 @@ class VMAXCommon(object):
 
             # Enable rdf replication and establish the link
             rdf_dict = self.enable_rdf(
-                array, device_id, rdf_group_no, self.rep_config,
+                array, volume, device_id, rdf_group_no, self.rep_config,
                 target_name, remote_array, target_device_id, extra_specs)
 
             LOG.info('Successfully setup replication for %s.',
@@ -2189,7 +2189,7 @@ class VMAXCommon(object):
             if target_device is not None:
                 # Clean-up target
                 self.masking.remove_and_reset_members(
-                    remote_array, target_device, volume_name,
+                    remote_array, volume, target_device, volume_name,
                     rep_extra_specs, False)
                 self._cleanup_remote_target(
                     array, remote_array, device_id, target_device,
@@ -2494,13 +2494,13 @@ class VMAXCommon(object):
             # have a mix of replicated and non-replicated volumes as
             # the SRDF groups become unmanageable).
             self.masking.remove_and_reset_members(
-                array, device_id, volume_name, extra_specs, False)
+                array, volume, device_id, volume_name, extra_specs, False)
 
             # Repeat on target side
             rep_extra_specs = self._get_replication_extra_specs(
                 extra_specs, self.rep_config)
             self.masking.remove_and_reset_members(
-                remote_array, target_device, volume_name,
+                remote_array, volume, target_device, volume_name,
                 rep_extra_specs, False)
 
             LOG.info("Breaking replication relationship...")
@@ -2539,11 +2539,12 @@ class VMAXCommon(object):
             LOG.error(exception_message)
             raise exception.VolumeBackendAPIException(data=exception_message)
 
-    def enable_rdf(self, array, device_id, rdf_group_no, rep_config,
+    def enable_rdf(self, array, volume, device_id, rdf_group_no, rep_config,
                    target_name, remote_array, target_device, extra_specs):
         """Create a replication relationship with a target volume.
 
         :param array: the array serial number
+        :param volume: the volume object
         :param device_id: the device id
         :param rdf_group_no: the rdf group number
         :param rep_config: the replication config
@@ -2559,10 +2560,10 @@ class VMAXCommon(object):
             # Remove source and target instances from their
             # default storage groups
             self.masking.remove_and_reset_members(
-                array, device_id, target_name, extra_specs, False)
+                array, volume, device_id, target_name, extra_specs, False)
 
             self.masking.remove_and_reset_members(
-                remote_array, target_device, target_name,
+                remote_array, volume, target_device, target_name,
                 rep_extra_specs, False)
 
             # Establish replication relationship
@@ -2585,7 +2586,7 @@ class VMAXCommon(object):
                       "group. Volume name: %(name)s "),
                      {'name': target_name})
             self.masking.remove_and_reset_members(
-                remote_array, target_device, target_name,
+                remote_array, volume, target_device, target_name,
                 rep_extra_specs, False)
             self._cleanup_remote_target(
                 array, remote_array, device_id, target_device,
@@ -3009,21 +3010,6 @@ class VMAXCommon(object):
 
         return model_update, snapshots_model_update
 
-    def _find_volume_group_name_from_id(self, array, group_id):
-        """Finds the volume group name given its id
-
-        :param array: the array serial number
-        :param group_id: the group id
-        :returns: group_name: Name of the group
-        """
-        group_name = None
-        sg_list = self.rest.get_storage_group_list(array)
-        for sg in sg_list:
-            if group_id in sg:
-                group_name = sg
-                return group_name
-        return group_name
-
     def _find_volume_group(self, array, group):
         """Finds a volume group given the group.
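
Aside: the common.py hunks above drop the _find_volume_group_name_from_id substring scan and instead ask the provision layer to get-or-create the backing storage group for a generic volume group. The snippet below is a minimal standalone sketch of that get-or-create pattern only, not driver code; FakeRest and its two methods are hypothetical stand-ins for the small slice of the VMAXRest interface used here.

# Minimal sketch of the get-or-create pattern adopted by this patch.
# FakeRest is an assumed stand-in, not the real VMAXRest class.
class FakeRest(object):
    """Remembers which storage groups currently exist on the 'array'."""

    def __init__(self, existing):
        self.existing = set(existing)

    def get_storage_group(self, array, name):
        # Return the group if present, else None (mirrors a REST lookup).
        return name if name in self.existing else None

    def create_storage_group(self, array, name):
        self.existing.add(name)


def get_or_create_group(rest, array, group_name):
    """Return group_name, creating the backend group if it is absent.

    Unlike the removed helper, this never scans the full storage-group
    list for a substring match; it simply ensures the group exists.
    """
    if not rest.get_storage_group(array, group_name):
        rest.create_storage_group(array, group_name)
    return group_name


if __name__ == '__main__':
    rest = FakeRest(existing=['OS-existing-SG'])
    # First call creates the group, second call is a plain lookup.
    print(get_or_create_group(rest, '000197800123', 'OS-my-group-GrpId'))
    print(get_or_create_group(rest, '000197800123', 'OS-my-group-GrpId'))
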
diff --git a/cinder/volume/drivers/dell_emc/vmax/masking.py b/cinder/volume/drivers/dell_emc/vmax/masking.py
index 1cf6d3df09c..2b337fa44e9 100644
--- a/cinder/volume/drivers/dell_emc/vmax/masking.py
+++ b/cinder/volume/drivers/dell_emc/vmax/masking.py
@@ -40,24 +40,25 @@ class VMAXMasking(object):
         self.provision = provision.VMAXProvision(self.rest)
 
     def setup_masking_view(
-            self, serial_number, masking_view_dict, extra_specs):
+            self, serial_number, volume, masking_view_dict, extra_specs):
 
         @coordination.synchronized("emc-mv-{maskingview_name}")
         def do_get_or_create_masking_view_and_map_lun(maskingview_name):
             return self.get_or_create_masking_view_and_map_lun(
-                serial_number, maskingview_name, masking_view_dict,
+                serial_number, volume, maskingview_name, masking_view_dict,
                 extra_specs)
         return do_get_or_create_masking_view_and_map_lun(
             masking_view_dict[utils.MV_NAME])
 
     def get_or_create_masking_view_and_map_lun(
-            self, serial_number, maskingview_name, masking_view_dict,
+            self, serial_number, volume, maskingview_name, masking_view_dict,
             extra_specs):
         """Get or Create a masking view and add a volume to the storage group.
 
         Given a masking view dict either get or create a masking view and add
         the volume to the associated storage group.
         :param serial_number: the array serial number
+        :param volume: the volume object
         :param maskingview_name: the masking view name
         :param masking_view_dict: the masking view dict
         :param extra_specs: the extra specifications
@@ -112,7 +113,7 @@ class VMAXMasking(object):
 
             if rollback_dict['slo'] is not None:
                 self.check_if_rollback_action_for_masking_required(
-                    serial_number, device_id, masking_view_dict)
+                    serial_number, volume, device_id, masking_view_dict)
 
             else:
                 self._check_adding_volume_to_storage_group(
@@ -832,7 +833,7 @@ class VMAXMasking(object):
         return error_message
 
     def check_if_rollback_action_for_masking_required(
-            self, serial_number, device_id, rollback_dict):
+            self, serial_number, volume, device_id, rollback_dict):
         """Rollback action for volumes with an associated service level.
 
         We need to be able to return the volume to the default storage group
@@ -841,6 +842,7 @@ class VMAXMasking(object):
         the exception occurred. We also may need to clean up any unused
         initiator groups.
         :param serial_number: the array serial number
+        :param volume: the volume object
         :param device_id: the device id
         :param rollback_dict: the rollback dict
         :returns: error message -- string, or None
@@ -883,9 +885,10 @@ class VMAXMasking(object):
                     # Remove it from its current storage group and return it
                     # to its default masking view if slo is defined.
                     self.remove_and_reset_members(
-                        serial_number, device_id,
+                        serial_number, volume, device_id,
                         rollback_dict['volume_name'],
-                        rollback_dict['extra_specs'])
+                        rollback_dict['extra_specs'], True,
+                        rollback_dict['connector'])
                     message = (_("Rollback - Volume in another storage "
                                  "group besides default storage group."))
             except Exception as e:
@@ -1019,11 +1022,12 @@ class VMAXMasking(object):
 
     @coordination.synchronized("emc-vol-{device_id}")
     def remove_and_reset_members(
-            self, serial_number, device_id, volume_name, extra_specs,
-            reset=True, connector=None):
+            self, serial_number, volume, device_id, volume_name,
+            extra_specs, reset=True, connector=None):
         """This is called on a delete, unmap device or rollback.
 
         :param serial_number: the array serial number
+        :param volume: the volume object
         :param device_id: the volume device id
         :param volume_name: the volume name
         :param extra_specs: additional info
@@ -1031,33 +1035,48 @@ class VMAXMasking(object):
         :param connector: the connector object (optional)
         """
         self._cleanup_deletion(
-            serial_number, device_id, volume_name,
+            serial_number, volume, device_id, volume_name,
             extra_specs, connector, reset)
 
     def _cleanup_deletion(
-            self, serial_number, device_id, volume_name,
+            self, serial_number, volume, device_id, volume_name,
             extra_specs, connector, reset):
         """Prepare a volume for a delete operation.
 
         :param serial_number: the array serial number
+        :param volume: the volume object
         :param device_id: the volume device id
         :param volume_name: the volume name
         :param extra_specs: the extra specifications
         :param connector: the connector object
         """
         move = False
+        short_host_name = None
         storagegroup_names = (self.rest.get_storage_groups_from_volume(
             serial_number, device_id))
         if storagegroup_names:
             if len(storagegroup_names) == 1 and reset is True:
                 move = True
-            for sg_name in storagegroup_names:
-                self.remove_volume_from_sg(
-                    serial_number, device_id, volume_name, sg_name,
-                    extra_specs, connector, move)
+            elif connector is not None and reset is True:
+                short_host_name = self.utils.get_host_short_name(
+                    connector['host'])
+                move = True
+            if short_host_name:
+                for sg_name in storagegroup_names:
+                    if short_host_name in sg_name:
+                        self.remove_volume_from_sg(
+                            serial_number, device_id, volume_name, sg_name,
+                            extra_specs, connector, move)
+                        break
+            else:
+                for sg_name in storagegroup_names:
+                    self.remove_volume_from_sg(
+                        serial_number, device_id, volume_name, sg_name,
+                        extra_specs, connector, move)
         if reset is True and move is False:
             self.add_volume_to_default_storage_group(
-                serial_number, device_id, volume_name, extra_specs)
+                serial_number, device_id, volume_name,
+                extra_specs, volume=volume)
 
     def remove_volume_from_sg(
             self, serial_number, device_id, vol_name, storagegroup_name,
@@ -1386,7 +1405,7 @@ class VMAXMasking(object):
 
     def add_volume_to_default_storage_group(
             self, serial_number, device_id, volume_name,
-            extra_specs, src_sg=None):
+            extra_specs, src_sg=None, volume=None):
         """Return volume to its default storage group.
 
         :param serial_number: the array serial number
@@ -1394,6 +1413,7 @@ class VMAXMasking(object):
         :param volume_name: the volume name
         :param extra_specs: the extra specifications
         :param src_sg: the source storage group, if any
+        :param volume: the volume object
         """
         do_disable_compression = self.utils.is_compression_disabled(
             extra_specs)
@@ -1414,6 +1434,16 @@ class VMAXMasking(object):
             self._check_adding_volume_to_storage_group(
                 serial_number, device_id, storagegroup_name,
                 volume_name, extra_specs)
+        if volume:
+            # Need to check if the volume needs to be returned to a
+            # generic volume group. This may be necessary in a force-detach
+            # situation.
+            if volume.group_id is not None:
+                vol_grp_name = self.provision.get_or_create_volume_group(
+                    serial_number, volume.group, extra_specs)
+                self._check_adding_volume_to_storage_group(
+                    serial_number, device_id,
+                    vol_grp_name, volume_name, extra_specs)
 
     def get_or_create_default_storage_group(
             self, serial_number, srp, slo, workload, extra_specs,
diff --git a/cinder/volume/drivers/dell_emc/vmax/provision.py b/cinder/volume/drivers/dell_emc/vmax/provision.py
index a114a759e11..8b0361c2f14 100644
--- a/cinder/volume/drivers/dell_emc/vmax/provision.py
+++ b/cinder/volume/drivers/dell_emc/vmax/provision.py
@@ -448,6 +448,33 @@ class VMAXProvision(object):
             self.rest.modify_rdf_device_pair(
                 array, device_id, rdf_group, extra_specs, split=False)
 
+    def get_or_create_volume_group(self, array, group, extra_specs):
+        """Get or create a volume group.
+
+        Sometimes it may be necessary to recreate a volume group on the
+        backend - for example, when the last member volume has been removed
+        from the group, but the cinder group object has not been deleted.
+        :param array: the array serial number
+        :param group: the group object
+        :param extra_specs: the extra specifications
+        :return: group name
+        """
+        vol_grp_name = self.utils.update_volume_group_name(group)
+        return self.get_or_create_group(array, vol_grp_name, extra_specs)
+
+    def get_or_create_group(self, array, group_name, extra_specs):
+        """Get or create a generic volume group.
+
+        :param array: the array serial number
+        :param group_name: the group name
+        :param extra_specs: the extra specifications
+        :return: group name
+        """
+        storage_group = self.rest.get_storage_group(array, group_name)
+        if not storage_group:
+            self.create_volume_group(array, group_name, extra_specs)
+        return group_name
+
     def create_volume_group(self, array, group_name, extra_specs):
         """Create a generic volume group.
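
For reference, the _cleanup_deletion change above can be summarised as: when a connector is supplied and reset is True, only the storage group named after the attaching host is cleaned up (the force-detach style call); otherwise every group holding the device is processed, and the volume is afterwards returned to its default and, if it belongs to one, its generic volume group. The helper below is an illustrative sketch of that selection rule only, assuming host-specific groups embed the host short name; it is not the driver's code.

# Illustrative sketch (assumptions stated above), not VMAXMasking code.
def select_groups_for_cleanup(storagegroup_names, short_host_name, reset):
    """Return the storage groups the clean-up step would act on."""
    if len(storagegroup_names) == 1 and reset:
        # Single group: clean it up and move the volume out of it.
        return list(storagegroup_names)
    if short_host_name and reset:
        # Force-detach style call: touch only the host-specific group.
        return [sg for sg in storagegroup_names
                if short_host_name in sg][:1]
    # Delete path (or reset disabled): process every group found.
    return list(storagegroup_names)


if __name__ == '__main__':
    groups = ['OS-HostA-I-SG', 'OS-HostB-F-SG']
    print(select_groups_for_cleanup(groups, 'HostA', reset=True))
    # -> ['OS-HostA-I-SG']
    print(select_groups_for_cleanup(groups, None, reset=True))
    # -> ['OS-HostA-I-SG', 'OS-HostB-F-SG']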