From 3b0a58fa0e6017def37acbb1c4f8dabc1a550223 Mon Sep 17 00:00:00 2001 From: Xing Yang Date: Thu, 2 Apr 2015 01:31:56 -0400 Subject: [PATCH] Fix a problem with FAST support in VMAX driver The VMAX driver doesn't distinguish between storage groups for masking views (used for attachment) and storage groups for volumes under FAST policy. This causes problems during attach volume. This patch fixed the problem. Change-Id: Ibb086f336e78c23e4907562d39f460eae3004bca Closes-Bug: #1435069 --- cinder/tests/unit/test_emc_vmax.py | 145 ++++++++++++++++-- cinder/volume/drivers/emc/emc_vmax_common.py | 60 ++++---- cinder/volume/drivers/emc/emc_vmax_fast.py | 28 ++-- cinder/volume/drivers/emc/emc_vmax_fc.py | 3 +- cinder/volume/drivers/emc/emc_vmax_iscsi.py | 3 +- cinder/volume/drivers/emc/emc_vmax_masking.py | 95 +++++++----- cinder/volume/drivers/emc/emc_vmax_utils.py | 46 +++++- 7 files changed, 283 insertions(+), 97 deletions(-) diff --git a/cinder/tests/unit/test_emc_vmax.py b/cinder/tests/unit/test_emc_vmax.py index 621cec5a88b..f141a010c80 100644 --- a/cinder/tests/unit/test_emc_vmax.py +++ b/cinder/tests/unit/test_emc_vmax.py @@ -24,6 +24,7 @@ from oslo_log import log as logging import six from cinder import exception +from cinder.i18n import _ from cinder.openstack.common import loopingcall from cinder import test from cinder.volume.drivers.emc import emc_vmax_common @@ -263,7 +264,8 @@ class EMCVMAXCommonData(object): hardwareid_creationclass = 'EMC_StorageHardwareID' replicationgroup_creationclass = 'CIM_ReplicationGroup' storagepoolid = 'SYMMETRIX+000195900551+U+gold' - storagegroupname = 'OS_default_GOLD1_SG' + storagegroupname = 'OS-fakehost-gold-I-SG' + defaultstoragegroupname = 'OS_default_GOLD1_SG' storagevolume_creationclass = 'EMC_StorageVolume' policyrule = 'gold' poolname = 'gold' @@ -783,12 +785,18 @@ class FakeEcomConnection(object): def _assoc_storagegroup(self): assocs = [] - assoc = CIM_DeviceMaskingGroup() - assoc['ElementName'] = 'OS_default_GOLD1_SG' - assoc['SystemName'] = self.data.storage_system - assoc['CreationClassName'] = 'CIM_DeviceMaskingGroup' - assoc.path = assoc - assocs.append(assoc) + assoc1 = CIM_DeviceMaskingGroup() + assoc1['ElementName'] = self.data.storagegroupname + assoc1['SystemName'] = self.data.storage_system + assoc1['CreationClassName'] = 'CIM_DeviceMaskingGroup' + assoc1.path = assoc1 + assocs.append(assoc1) + assoc2 = CIM_DeviceMaskingGroup() + assoc2['ElementName'] = self.data.defaultstoragegroupname + assoc2['SystemName'] = self.data.storage_system + assoc2['CreationClassName'] = 'CIM_DeviceMaskingGroup' + assoc2.path = assoc2 + assocs.append(assoc2) return assocs def _assoc_portgroup(self): @@ -981,8 +989,17 @@ class FakeEcomConnection(object): def _getinstance_devicemaskinggroup(self, objectpath): targetmaskinggroup = {} - targetmaskinggroup['CreationClassName'] = 'CIM_DeviceMaskingGroup' - targetmaskinggroup['ElementName'] = 'OS_default_GOLD1_SG' + if 'CreationClassName' in objectpath: + targetmaskinggroup['CreationClassName'] = ( + objectpath['CreationClassName']) + else: + targetmaskinggroup['CreationClassName'] = ( + 'CIM_DeviceMaskingGroup') + if 'ElementName' in objectpath: + targetmaskinggroup['ElementName'] = objectpath['ElementName'] + else: + targetmaskinggroup['ElementName'] = ( + self.data.storagegroupname) return targetmaskinggroup def _getinstance_unit(self, objectpath): @@ -1324,11 +1341,27 @@ class FakeEcomConnection(object): def _enum_storagegroup(self): storagegroups = [] - storagegroup = {} - storagegroup['CreationClassName'] 
= ( + storagegroup1 = {} + storagegroup1['CreationClassName'] = ( self.data.storagegroup_creationclass) - storagegroup['ElementName'] = self.data.storagegroupname - storagegroups.append(storagegroup) + storagegroup1['ElementName'] = self.data.storagegroupname + storagegroups.append(storagegroup1) + storagegroup2 = {} + storagegroup2['CreationClassName'] = ( + self.data.storagegroup_creationclass) + storagegroup2['ElementName'] = self.data.defaultstoragegroupname + storagegroup2['SystemName'] = self.data.storage_system + storagegroups.append(storagegroup2) + storagegroup3 = {} + storagegroup3['CreationClassName'] = ( + self.data.storagegroup_creationclass) + storagegroup3['ElementName'] = 'OS-fakehost-SRP_1-Bronze-DSS-SG' + storagegroups.append(storagegroup3) + storagegroup4 = {} + storagegroup4['CreationClassName'] = ( + self.data.storagegroup_creationclass) + storagegroup4['ElementName'] = 'OS-SRP_1-Bronze-DSS-SG' + storagegroups.append(storagegroup4) return storagegroups def _enum_storagevolume(self): @@ -1659,6 +1692,90 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): self.driver.utils._get_hardware_type(bogus_initiator)) self.assertEqual(0, hardwaretypeid) + def test_check_if_rollback_action_for_masking_required(self): + conn = self.fake_ecom_connection() + controllerConfigService = ( + self.driver.utils.find_controller_configuration_service( + conn, self.data.storage_system)) + extraSpecs = {'volume_backend_name': 'GOLD_BE', + 'isV3': False, + 'storagetype:fastpolicy': 'GOLD1'} + + vol = EMC_StorageVolume() + vol['name'] = self.data.test_volume['name'] + vol['CreationClassName'] = 'Symm_StorageVolume' + vol['ElementName'] = self.data.test_volume['id'] + vol['DeviceID'] = self.data.test_volume['device_id'] + vol['Id'] = self.data.test_volume['id'] + vol['SystemName'] = self.data.storage_system + vol['NumberOfBlocks'] = self.data.test_volume['NumberOfBlocks'] + vol['BlockSize'] = self.data.test_volume['BlockSize'] + + # Added vol to vol.path + vol['SystemCreationClassName'] = 'Symm_StorageSystem' + vol.path = vol + vol.path.classname = vol['CreationClassName'] + + rollbackDict = {} + rollbackDict['isV3'] = False + rollbackDict['defaultStorageGroupInstanceName'] = ( + self.data.default_storage_group) + rollbackDict['sgName'] = self.data.storagegroupname + rollbackDict['volumeName'] = 'vol1' + rollbackDict['fastPolicyName'] = 'GOLD1' + rollbackDict['volumeInstance'] = vol + rollbackDict['controllerConfigService'] = controllerConfigService + rollbackDict['extraSpecs'] = extraSpecs + # Path 1 - The volume is in another storage group that isn't the + # default storage group + expectedmessage = (_("V2 rollback - Volume in another storage " + "group besides default storage group.")) + message = ( + self.driver.common.masking. + _check_if_rollback_action_for_masking_required( + conn, rollbackDict)) + self.assertEqual(expectedmessage, message) + # Path 2 - The volume is not in any storage group + rollbackDict['sgName'] = 'sq_not_exist' + expectedmessage = (_("V2 rollback, volume is not in any storage " + "group.")) + message = ( + self.driver.common.masking. 
+ _check_if_rollback_action_for_masking_required( + conn, rollbackDict)) + self.assertEqual(expectedmessage, message) + + def test_migrate_cleanup(self): + conn = self.fake_ecom_connection() + extraSpecs = {'volume_backend_name': 'GOLD_BE', + 'isV3': False, + 'storagetype:fastpolicy': 'GOLD1'} + + vol = EMC_StorageVolume() + vol['name'] = self.data.test_volume['name'] + vol['CreationClassName'] = 'Symm_StorageVolume' + vol['ElementName'] = self.data.test_volume['id'] + vol['DeviceID'] = self.data.test_volume['device_id'] + vol['Id'] = self.data.test_volume['id'] + vol['SystemName'] = self.data.storage_system + vol['NumberOfBlocks'] = self.data.test_volume['NumberOfBlocks'] + vol['BlockSize'] = self.data.test_volume['BlockSize'] + + # Added vol to vol.path + vol['SystemCreationClassName'] = 'Symm_StorageSystem' + vol.path = vol + vol.path.classname = vol['CreationClassName'] + # The volume is already belong to default storage group + return_to_default = self.driver.common._migrate_cleanup( + conn, vol, self.data.storage_system, 'GOLD1', + vol['name'], extraSpecs) + self.assertFalse(return_to_default) + # The volume does not belong to default storage group + return_to_default = self.driver.common._migrate_cleanup( + conn, vol, self.data.storage_system, 'BRONZE1', + vol['name'], extraSpecs) + self.assertTrue(return_to_default) + def test_wait_for_job_complete(self): myjob = SE_ConcreteJob() myjob.classname = 'SE_ConcreteJob' @@ -3839,7 +3956,7 @@ class EMCVMAXFCDriverNoFastTestCase(test.TestCase): def test_get_port_group_parser(self): self.create_fake_config_file_parse_port_group() - for _ in range(0, 10): + for _var in range(0, 10): self.assertEqual( u'OS-PORTGROUP1-PG', self.driver.utils.parse_file_to_get_port_group_name( diff --git a/cinder/volume/drivers/emc/emc_vmax_common.py b/cinder/volume/drivers/emc/emc_vmax_common.py index 3c7d9bd490c..aadf3184b6b 100644 --- a/cinder/volume/drivers/emc/emc_vmax_common.py +++ b/cinder/volume/drivers/emc/emc_vmax_common.py @@ -803,42 +803,41 @@ class EMCVMAXCommon(object): :param sourceFastPolicyName: the source FAST policy name :param volumeName: the volume Name :param extraSpecs: extra specifications + :returns: boolean -- True/False """ LOG.warning(_LW("_migrate_cleanup on : %(volumeName)s."), {'volumeName': volumeName}) - + return_to_default = True controllerConfigurationService = ( self.utils.find_controller_configuration_service( conn, storageSystemName)) # Check to see what SG it is in. - assocStorageGroupInstanceName = ( - self.utils.get_storage_group_from_volume(conn, - volumeInstance.path)) + assocStorageGroupInstanceNames = ( + self.utils.get_storage_groups_from_volume(conn, + volumeInstance.path)) # This is the SG it should be in. defaultStorageGroupInstanceName = ( self.fast.get_policy_default_storage_group( conn, controllerConfigurationService, sourceFastPolicyName)) - # It is not in any storage group. Must add it to default source. - if assocStorageGroupInstanceName is None: - self.add_to_default_SG(conn, volumeInstance, - storageSystemName, sourceFastPolicyName, - volumeName, extraSpecs) - - # It is in the incorrect storage group. - if (assocStorageGroupInstanceName is not None and - (assocStorageGroupInstanceName != - defaultStorageGroupInstanceName)): - self.provision.remove_device_from_storage_group( - conn, controllerConfigurationService, - assocStorageGroupInstanceName, - volumeInstance.path, volumeName, extraSpecs) - + for assocStorageGroupInstanceName in assocStorageGroupInstanceNames: + # It is in the incorrect storage group. 
+ if (assocStorageGroupInstanceName != + defaultStorageGroupInstanceName): + self.provision.remove_device_from_storage_group( + conn, controllerConfigurationService, + assocStorageGroupInstanceName, + volumeInstance.path, volumeName, extraSpecs) + else: + # The volume is already in the default. + return_to_default = False + if return_to_default: self.add_to_default_SG( conn, volumeInstance, storageSystemName, sourceFastPolicyName, volumeName, extraSpecs) + return return_to_default def _migrate_volume_fast_target( self, volumeInstance, storageSystemName, @@ -1041,7 +1040,7 @@ class EMCVMAXCommon(object): def _is_valid_for_storage_assisted_migration_v3( self, volumeInstanceName, host, sourceArraySerialNumber, - sourcePoolName, volumeName, volumeStatus): + sourcePoolName, volumeName, volumeStatus, sgName): """Check if volume is suitable for storage assisted (pool) migration. :param volumeInstanceName: the volume instance id @@ -1050,7 +1049,8 @@ class EMCVMAXCommon(object): the original volume :param sourcePoolName: the pool name of the original volume :param volumeName: the name of the volume to be migrated - :param volumeStatus: the status of the volume e.g + :param volumeStatus: the status of the volume + :param sgName: storage group name :returns: boolean -- True/False :returns: string -- targetSlo :returns: string -- targetWorkload @@ -1094,7 +1094,7 @@ class EMCVMAXCommon(object): foundStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume( - self.conn, volumeInstanceName)) + self.conn, volumeInstanceName, sgName)) if foundStorageGroupInstanceName is None: LOG.warning(_LW( "Volume: %(volumeName)s is not currently " @@ -1775,13 +1775,14 @@ class EMCVMAXCommon(object): controllerConfigurationService = ( self.utils.find_controller_configuration_service( self.conn, storageSystemName)) + defaultSgName = self.fast.format_default_sg_string(fastPolicyName) self.fast.add_volume_to_default_storage_group_for_fast_policy( self.conn, controllerConfigurationService, volumeInstance, volumeName, fastPolicyName, extraSpecs) foundStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume( - self.conn, volumeInstance.path)) + self.conn, volumeInstance.path, defaultSgName)) if foundStorageGroupInstanceName is None: exceptionMessage = (_( @@ -2948,11 +2949,14 @@ class EMCVMAXCommon(object): :param extraSpecs: extra specifications :returns: boolean -- True if migration succeeded, False if error. """ + storageGroupName = self.utils.get_v3_storage_group_name( + extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD]) volumeInstanceName = volumeInstance.path isValid, targetSlo, targetWorkload = ( self._is_valid_for_storage_assisted_migration_v3( volumeInstanceName, host, extraSpecs[ARRAY], - extraSpecs[POOL], volumeName, volumeStatus)) + extraSpecs[POOL], volumeName, volumeStatus, + storageGroupName)) storageSystemName = volumeInstance['SystemName'] @@ -2998,10 +3002,12 @@ class EMCVMAXCommon(object): controllerConfigService = ( self.utils.find_controller_configuration_service( self.conn, storageSystemName)) + defaultSgName = self.utils.get_v3_storage_group_name( + extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD]) foundStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume( - self.conn, volumeInstance.path)) + self.conn, volumeInstance.path, defaultSgName)) if foundStorageGroupInstanceName is None: LOG.warning(_LW( "Volume : %(volumeName)s is not currently " @@ -3017,7 +3023,7 @@ class EMCVMAXCommon(object): # Check that it has been removed. 
sgFromVolRemovedInstanceName = ( self.utils.wrap_get_storage_group_from_volume( - self.conn, volumeInstance.path)) + self.conn, volumeInstance.path, defaultSgName)) if sgFromVolRemovedInstanceName is not None: LOG.error(_LE( "Volume : %(volumeName)s has not been " @@ -3044,7 +3050,7 @@ class EMCVMAXCommon(object): # Check that it has been added. sgFromVolAddedInstanceName = ( self.utils.get_storage_group_from_volume( - self.conn, volumeInstance.path)) + self.conn, volumeInstance.path, storageGroupName)) if sgFromVolAddedInstanceName is None: LOG.error(_LE( "Volume : %(volumeName)s has not been " diff --git a/cinder/volume/drivers/emc/emc_vmax_fast.py b/cinder/volume/drivers/emc/emc_vmax_fast.py index 0dc3199fb1f..a70054f1c9b 100644 --- a/cinder/volume/drivers/emc/emc_vmax_fast.py +++ b/cinder/volume/drivers/emc/emc_vmax_fast.py @@ -106,7 +106,7 @@ class EMCVMAXFast(object): :param volumeInstanceName: the volume instance name :param volumeName: the volume name (String) :param fastPolicyName: the fast policy name (String) - :returns: foundDefaultStorageGroupInstanceName + :returns: foundDefaultStorageGroupInstanceName, defaultSgName """ foundDefaultStorageGroupInstanceName = None storageSystemInstanceName = self.utils.find_storage_system( @@ -117,9 +117,11 @@ class EMCVMAXFast(object): "FAST is not supported on this array.")) raise + defaultSgName = self.format_default_sg_string(fastPolicyName) assocStorageGroupInstanceName = ( - self.utils.get_storage_group_from_volume(conn, volumeInstanceName)) - defaultSgName = self._format_default_sg_string(fastPolicyName) + self.utils.get_storage_group_from_volume(conn, volumeInstanceName, + defaultSgName)) + defaultStorageGroupInstanceName = ( self.utils.find_storage_masking_group(conn, controllerConfigService, @@ -137,12 +139,12 @@ class EMCVMAXFast(object): else: LOG.warning(_LW( "Volume: %(volumeName)s Does not belong " - "to storage storage group %(defaultSgGroupName)s."), + "to storage group %(defaultSgName)s."), {'volumeName': volumeName, - 'defaultSgGroupName': defaultSgName}) - return foundDefaultStorageGroupInstanceName + 'defaultSgName': defaultSgName}) + return foundDefaultStorageGroupInstanceName, defaultSgName - def _format_default_sg_string(self, fastPolicyName): + def format_default_sg_string(self, fastPolicyName): """Format the default storage group name :param fastPolicyName: the fast policy name @@ -171,14 +173,13 @@ class EMCVMAXFast(object): associated with the volume """ failedRet = None - defaultSgName = self._format_default_sg_string(fastPolicyName) + defaultSgName = self.format_default_sg_string(fastPolicyName) storageGroupInstanceName = self.utils.find_storage_masking_group( conn, controllerConfigService, defaultSgName) if storageGroupInstanceName is None: LOG.error(_LE( - "Unable to create default storage group for " - "FAST policy : %(fastPolicyName)s."), - {'fastPolicyName': fastPolicyName}) + "Unable to get default storage group %(defaultSgName)s."), + {'defaultSgName': defaultSgName}) return failedRet self.provision.add_members_to_masking_group( @@ -187,7 +188,8 @@ class EMCVMAXFast(object): # Check to see if the volume is in the storage group. 
assocStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume(conn, - volumeInstance.path)) + volumeInstance.path, + defaultSgName)) return assocStorageGroupInstanceName def _create_default_storage_group(self, conn, controllerConfigService, @@ -750,7 +752,7 @@ class EMCVMAXFast(object): :returns: defaultStorageGroupInstanceName - the default storage group instance name """ - defaultSgName = self._format_default_sg_string(fastPolicyName) + defaultSgName = self.format_default_sg_string(fastPolicyName) defaultStorageGroupInstanceName = ( self.utils.find_storage_masking_group(conn, controllerConfigService, diff --git a/cinder/volume/drivers/emc/emc_vmax_fc.py b/cinder/volume/drivers/emc/emc_vmax_fc.py index 22f3115b8be..0889dc35dc8 100644 --- a/cinder/volume/drivers/emc/emc_vmax_fc.py +++ b/cinder/volume/drivers/emc/emc_vmax_fc.py @@ -35,7 +35,8 @@ class EMCVMAXFCDriver(driver.FibreChannelDriver): 2.0.0 - Add driver requirement functions 2.1.0 - Add consistency group functions 2.1.1 - Fixed issue with mismatched config (bug #1442376) - 2.1.3 - Clean up failed clones (bug #1440154) + 2.1.2 - Clean up failed clones (bug #1440154) + 2.1.3 - Fixed a problem with FAST support (bug #1435069) """ VERSION = "2.1.3" diff --git a/cinder/volume/drivers/emc/emc_vmax_iscsi.py b/cinder/volume/drivers/emc/emc_vmax_iscsi.py index b7778cf529f..da99a150d2a 100644 --- a/cinder/volume/drivers/emc/emc_vmax_iscsi.py +++ b/cinder/volume/drivers/emc/emc_vmax_iscsi.py @@ -43,7 +43,8 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver): 2.0.0 - Add driver requirement functions 2.1.0 - Add consistency group functions 2.1.1 - Fixed issue with mismatched config (bug #1442376) - 2.1.3 - Clean up failed clones (bug #1440154) + 2.1.2 - Clean up failed clones (bug #1440154) + 2.1.3 - Fixed a problem with FAST support (bug #1435069) """ VERSION = "2.1.3" diff --git a/cinder/volume/drivers/emc/emc_vmax_masking.py b/cinder/volume/drivers/emc/emc_vmax_masking.py index b4711ef4b0a..2d0d2b07d44 100644 --- a/cinder/volume/drivers/emc/emc_vmax_masking.py +++ b/cinder/volume/drivers/emc/emc_vmax_masking.py @@ -93,7 +93,8 @@ class EMCVMAXMasking(object): if isV3: assocStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume( - conn, volumeInstance.path)) + conn, volumeInstance.path, + maskingViewDict['sgGroupName'])) instance = conn.GetInstance( assocStorageGroupInstanceName, LocalOnly=False) assocStorageGroupName = instance['ElementName'] @@ -125,14 +126,24 @@ class EMCVMAXMasking(object): volumeName, fastPolicyName, extraSpecs)) - maskingViewInstanceName, storageGroupInstanceName, errorMessage = ( - self._validate_masking_view(conn, maskingViewDict, - defaultStorageGroupInstanceName)) - - LOG.debug( - "The masking view in the attach operation is " - "%(maskingViewInstanceName)s.", - {'maskingViewInstanceName': maskingViewInstanceName}) + # If anything has gone wrong with the masking view we rollback + try: + maskingViewInstanceName, storageGroupInstanceName, errorMessage = ( + self._validate_masking_view(conn, maskingViewDict, + defaultStorageGroupInstanceName)) + LOG.debug( + "The masking view in the attach operation is " + "%(maskingViewInstanceName)s. The storage group " + "in the masking view is %(storageGroupInstanceName)s.", + {'maskingViewInstanceName': maskingViewInstanceName, + 'storageGroupInstanceName': storageGroupInstanceName}) + except Exception as e: + LOG.exception(_LE( + "Masking View creation or retrieval was not successful " + "for masking view %(maskingViewName)s. 
" + "Attempting rollback."), + {'maskingViewName': maskingViewDict['maskingViewName']}) + errorMessage = e if not errorMessage: # Only after the masking view has been validated, add the @@ -149,6 +160,7 @@ class EMCVMAXMasking(object): rollbackDict['fastPolicyName'] = fastPolicyName rollbackDict['isV3'] = isV3 rollbackDict['extraSpecs'] = extraSpecs + rollbackDict['sgName'] = maskingViewDict['sgGroupName'] if errorMessage: # Rollback code if we cannot complete any of the steps above @@ -471,7 +483,7 @@ class EMCVMAXMasking(object): msg = None if self._is_volume_in_storage_group( conn, storageGroupInstanceName, - volumeInstance): + volumeInstance, sgGroupName): LOG.warning(_LW( "Volume: %(volumeName)s is already part " "of storage group %(sgGroupName)s."), @@ -484,7 +496,7 @@ class EMCVMAXMasking(object): sgGroupName, maskingViewDict['extraSpecs']) if not self._is_volume_in_storage_group( conn, storageGroupInstanceName, - volumeInstance): + volumeInstance, sgGroupName): # This may be used in exception hence _ instead of _LE. msg = (_( "Volume: %(volumeName)s was not added " @@ -533,8 +545,7 @@ class EMCVMAXMasking(object): volumeName, fastPolicyName, extraSpecs)) if retStorageGroupInstanceName is None: exceptionMessage = (_( - "Failed to remove volume %(volumeName)s from default SG: " - "%(volumeName)s.") + "Failed to remove volume %(volumeName)s from default SG.") % {'volumeName': volumeName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( @@ -577,7 +588,8 @@ class EMCVMAXMasking(object): # Required for unit tests. emptyStorageGroupInstanceName = ( - self._wrap_get_storage_group_from_volume(conn, volumeInstanceName)) + self._wrap_get_storage_group_from_volume( + conn, volumeInstanceName, maskingViewDict['sgGroupName'])) if emptyStorageGroupInstanceName is not None: exceptionMessage = (_( @@ -589,7 +601,7 @@ class EMCVMAXMasking(object): data=exceptionMessage) def _is_volume_in_storage_group( - self, conn, storageGroupInstanceName, volumeInstance): + self, conn, storageGroupInstanceName, volumeInstance, sgName): """Check if the volume is already part of the storage group. Check if the volume is already part of the storage group, @@ -602,7 +614,7 @@ class EMCVMAXMasking(object): """ foundStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume( - conn, volumeInstance.path)) + conn, volumeInstance.path, sgName)) if foundStorageGroupInstanceName is not None: storageGroupInstance = conn.GetInstance( @@ -1195,8 +1207,10 @@ class EMCVMAXMasking(object): :param conn: the connection to the ecom server :param rollbackDict: the rollback dictionary + :returns: message :raises: VolumeBackendAPIException """ + message = None try: if rollbackDict['isV3']: errorMessage = self._check_adding_volume_to_storage_group( @@ -1204,14 +1218,16 @@ class EMCVMAXMasking(object): rollbackDict['defaultStorageGroupInstanceName']) if errorMessage: LOG.error(errorMessage) + message = (_("V3 rollback")) else: foundStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume( - conn, rollbackDict['volumeInstance'].path)) + conn, rollbackDict['volumeInstance'].path, + rollbackDict['sgName'])) # Volume is not associated with any storage group so add # it back to the default. - if len(foundStorageGroupInstanceName) == 0: + if not foundStorageGroupInstanceName: LOG.warning(_LW( "No storage group found. 
" "Performing rollback on Volume: %(volumeName)s " @@ -1237,26 +1253,29 @@ class EMCVMAXMasking(object): "admin to get the volume re-added manually."), {'volumeName': rollbackDict['volumeName'], 'fastPolicyName': rollbackDict['fastPolicyName']}) - if len(foundStorageGroupInstanceName) > 0: + message = (_("V2 rollback, volume is not in any storage " + "group.")) + else: LOG.info(_LI( "The storage group found is " "%(foundStorageGroupInstanceName)s."), {'foundStorageGroupInstanceName': foundStorageGroupInstanceName}) - # Check the name, see is it the default storage group - # or another. - if (foundStorageGroupInstanceName != - rollbackDict['defaultStorageGroupInstanceName']): - # Remove it from its current masking view and return it - # to its default masking view if fast is enabled. - self.remove_and_reset_members( - conn, - rollbackDict['controllerConfigService'], - rollbackDict['volumeInstance'], - rollbackDict['fastPolicyName'], - rollbackDict['volumeName'], rollbackDict['extraSpecs'], - False) + # Check the name, see is it the default storage group + # or another. + if (foundStorageGroupInstanceName != + rollbackDict['defaultStorageGroupInstanceName']): + # Remove it from its current masking view and return it + # to its default masking view if fast is enabled. + self.remove_and_reset_members( + conn, + rollbackDict['controllerConfigService'], + rollbackDict['volumeInstance'], + rollbackDict['volumeName'], + rollbackDict['extraSpecs']) + message = (_("V2 rollback - Volume in another storage " + "group besides default storage group.")) except Exception: errorMessage = (_( "Rollback for Volume: %(volumeName)s has failed. " @@ -1267,6 +1286,7 @@ class EMCVMAXMasking(object): 'fastPolicyName': rollbackDict['fastPolicyName']}) LOG.exception(errorMessage) raise exception.VolumeBackendAPIException(data=errorMessage) + return message def _find_new_initiator_group(self, conn, maskingGroupDict): """After creating an new initiator group find it and return it. @@ -1575,7 +1595,7 @@ class EMCVMAXMasking(object): :returns: instance name defaultStorageGroupInstanceName """ failedRet = None - defaultStorageGroupInstanceName = ( + defaultStorageGroupInstanceName, defaultSgName = ( self.fast.get_and_verify_default_storage_group( conn, controllerConfigService, volumeInstanceName, volumeName, fastPolicyName)) @@ -1610,7 +1630,8 @@ class EMCVMAXMasking(object): # Required for unit tests. emptyStorageGroupInstanceName = ( - self._wrap_get_storage_group_from_volume(conn, volumeInstanceName)) + self._wrap_get_storage_group_from_volume(conn, volumeInstanceName, + defaultSgName)) if emptyStorageGroupInstanceName is not None: LOG.error(_LE( @@ -1621,18 +1642,20 @@ class EMCVMAXMasking(object): return defaultStorageGroupInstanceName - def _wrap_get_storage_group_from_volume(self, conn, volumeInstanceName): + def _wrap_get_storage_group_from_volume(self, conn, volumeInstanceName, + defaultSgName): """Wrapper for get_storage_group_from_volume. Needed for override in tests. 
:param conn: the connection to the ecom server :param volumeInstanceName: the volume instance name + :param defaultSgName: the default storage group name :returns: emptyStorageGroupInstanceName """ return self.utils.get_storage_group_from_volume( - conn, volumeInstanceName) + conn, volumeInstanceName, defaultSgName) def get_devices_from_storage_group( self, conn, storageGroupInstanceName): diff --git a/cinder/volume/drivers/emc/emc_vmax_utils.py b/cinder/volume/drivers/emc/emc_vmax_utils.py index 30bb8ad5ec2..5444053a011 100644 --- a/cinder/volume/drivers/emc/emc_vmax_utils.py +++ b/cinder/volume/drivers/emc/emc_vmax_utils.py @@ -489,7 +489,7 @@ class EMCVMAXUtils(object): return foundStorageSystemInstanceName - def get_storage_group_from_volume(self, conn, volumeInstanceName): + def get_storage_group_from_volume(self, conn, volumeInstanceName, sgName): """Returns the storage group for a particular volume. Given the volume instance name get the associated storage group if it @@ -505,14 +505,50 @@ class EMCVMAXUtils(object): volumeInstanceName, ResultClass='CIM_DeviceMaskingGroup') - if len(storageGroupInstanceNames) > 0: - foundStorageGroupInstanceName = storageGroupInstanceNames[0] + if len(storageGroupInstanceNames) > 1: + LOG.info(_LI( + "The volume belongs to more than one storage group. " + "Returning storage group %(sgName)s."), + {'sgName': sgName}) + for storageGroupInstanceName in storageGroupInstanceNames: + instance = self.get_existing_instance( + conn, storageGroupInstanceName) + if instance and sgName == instance['ElementName']: + foundStorageGroupInstanceName = storageGroupInstanceName + break return foundStorageGroupInstanceName - def wrap_get_storage_group_from_volume(self, conn, volumeInstanceName): + def get_storage_groups_from_volume(self, conn, volumeInstanceName): + """Returns all the storage group for a particular volume. + + Given the volume instance name get all the associated storage groups. + + :param conn: connection to the ecom server + :param volumeInstanceName: the volume instance name + :returns: foundStorageGroupInstanceName + """ + storageGroupInstanceNames = conn.AssociatorNames( + volumeInstanceName, + ResultClass='CIM_DeviceMaskingGroup') + + if storageGroupInstanceNames: + LOG.debug("There are %(len)d storage groups associated " + "with volume %(volumeInstanceName)s.", + {'len': len(storageGroupInstanceNames), + 'volumeInstanceName': volumeInstanceName}) + else: + LOG.debug("There are no storage groups associated " + "with volume %(volumeInstanceName)s.", + {'volumeInstanceName': volumeInstanceName}) + + return storageGroupInstanceNames + + def wrap_get_storage_group_from_volume(self, conn, volumeInstanceName, + sgName): """Unit test aid""" - return self.get_storage_group_from_volume(conn, volumeInstanceName) + return self.get_storage_group_from_volume(conn, volumeInstanceName, + sgName) def find_storage_masking_group(self, conn, controllerConfigService, storageGroupName):
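
A minimal standalone sketch of the lookup this change introduces in get_storage_group_from_volume(): when a volume is associated with more than one CIM_DeviceMaskingGroup (its masking-view storage group plus the FAST-policy default group), the caller now passes the storage group name it expects and only the matching instance is returned. The FakeConnection class, volume key, and group names below are illustrative stand-ins rather than the driver's real ECOM objects.

class FakeConnection(object):
    """Illustrative stand-in for the ECOM connection used by the driver."""

    def __init__(self, groups_by_volume):
        self._groups_by_volume = groups_by_volume

    def AssociatorNames(self, volumeInstanceName, ResultClass=None):
        # Every CIM_DeviceMaskingGroup associated with the volume.
        return self._groups_by_volume.get(volumeInstanceName, [])

    def GetInstance(self, instanceName, LocalOnly=False):
        # In this sketch the "instance name" already carries ElementName.
        return instanceName


def get_storage_group_from_volume(conn, volumeInstanceName, sgName):
    """Return only the associated group whose ElementName matches sgName."""
    foundStorageGroupInstanceName = None
    storageGroupInstanceNames = conn.AssociatorNames(
        volumeInstanceName, ResultClass='CIM_DeviceMaskingGroup')
    for storageGroupInstanceName in storageGroupInstanceNames:
        instance = conn.GetInstance(storageGroupInstanceName, LocalOnly=False)
        if instance and sgName == instance['ElementName']:
            foundStorageGroupInstanceName = storageGroupInstanceName
            break
    return foundStorageGroupInstanceName


# A volume attached via a masking view while under a FAST policy sits in
# both groups; the caller now states which one it wants back.
conn = FakeConnection({
    'vol1': [{'ElementName': 'OS-fakehost-gold-I-SG'},
             {'ElementName': 'OS_default_GOLD1_SG'}],
})
sg = get_storage_group_from_volume(conn, 'vol1', 'OS_default_GOLD1_SG')
assert sg['ElementName'] == 'OS_default_GOLD1_SG'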