diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py index c4e75381532..c480048976c 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py @@ -224,10 +224,10 @@ class PowerMaxCommonTest(test.TestCase): model_update = self.common.create_volume(self.data.test_volume) self.assertEqual(ref_model_update, model_update) - @mock.patch.object(common.PowerMaxCommon, '_clone_check') + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata', return_value='') - def test_create_volume_from_snapshot(self, mck_meta, mck_clone_chk): + def test_create_volume_from_snapshot(self, mck_meta, mck_cleanup_snaps): ref_model_update = ({'provider_location': six.text_type( deepcopy(self.data.provider_location_snapshot))}) model_update = self.common.create_volume_from_snapshot( @@ -362,10 +362,14 @@ class PowerMaxCommonTest(test.TestCase): mck_configure.assert_called_once() mck_resume.assert_called_once() - @mock.patch.object(common.PowerMaxCommon, '_clone_check') + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata', return_value='') - def test_cloned_volume(self, mck_meta, mck_clone_chk): + def test_cloned_volume(self, mck_meta, mck_cleanup_snaps): + array = self.data.array + test_volume = self.data.test_clone_volume + source_device_id = self.data.device_id + extra_specs = self.common._initial_setup(test_volume) ref_model_update = ({'provider_location': six.text_type( self.data.provider_location_clone)}) model_update = self.common.create_cloned_volume( @@ -373,18 +377,55 @@ class PowerMaxCommonTest(test.TestCase): self.assertEqual( ast.literal_eval(ref_model_update['provider_location']), ast.literal_eval(model_update['provider_location'])) + mck_cleanup_snaps.assert_called_once_with( + array, source_device_id, extra_specs) - def test_delete_volume(self): + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', + return_value=list()) + def test_delete_volume(self, mck_get_snaps): with mock.patch.object(self.common, '_delete_volume') as mock_delete: self.common.delete_volume(self.data.test_volume) mock_delete.assert_called_once_with(self.data.test_volume) - @mock.patch.object(common.PowerMaxCommon, '_clone_check') + @mock.patch.object(common.PowerMaxCommon, '_delete_from_srp') + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snap_info', + return_value=tpd.PowerMaxData.volume_snap_vx) + def test_delete_volume_fail_if_active_snapshots( + self, mck_get_snaps, mck_cleanup, mck_delete): + array = self.data.array + test_volume = self.data.test_volume + device_id = self.data.device_id + extra_specs = self.common._initial_setup(test_volume) + self.assertRaises(exception.VolumeBackendAPIException, + self.common._delete_volume, test_volume) + mck_cleanup.assert_called_once_with(array, device_id, extra_specs) + mck_delete.assert_not_called() + + @mock.patch.object(common.PowerMaxCommon, '_delete_from_srp') + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + @mock.patch.object( + rest.PowerMaxRest, 'find_snap_vx_sessions', + return_value=('', tpd.PowerMaxData.snap_tgt_session_cm_enabled)) + @mock.patch.object(rest.PowerMaxRest, 
'get_volume_snapshot_list', + return_value=list()) + def test_delete_volume_fail_if_snapvx_target( + self, mck_get_snaps, mck_tgt_snap, mck_cleanup, mck_delete): + array = self.data.array + test_volume = self.data.test_volume + device_id = self.data.device_id + extra_specs = self.common._initial_setup(test_volume) + self.assertRaises(exception.VolumeBackendAPIException, + self.common._delete_volume, test_volume) + mck_cleanup.assert_called_once_with(array, device_id, extra_specs) + mck_delete.assert_not_called() + + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object( common.PowerMaxCommon, 'get_snapshot_metadata', return_value={'snap-meta-key-1': 'snap-meta-value-1', 'snap-meta-key-2': 'snap-meta-value-2'}) - def test_create_snapshot(self, mck_meta, mck_clone_chk): + def test_create_snapshot(self, mck_meta, mck_cleanup_snaps): ref_model_update = ( {'provider_location': six.text_type(self.data.snap_location), 'metadata': {'snap-meta-key-1': 'snap-meta-value-1', @@ -1171,8 +1212,8 @@ class PowerMaxCommonTest(test.TestCase): volume, connector, extra_specs) self.assertEqual('NONE', masking_view_dict[utils.WORKLOAD]) - @mock.patch.object(common.PowerMaxCommon, '_clone_check') - def test_create_cloned_volume(self, mck_clone_chk): + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + def test_create_cloned_volume(self, mck_cleanup_snaps): volume = self.data.test_clone_volume source_volume = self.data.test_volume extra_specs = self.data.extra_specs @@ -1182,8 +1223,8 @@ class PowerMaxCommonTest(test.TestCase): volume, source_volume, extra_specs)) self.assertEqual(ref_response, (clone_dict, rep_update, rep_info_dict)) - @mock.patch.object(common.PowerMaxCommon, '_clone_check') - def test_create_cloned_volume_is_snapshot(self, mck_clone_chk): + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + def test_create_cloned_volume_is_snapshot(self, mck_cleanup_snaps): volume = self.data.test_snapshot source_volume = self.data.test_volume extra_specs = self.data.extra_specs @@ -1193,8 +1234,8 @@ class PowerMaxCommonTest(test.TestCase): volume, source_volume, extra_specs, True, False)) self.assertEqual(ref_response, (clone_dict, rep_update, rep_info_dict)) - @mock.patch.object(common.PowerMaxCommon, '_clone_check') - def test_create_cloned_volume_from_snapshot(self, mck_clone_chk): + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + def test_create_cloned_volume_from_snapshot(self, mck_cleanup_snaps): volume = self.data.test_clone_volume source_volume = self.data.test_snapshot extra_specs = self.data.extra_specs @@ -1232,7 +1273,7 @@ class PowerMaxCommonTest(test.TestCase): '_find_device_on_array', return_value=None) @mock.patch.object(common.PowerMaxCommon, - '_clone_check') + '_cleanup_device_snapvx') def test_create_cloned_volume_source_not_found( self, mock_check, mock_device): volume = self.data.test_clone_volume @@ -1300,16 +1341,18 @@ class PowerMaxCommonTest(test.TestCase): self.common._create_snapshot, array, snapshot, source_device_id, extra_specs) + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', + return_value=list()) @mock.patch.object(masking.PowerMaxMasking, 'remove_vol_from_storage_group') - def test_delete_volume_from_srp(self, mock_rm): + def test_delete_volume_from_srp(self, mock_rm, mock_get_snaps): array = self.data.array device_id = self.data.device_id volume_name = self.data.test_volume.name ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) 
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f volume = self.data.test_volume - with mock.patch.object(self.common, '_sync_check'): + with mock.patch.object(self.common, '_cleanup_device_snapvx'): with mock.patch.object( self.common, '_delete_from_srp') as mock_delete: self.common._delete_volume(volume) @@ -2062,8 +2105,8 @@ class PowerMaxCommonTest(test.TestCase): return_value=(False, False, False)) @mock.patch.object(common.PowerMaxCommon, '_remove_vol_and_cleanup_replication') - @mock.patch.object(common.PowerMaxCommon, '_clone_check') - def test_unmanage_success(self, mck_clone, mock_rm, mck_sess): + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + def test_unmanage_success(self, mck_cleanup_snaps, mock_rm, mck_sess): volume = self.data.test_volume with mock.patch.object(self.rest, 'rename_volume') as mock_rename: self.common.unmanage(volume) @@ -2090,8 +2133,8 @@ class PowerMaxCommonTest(test.TestCase): @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(True, True, False)) - @mock.patch.object(common.PowerMaxCommon, '_clone_check') - def test_unmanage_temp_snapshot_links(self, mck_clone, mck_sess): + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + def test_unmanage_temp_snapshot_links(self, mck_cleanup_snaps, mck_sess): volume = self.data.test_volume self.assertRaises(exception.VolumeIsBusy, self.common.unmanage, volume) @@ -2251,6 +2294,62 @@ class PowerMaxCommonTest(test.TestCase): target_slo, target_workload, target_extra_specs) self.assertTrue(success) + @mock.patch.object(utils.PowerMaxUtils, 'get_rep_config', + return_value=tpd.PowerMaxData.rep_config_metro) + @mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled', + side_effect=[False, True]) + @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') + @mock.patch.object(rest.PowerMaxRest, 'get_slo_list', return_value=[]) + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', + return_value=[{'snapshotName': 'name', + 'linkedDevices': 'details'}]) + def test_migrate_to_metro_exception_on_linked_snapshot_source( + self, mck_get, mck_slo, mck_validate, mck_rep, mck_config): + array_id = self.data.array + volume = self.data.test_volume + device_id = self.data.device_id + srp = self.data.srp + target_slo = self.data.slo_silver + target_workload = self.data.workload + volume_name = volume.name + target_extra_specs = self.data.rep_extra_specs_rep_config_metro + new_type = {'extra_specs': target_extra_specs} + extra_specs = self.data.extra_specs + self.assertRaises( + exception.VolumeBackendAPIException, self.common._migrate_volume, + array_id, volume, device_id, srp, target_slo, target_workload, + volume_name, new_type, extra_specs) + + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + @mock.patch.object(utils.PowerMaxUtils, 'get_rep_config', + return_value=tpd.PowerMaxData.rep_config_metro) + @mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled', + side_effect=[False, True]) + @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') + @mock.patch.object(rest.PowerMaxRest, 'get_slo_list', return_value=[]) + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', + return_value=[{'snapshotName': 'name'}]) + @mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions', + return_value=('', {'source_vol_id': 'source_vol_id', + 'snap_name': 'snap_name'})) + def test_migrate_to_metro_exception_on_snapshot_target( + self, mck_find, mck_snap, mck_slo, mck_validate, 
mck_rep, + mck_config, mck_cleanup): + array_id = self.data.array + volume = self.data.test_volume + device_id = self.data.device_id + srp = self.data.srp + target_slo = self.data.slo_silver + target_workload = self.data.workload + volume_name = volume.name + target_extra_specs = self.data.rep_extra_specs_rep_config_metro + new_type = {'extra_specs': target_extra_specs} + extra_specs = self.data.extra_specs + self.assertRaises( + exception.VolumeBackendAPIException, self.common._migrate_volume, + array_id, volume, device_id, srp, target_slo, target_workload, + volume_name, new_type, extra_specs) + @mock.patch.object(common.PowerMaxCommon, '_post_retype_srdf_protect_storage_group', return_value=(True, True, True)) @@ -2821,8 +2920,10 @@ class PowerMaxCommonTest(test.TestCase): self.common._update_group_promotion, group, add_vols, remove_vols) + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', + return_value=list()) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) - def test_delete_group(self, mock_check): + def test_delete_group(self, mock_check, mck_snaps): group = self.data.test_group_1 volumes = [self.data.test_volume] context = None @@ -2837,8 +2938,10 @@ class PowerMaxCommonTest(test.TestCase): context, group, volumes) self.assertEqual(ref_model_update, model_update) + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', + return_value=list()) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) - def test_delete_group_success(self, mock_check): + def test_delete_group_success(self, mock_check, mck_get_snaps): group = self.data.test_group_1 volumes = [] ref_model_update = {'status': fields.GroupStatus.DELETED} @@ -2850,7 +2953,9 @@ class PowerMaxCommonTest(test.TestCase): model_update, __ = self.common._delete_group(group, volumes) self.assertEqual(ref_model_update, model_update) - def test_delete_group_already_deleted(self): + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', + return_value=list()) + def test_delete_group_already_deleted(self, mck_get_snaps): group = self.data.test_group_failed ref_model_update = {'status': fields.GroupStatus.DELETED} volumes = [] @@ -2859,10 +2964,13 @@ class PowerMaxCommonTest(test.TestCase): model_update, __ = self.common._delete_group(group, volumes) self.assertEqual(ref_model_update, model_update) + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', + return_value=list()) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) - def test_delete_group_failed(self, mock_check, mock_type_check): + def test_delete_group_failed( + self, mock_check, mock_type_check, mck_get_snaps): group = self.data.test_group_1 volumes = [] ref_model_update = {'status': fields.GroupStatus.ERROR_DELETING} @@ -2873,6 +2981,8 @@ class PowerMaxCommonTest(test.TestCase): group, volumes) self.assertEqual(ref_model_update, model_update) + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', + return_value=list()) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) @@ -2885,15 +2995,38 @@ class PowerMaxCommonTest(test.TestCase): return_value= tpd.PowerMaxData.device_id) @mock.patch.object(masking.PowerMaxMasking, 'remove_volumes_from_storage_group') - def test_delete_group_clone_check( + def test_delete_group_cleanup_snapvx( self, mock_rem, mock_find, mock_mems, 
mock_vols, mock_chk1, - mock_chk2): + mock_chk2, mck_get_snaps): group = self.data.test_group_1 volumes = [self.data.test_volume_group_member] with mock.patch.object( - self.common, '_clone_check') as mock_clone_chk: + self.common, '_cleanup_device_snapvx') as mock_cleanup_snapvx: self.common._delete_group(group, volumes) - mock_clone_chk.assert_called_once() + mock_cleanup_snapvx.assert_called_once() + + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', + return_value=[{'snapshotName': 'name'}]) + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + def test_delete_group_with_volumes_exception_on_remaining_snapshots( + self, mck_cleanup, mck_get): + group = self.data.test_group_1 + volumes = [self.data.test_volume_group_member] + self.assertRaises(exception.VolumeBackendAPIException, + self.common._delete_group, group, volumes) + + @mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions', + return_value=('', {'source_vol_id': 'id', + 'snap_name': 'name'})) + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', + return_value=None) + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + def test_delete_group_with_volumes_exception_on_target_links( + self, mck_cleanup, mck_get, mck_find): + group = self.data.test_group_1 + volumes = [self.data.test_volume_group_member] + self.assertRaises(exception.VolumeBackendAPIException, + self.common._delete_group, group, volumes) @mock.patch.object(rest.PowerMaxRest, 'delete_storage_group') @mock.patch.object(common.PowerMaxCommon, '_failover_replication', @@ -3104,12 +3237,12 @@ class PowerMaxCommonTest(test.TestCase): self.common.unmanage_snapshot(self.data.test_snapshot_manage) mock_mod.assert_called_once() - @mock.patch.object(common.PowerMaxCommon, '_sync_check') + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(rest.PowerMaxRest, 'modify_volume_snap') - def test_unmanage_snapshot_no_sync_check(self, mock_mod, mock_sync): + def test_unmanage_snapshot_no_snapvx_cleanup(self, mock_mod, mock_cleanup): self.common.unmanage_snapshot(self.data.test_snapshot_manage) mock_mod.assert_called_once() - mock_sync.assert_not_called() + mock_cleanup.assert_not_called() @mock.patch.object(utils.PowerMaxUtils, 'is_volume_failed_over', return_value=True) @@ -3133,7 +3266,7 @@ class PowerMaxCommonTest(test.TestCase): @mock.patch.object(provision.PowerMaxProvision, 'delete_volume_snap') @mock.patch.object(provision.PowerMaxProvision, 'is_restore_complete', return_value=True) - @mock.patch.object(common.PowerMaxCommon, '_clone_check') + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(provision.PowerMaxProvision, 'revert_volume_snapshot') def test_revert_to_snapshot(self, mock_revert, mock_clone, mock_complete, mock_delete, mock_parse): @@ -3388,8 +3521,8 @@ class PowerMaxCommonTest(test.TestCase): @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(None, False, None)) - @mock.patch.object(common.PowerMaxCommon, '_sync_check') - def test_extend_vol_validation_checks_success(self, mck_sync, mck_rep): + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + def test_extend_vol_validation_checks_success(self, mck_cleanup, mck_rep): volume = self.data.test_volume array = self.data.array device_id = self.data.device_id @@ -3400,8 +3533,8 @@ class PowerMaxCommonTest(test.TestCase): @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(None, False, None)) - 
@mock.patch.object(common.PowerMaxCommon, '_sync_check') - def test_extend_vol_val_check_no_device(self, mck_sync, mck_rep): + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + def test_extend_vol_val_check_no_device(self, mck_cleanup, mck_rep): volume = self.data.test_volume array = self.data.array device_id = None @@ -3414,8 +3547,8 @@ class PowerMaxCommonTest(test.TestCase): @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(None, True, None)) - @mock.patch.object(common.PowerMaxCommon, '_sync_check') - def test_extend_vol_val_check_snap_src(self, mck_sync, mck_rep): + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + def test_extend_vol_val_check_snap_src(self, mck_cleanup, mck_rep): volume = self.data.test_volume array = self.data.array device_id = self.data.device_id @@ -3429,8 +3562,8 @@ class PowerMaxCommonTest(test.TestCase): @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(None, False, None)) - @mock.patch.object(common.PowerMaxCommon, '_sync_check') - def test_extend_vol_val_check_wrong_size(self, mck_sync, mck_rep): + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + def test_extend_vol_val_check_wrong_size(self, mck_cleanup, mck_rep): volume = self.data.test_volume array = self.data.array device_id = self.data.device_id @@ -3585,103 +3718,6 @@ class PowerMaxCommonTest(test.TestCase): port = self.common._get_unisphere_port() self.assertEqual(ref_port, port) - def test_sync_check(self): - array = self.data.array - device_id = self.data.device_id - extra_specs = self.data.extra_specs - - with mock.patch.object(self.common, '_do_sync_check') as mck_sync: - self.common._sync_check(array, device_id, extra_specs, False, - self.data.device_id2) - mck_sync.assert_called_with(array, self.data.device_id, - extra_specs, False) - mck_sync.reset_mock() - with mock.patch.object(self.common, '_get_target_source_device', - return_value=self.data.device_id3): - self.common._sync_check(array, device_id, extra_specs, True) - mck_sync.assert_called_with(array, self.data.device_id, - extra_specs, True) - mck_sync.reset_mock() - self.common._sync_check(array, device_id, extra_specs) - mck_sync.assert_called_with(array, device_id, extra_specs, False) - - @mock.patch.object(common.PowerMaxCommon, - '_unlink_targets_and_delete_temp_snapvx') - @mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions', - return_value=(tpd.PowerMaxData.snap_src_sessions, - tpd.PowerMaxData.snap_tgt_session)) - @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', - return_value=(True, True, False)) - def test_do_sync_check(self, mck_rep, mck_find, mck_unlink): - - array = self.data.array - device_id = self.data.device_id - extra_specs = self.data.extra_specs - self.common._do_sync_check(array, device_id, extra_specs) - self.assertEqual(3, mck_unlink.call_count) - - @mock.patch.object(provision.PowerMaxProvision, 'delete_temp_volume_snap') - @mock.patch.object(provision.PowerMaxProvision, 'unlink_snapvx_tgt_volume') - def test_unlink_targets_and_delete_temp_snapvx_legacy( - self, mck_break, mck_del): - array = self.data.array - extra_specs = self.data.extra_specs - session = deepcopy(self.data.snap_tgt_session_cm_enabled) - legacy_snap_name = 'EMC_SMI' - session['snap_name'] = legacy_snap_name - source = session['source_vol_id'] - snap_id = session['snapid'] - target = session['target_vol_id'] - self.common._unlink_targets_and_delete_temp_snapvx( - session, array, extra_specs) - 
mck_break.assert_called_with(array, target, source, legacy_snap_name, - extra_specs, snap_id, True) - mck_del.assert_called_once_with( - array, legacy_snap_name, source, snap_id) - - @mock.patch.object(provision.PowerMaxProvision, 'delete_temp_volume_snap') - @mock.patch.object(provision.PowerMaxProvision, 'unlink_snapvx_tgt_volume') - def test_unlink_targets_and_delete_temp_snapvx_is_temp( - self, mck_break, mck_del): - array = self.data.array - extra_specs = self.data.extra_specs - session = deepcopy(self.data.snap_tgt_session_cm_enabled) - snap_name = session['snap_name'] - source = session['source_vol_id'] - snap_id = session['snapid'] - target = session['target_vol_id'] - self.common._unlink_targets_and_delete_temp_snapvx( - session, array, extra_specs) - mck_break.assert_called_with(array, target, source, snap_name, - extra_specs, snap_id, True) - mck_del.assert_called_once_with(array, snap_name, source, snap_id) - - def test_unlink_targets_and_delete_temp_snapvx_no_snap_name_found(self): - array = self.data.array - extra_specs = self.data.extra_specs - session = dict() - self.assertRaises(exception.VolumeBackendAPIException, - self.common._unlink_targets_and_delete_temp_snapvx, - session, array, extra_specs) - - @mock.patch.object(provision.PowerMaxProvision, 'delete_temp_volume_snap') - @mock.patch.object(provision.PowerMaxProvision, 'unlink_snapvx_tgt_volume') - def test_unlink_targets_and_delete_temp_snapvx_not_legacy_not_temp( - self, mck_break, mck_del): - array = self.data.array - extra_specs = self.data.extra_specs - session = deepcopy(self.data.snap_tgt_session_cm_enabled) - session['snap_name'] = 'not_leg_or_tmp' - snap_name = session['snap_name'] - source = session['source_vol_id'] - snap_id = session['snapid'] - target = session['target_vol_id'] - self.common._unlink_targets_and_delete_temp_snapvx( - session, array, extra_specs) - mck_break.assert_called_with(array, target, source, snap_name, - extra_specs, snap_id, True) - mck_del.assert_not_called() - @mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions', return_value=(None, tpd.PowerMaxData.snap_tgt_session)) @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', @@ -3692,83 +3728,6 @@ class PowerMaxCommonTest(test.TestCase): src_device = self.common._get_target_source_device(array, tgt_device) self.assertEqual(src_device, self.data.device_id) - @mock.patch.object(common.PowerMaxCommon, '_delete_valid_snapshot') - @mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions', - return_value=(tpd.PowerMaxData.snap_src_sessions, - tpd.PowerMaxData.snap_tgt_session)) - @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', - return_value=(True, True, False)) - def test_clone_check(self, mck_rep, mck_find, mck_del): - array = self.data.array - device_id = self.data.device_id - extra_specs = self.data.extra_specs - self.common.snapvx_unlink_limit = 3 - self.common._clone_check(array, device_id, extra_specs) - self.assertEqual(3, mck_del.call_count) - - @mock.patch.object( - common.PowerMaxCommon, '_unlink_targets_and_delete_temp_snapvx') - @mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions', - return_value=(tpd.PowerMaxData.snap_src_sessions, - tpd.PowerMaxData.snap_tgt_session)) - @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', - return_value=(True, True, False)) - def test_clone_check_force_unlink(self, mck_rep, mck_find, mck_del): - array = self.data.array - device_id = self.data.device_id - extra_specs = self.data.extra_specs - self.common.snapvx_unlink_limit = 3 - 
self.common._clone_check( - array, device_id, extra_specs, force_unlink=True) - self.assertEqual(3, mck_del.call_count) - - @mock.patch.object(common.PowerMaxCommon, - '_unlink_targets_and_delete_temp_snapvx') - def test_delete_valid_snapshot(self, mck_unlink): - - array = self.data.array - extra_specs = self.data.extra_specs - - session = {'snap_name': 'EMC_SMI_TEST', 'expired': False} - self.common._delete_valid_snapshot(array, session, extra_specs) - mck_unlink.assert_called_with(session, array, extra_specs) - mck_unlink.reset_mock() - - session = {'snap_name': 'temp-000AA-snapshot_for_clone', - 'expired': True} - self.common._delete_valid_snapshot(array, session, extra_specs) - mck_unlink.assert_called_with(session, array, extra_specs) - mck_unlink.reset_mock() - - session = {'snap_name': 'temp-000AA-snapshot_for_clone', - 'expired': False} - self.common._delete_valid_snapshot(array, session, extra_specs) - mck_unlink.assert_not_called() - - def test_delete_valid_snapshot_exception(self): - - array = self.data.array - extra_specs = self.data.extra_specs - session = {'snap_name': 'temp-000AA-snapshot_for_clone', - 'expired': True} - - with mock.patch.object( - self.common, '_unlink_targets_and_delete_temp_snapvx', - side_effect=exception.VolumeBackendAPIException( - "404 temp-000AA-snapshot_for_clone does not exist") - ) as mck_unlink: - self.common._delete_valid_snapshot(array, session, extra_specs) - mck_unlink.assert_called_with(session, array, extra_specs) - - with mock.patch.object( - self.common, '_unlink_targets_and_delete_temp_snapvx', - side_effect=exception.VolumeBackendAPIException( - "500 internal server error")): - self.assertRaises( - exception.VolumeBackendAPIException, - self.common._unlink_targets_and_delete_temp_snapvx, - array, session, extra_specs) - @mock.patch.object(rest.PowerMaxRest, '_get_private_volume', return_value=tpd.PowerMaxData.priv_vol_response_rep) @mock.patch.object(rest.PowerMaxRest, 'get_array_model_info', @@ -4339,3 +4298,110 @@ class PowerMaxCommonTest(test.TestCase): rep_driver_data) self.assertIsNone(group_name) mock_group.assert_not_called() + + @mock.patch.object( + common.PowerMaxCommon, '_unlink_and_delete_temporary_snapshots') + @mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions', + return_value=(None, 'tgt_session')) + @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', + return_value=(True, False, False)) + def test_cleanup_device_snapvx(self, mck_is_rep, mck_find, mck_unlink): + array = self.data.array + device_id = self.data.device_id + extra_specs = self.data.extra_specs + self.common._cleanup_device_snapvx(array, device_id, extra_specs) + mck_unlink.assert_called_once_with('tgt_session', array, extra_specs) + + @mock.patch.object( + common.PowerMaxCommon, '_unlink_and_delete_temporary_snapshots') + @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', + return_value=(False, False, False)) + def test_cleanup_device_snapvx_no_sessions(self, mck_is_rep, mck_unlink): + array = self.data.array + device_id = self.data.device_id + extra_specs = self.data.extra_specs + self.common._cleanup_device_snapvx(array, device_id, extra_specs) + mck_unlink.assert_not_called() + + @mock.patch.object(common.PowerMaxCommon, '_delete_temp_snapshot') + @mock.patch.object(common.PowerMaxCommon, '_unlink_snapshot', + return_value=True) + def test_unlink_and_delete_temporary_snapshots_session_unlinked( + self, mck_unlink, mck_delete): + session = self.data.snap_tgt_session + array = self.data.array + extra_specs = 
self.data.extra_specs + self.common._unlink_and_delete_temporary_snapshots( + session, array, extra_specs) + mck_unlink.assert_called_once_with(session, array, extra_specs) + mck_delete.assert_called_once_with(session, array) + + @mock.patch.object(common.PowerMaxCommon, '_delete_temp_snapshot') + @mock.patch.object(common.PowerMaxCommon, '_unlink_snapshot', + return_value=False) + def test_unlink_and_delete_temporary_snapshots_session_not_unlinked( + self, mck_unlink, mck_delete): + session = self.data.snap_tgt_session + array = self.data.array + extra_specs = self.data.extra_specs + self.common._unlink_and_delete_temporary_snapshots( + session, array, extra_specs) + mck_unlink.assert_called_once_with(session, array, extra_specs) + mck_delete.assert_not_called() + + @mock.patch.object(provision.PowerMaxProvision, 'unlink_snapvx_tgt_volume') + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snap', + side_effect=[tpd.PowerMaxData.priv_snap_response.get( + 'snapshotSrcs')[0], None]) + def test_unlink_temp_snapshot(self, mck_get, mck_unlink): + array = self.data.array + extra_specs = self.data.extra_specs + session = self.data.snap_tgt_session + source = session.get('source_vol_id') + target = session.get('target_vol_id') + snap_name = session.get('snap_name') + snap_id = session.get('snapid') + loop = False + is_unlinked = self.common._unlink_snapshot(session, array, extra_specs) + mck_unlink.assert_called_once_with( + array, target, source, snap_name, extra_specs, snap_id, loop) + self.assertTrue(is_unlinked) + + @mock.patch.object(provision.PowerMaxProvision, 'unlink_snapvx_tgt_volume') + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snap', + return_value=tpd.PowerMaxData.priv_snap_response.get( + 'snapshotSrcs')[0]) + def test_unlink_temp_snapshot_not_unlinked(self, mck_get, mck_unlink): + array = self.data.array + extra_specs = self.data.extra_specs + session = self.data.snap_tgt_session + source = session.get('source_vol_id') + target = session.get('target_vol_id') + snap_name = session.get('snap_name') + snap_id = session.get('snapid') + loop = False + is_unlinked = self.common._unlink_snapshot(session, array, extra_specs) + mck_unlink.assert_called_once_with( + array, target, source, snap_name, extra_specs, snap_id, loop) + self.assertFalse(is_unlinked) + + @mock.patch.object(provision.PowerMaxProvision, 'delete_temp_volume_snap') + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snap', + return_value=dict()) + def test_delete_temp_snapshot(self, mck_get, mck_delete): + session = self.data.snap_tgt_session + array = self.data.array + snap_name = session.get('snap_name') + source = session.get('source_vol_id') + snap_id = session.get('snapid') + self.common._delete_temp_snapshot(session, array) + mck_delete.assert_called_once_with(array, snap_name, source, snap_id) + + @mock.patch.object(provision.PowerMaxProvision, 'delete_temp_volume_snap') + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snap', + return_value={'linkedDevices': 'details'}) + def test_delete_temp_snapshot_is_linked(self, mck_get, mck_delete): + session = self.data.snap_tgt_session + array = self.data.array + self.common._delete_temp_snapshot(session, array) + mck_delete.assert_not_called() diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py index 719b7c734b2..aa999e62cc4 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py +++ 
b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py @@ -235,9 +235,9 @@ class PowerMaxReplicationTest(test.TestCase): @mock.patch.object(masking.PowerMaxMasking, 'remove_vol_from_storage_group') @mock.patch.object(common.PowerMaxCommon, '_delete_from_srp') - @mock.patch.object(common.PowerMaxCommon, '_sync_check') + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') def test_cleanup_replication_source( - self, mck_sync, mock_del, mock_rm, mock_clean): + self, mck_cleanup, mock_del, mock_rm, mock_clean): self.common._cleanup_replication_source( self.data.array, self.data.test_volume, 'vol1', {'device_id': self.data.device_id}, self.extra_specs) @@ -460,10 +460,10 @@ class PowerMaxReplicationTest(test.TestCase): @mock.patch.object(common.PowerMaxCommon, '_failover_replication', return_value=({}, {})) - @mock.patch.object(common.PowerMaxCommon, '_sync_check') + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(rest.PowerMaxRest, 'get_arrays_list', return_value=['123']) - def test_failover_host_async(self, mck_arrays, mck_sync, mock_fg): + def test_failover_host_async(self, mck_arrays, mck_cleanup, mock_fg): volumes = [self.data.test_volume] extra_specs = deepcopy(self.extra_specs) extra_specs['rep_mode'] = utils.REP_ASYNC @@ -928,8 +928,8 @@ class PowerMaxReplicationTest(test.TestCase): return_value=( tpd.PowerMaxData.provider_location_clone, tpd.PowerMaxData.replication_update, {})) - @mock.patch.object(common.PowerMaxCommon, '_clone_check') - def test_cloned_rep_volume(self, mck_clone, mck_meta, mck_clone_chk): + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') + def test_cloned_rep_volume(self, mck_cleanup, mck_meta, mck_clone_chk): metadata = deepcopy(self.data.volume_metadata) metadata['BackendID'] = self.data.rep_backend_id_sync ref_model_update = { @@ -1069,7 +1069,7 @@ class PowerMaxReplicationTest(test.TestCase): self.assertTrue(success) @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') - @mock.patch.object(common.PowerMaxCommon, '_sync_check') + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value='') @mock.patch.object( @@ -1086,7 +1086,7 @@ class PowerMaxReplicationTest(test.TestCase): 'remote_array': tpd.PowerMaxData.remote_array}, tpd.PowerMaxData.rep_extra_specs, False)) def test_migrate_volume_success_no_rep_to_rep( - self, mck_configure, mck_retype, mck_protect, mck_get, mck_check, + self, mck_configure, mck_retype, mck_protect, mck_get, mck_cleanup, mck_valid): self.common.rep_config = {'mode': utils.REP_SYNC, 'array': self.data.array} @@ -1127,6 +1127,8 @@ class PowerMaxReplicationTest(test.TestCase): self.data.rep_extra_specs, volume) self.assertTrue(success) + @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', + return_value=list()) @mock.patch.object(utils.PowerMaxUtils, 'get_rep_config', return_value=tpd.PowerMaxData.rep_config_async) @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata', @@ -1143,14 +1145,15 @@ class PowerMaxReplicationTest(test.TestCase): tpd.PowerMaxData.rep_info_dict, tpd.PowerMaxData.rep_extra_specs_mgmt, True)) - @mock.patch.object(common.PowerMaxCommon, '_sync_check') + @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(common.PowerMaxCommon, 'break_rdf_device_pair_session', return_value=(tpd.PowerMaxData.rep_extra_specs_mgmt, True)) @mock.patch.object(common.PowerMaxCommon, 
'_validate_rdfg_status') def test_migrate_volume_success_rep_to_rep( - self, mck_valid, mck_break, mck_sync, mck_rep, mck_retype, - mck_resume, mck_slo, mck_upd_meta, mck_get_meta, mck_rep_conf): + self, mck_valid, mck_break, mck_cleanup, mck_rep, mck_retype, + mck_resume, mck_slo, mck_upd_meta, mck_get_meta, mck_rep_conf, + mck_get_snaps): array = self.data.array volume = self.data.test_volume device_id = self.data.device_id @@ -1179,7 +1182,7 @@ class PowerMaxReplicationTest(test.TestCase): mck_valid.assert_any_call(array, extra_specs) mck_break.assert_called_once_with( array, device_id, volume_name, extra_specs, volume) - mck_sync.assert_called_once_with(array, device_id, extra_specs) + mck_cleanup.assert_called_once_with(array, device_id, extra_specs) mck_rep.assert_called_once_with( array, volume, device_id, target_extra_specs) mck_retype.assert_called_once() diff --git a/cinder/volume/drivers/dell_emc/powermax/common.py b/cinder/volume/drivers/dell_emc/powermax/common.py index cf1e9981415..07ab99deebe 100644 --- a/cinder/volume/drivers/dell_emc/powermax/common.py +++ b/cinder/volume/drivers/dell_emc/powermax/common.py @@ -98,12 +98,6 @@ powermax_opts = [ cfg.MultiOpt(utils.U4P_FAILOVER_TARGETS, item_type=types.Dict(), help='Dictionary of Unisphere failover target info.'), - cfg.IntOpt(utils.POWERMAX_SNAPVX_UNLINK_LIMIT, - default=3, - help='Use this value to specify ' - 'the maximum number of unlinks ' - 'for the temporary snapshots ' - 'before a clone operation.'), cfg.StrOpt(utils.POWERMAX_ARRAY, help='Serial number of the array to connect to.'), cfg.StrOpt(utils.POWERMAX_SRP, @@ -242,8 +236,6 @@ class PowerMaxCommon(object): """Get relevent details from configuration file.""" self.interval = self.configuration.safe_get('interval') self.retries = self.configuration.safe_get('retries') - self.snapvx_unlink_limit = self.configuration.safe_get( - utils.POWERMAX_SNAPVX_UNLINK_LIMIT) self.powermax_array_tag_list = self.configuration.safe_get( utils.POWERMAX_ARRAY_TAG_LIST) self.powermax_short_host_name_template = self.configuration.safe_get( @@ -626,11 +618,7 @@ class PowerMaxCommon(object): source_device_id = self._find_device_on_array( source_volume, extra_specs) - if not self.next_gen and ( - extra_specs.get('rep_mode', None) == utils.REP_METRO): - self._sync_check(array, source_device_id, extra_specs) - else: - self._clone_check(array, source_device_id, extra_specs) + self._cleanup_device_snapvx(array, source_device_id, extra_specs) clone_dict, rep_update, rep_info_dict = self._create_cloned_volume( clone_volume, source_volume, extra_specs) @@ -1219,7 +1207,7 @@ class PowerMaxCommon(object): # 2 - Check if volume is part of an on-going clone operation or if vol # has source snapshots but not next-gen array - self._sync_check(array, device_id, ex_specs) + self._cleanup_device_snapvx(array, device_id, ex_specs) __, snapvx_src, __ = self.rest.is_vol_in_rep_session(array, device_id) if snapvx_src: if not self.next_gen: @@ -1946,7 +1934,7 @@ class PowerMaxCommon(object): # Perform any snapvx cleanup if required before creating the clone if is_snapshot or from_snapvx: - self._clone_check(array, source_device_id, extra_specs) + self._cleanup_device_snapvx(array, source_device_id, extra_specs) if not is_snapshot: clone_dict, rep_update, rep_info_dict = self._create_replica( @@ -2047,14 +2035,8 @@ class PowerMaxCommon(object): :param volume: volume object to be deleted :returns: volume_name (string vol name) """ - source_device_id = None volume_name = volume.name extra_specs = 
self._initial_setup(volume) - prov_loc = volume.provider_location - - if isinstance(prov_loc, six.string_types): - name = ast.literal_eval(prov_loc) - source_device_id = name.get('source_device_id') device_id = self._find_device_on_array(volume, extra_specs) if device_id is None: @@ -2069,8 +2051,27 @@ class PowerMaxCommon(object): # Check if the volume being deleted is a # source or target for copy session - self._sync_check(array, device_id, extra_specs, - source_device_id=source_device_id) + self._cleanup_device_snapvx(array, device_id, extra_specs) + # Confirm volume has no more snapshots associated and is not a target + snapshots = self.rest.get_volume_snapshot_list(array, device_id) + __, snapvx_target_details = self.rest.find_snap_vx_sessions( + array, device_id, tgt_only=True) + if snapshots: + snapshot_names = ', '.join( + snap.get('snapshotName') for snap in snapshots) + raise exception.VolumeBackendAPIException(_( + 'Cannot delete device %s as it currently has the following ' + 'active snapshots: %s. Please try again once these snapshots ' + 'are no longer active.') % (device_id, snapshot_names)) + if snapvx_target_details: + source_device = snapvx_target_details.get('source_vol_id') + snapshot_name = snapvx_target_details.get('snap_name') + raise exception.VolumeBackendAPIException(_( + 'Cannot delete device %s as it is currently a linked target ' + 'of snapshot %s. The source device of this link is %s. ' + 'Please try again once this snapshots is no longer ' + 'active.') % (device_id, snapshot_name, source_device)) + # Remove from any storage groups and cleanup replication self._remove_vol_and_cleanup_replication( array, device_id, volume_name, extra_specs, volume) @@ -2910,111 +2911,6 @@ class PowerMaxCommon(object): self._delete_from_srp( array, target_device_id, clone_name, extra_specs) - @retry(retry_exc_tuple, interval=1, retries=3) - def _sync_check(self, array, device_id, extra_specs, - tgt_only=False, source_device_id=None): - """Check if volume is part of a SnapVx sync process. - - :param array: the array serial number - :param device_id: volume instance - :param tgt_only: Flag - return only sessions where device is target - :param extra_specs: extra specifications - :param tgt_only: Flag to specify if it is a target - :param source_device_id: source_device_id if it has one - """ - if not source_device_id and tgt_only: - source_device_id = self._get_target_source_device( - array, device_id) - if source_device_id: - @coordination.synchronized("emc-source-{src_device_id}") - def do_unlink_and_delete_snap(src_device_id): - LOG.debug("Locking on source device %(device_id)s.", - {'device_id': src_device_id}) - self._do_sync_check( - array, device_id, extra_specs, tgt_only) - - do_unlink_and_delete_snap(source_device_id) - else: - self._do_sync_check( - array, device_id, extra_specs, tgt_only) - - def _do_sync_check( - self, array, device_id, extra_specs, tgt_only=False): - """Check if volume is part of a SnapVx sync process. 
- - :param array: the array serial number - :param device_id: volume instance - :param tgt_only: Flag - return only sessions where device is target - :param extra_specs: extra specifications - :param tgt_only: Flag to specify if it is a target - """ - snapvx_tgt, snapvx_src, __ = self.rest.is_vol_in_rep_session( - array, device_id) - get_sessions = True if snapvx_tgt or snapvx_src else False - - if get_sessions: - src_sessions, tgt_session = self.rest.find_snap_vx_sessions( - array, device_id, tgt_only) - if tgt_session: - self._unlink_targets_and_delete_temp_snapvx( - tgt_session, array, extra_specs) - if src_sessions and not tgt_only: - if not self.rest.is_snap_id: - src_sessions.sort(key=lambda k: k['snapid'], reverse=True) - for session in src_sessions: - self._unlink_targets_and_delete_temp_snapvx( - session, array, extra_specs) - - def _unlink_targets_and_delete_temp_snapvx( - self, session, array, extra_specs): - """Unlink target and delete the temporary snapvx if valid candidate. - - :param session: the snapvx session - :param array: the array serial number - :param extra_specs: extra specifications - """ - snap_name = session.get('snap_name') - if not snap_name: - msg = _("Snap_name not present in volume's snapvx session. " - "Unable to perform unlink and cleanup.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(msg) - source = session.get('source_vol_id') - snap_id = session.get('snapid') - is_legacy = 'EMC_SMI' in snap_name - is_temp = utils.CLONE_SNAPSHOT_NAME in snap_name - - target, cm_enabled = None, False - if session.get('target_vol_id'): - target = session.get('target_vol_id') - cm_enabled = session.get('copy_mode') - - if target and snap_name: - loop = True if cm_enabled else False - LOG.debug( - "Unlinking source from target. Source: %(vol)s, Target: " - "%(tgt)s, Snap id: %(snapid)s.", {'vol': source, 'tgt': target, - 'snapid': snap_id}) - self.provision.unlink_snapvx_tgt_volume( - array, target, source, snap_name, extra_specs, snap_id, - loop) - - # Candidates for deletion: - # 1. If legacy snapshot with 'EMC_SMI' in snapshot name - # 2. If snapVX snapshot is temporary - # If a target is present in the snap session it will be unlinked by - # the provision.unlink_snapvx_tgt_volume call above. This accounts - # for both CM enabled and disabled, as such by this point there will - # either be no target or it would have been unlinked, therefore - # delete if it is legacy or temp. - if is_legacy or is_temp: - LOG.debug( - "Deleting temporary snapshot. Source: %(vol)s, snap name: " - "%(name)s, snap id: %(snapid)s.", { - 'vol': source, 'name': snap_name, 'snapid': snap_id}) - self.provision.delete_temp_volume_snap( - array, snap_name, source, snap_id) - def _get_target_source_device(self, array, device_id): """Get the source device id of the target. 
@@ -3036,14 +2932,14 @@ class PowerMaxCommon(object): return source_device_id - def _clone_check( - self, array, device_id, extra_specs, force_unlink=False): + @retry(retry_exc_tuple, interval=1, retries=3) + def _cleanup_device_snapvx( + self, array, device_id, extra_specs): """Perform any snapvx cleanup before creating clones or snapshots :param array: the array serial :param device_id: the device ID of the volume :param extra_specs: extra specifications - :param force_unlink: force unlink even if not expired """ snapvx_tgt, snapvx_src, __ = self.rest.is_vol_in_rep_session( array, device_id) @@ -3053,47 +2949,109 @@ class PowerMaxCommon(object): def do_unlink_and_delete_snap(src_device_id): src_sessions, tgt_session = self.rest.find_snap_vx_sessions( array, src_device_id) - count = 0 - if tgt_session and count < self.snapvx_unlink_limit: - self._delete_valid_snapshot( - array, tgt_session, extra_specs, force_unlink) - count += 1 + if tgt_session: + self._unlink_and_delete_temporary_snapshots( + tgt_session, array, extra_specs) if src_sessions: - for session in src_sessions: - if count < self.snapvx_unlink_limit: - self._delete_valid_snapshot( - array, session, extra_specs, force_unlink) - count += 1 - else: - break - + if not self.rest.is_snap_id: + src_sessions.sort( + key=lambda k: k['snapid'], reverse=True) + for src_session in src_sessions: + self._unlink_and_delete_temporary_snapshots( + src_session, array, extra_specs) do_unlink_and_delete_snap(device_id) - def _delete_valid_snapshot( - self, array, session, extra_specs, force_unlink=False): - """Delete a snapshot if valid candidate for deletion. + def _unlink_and_delete_temporary_snapshots( + self, session, array, extra_specs): + """Helper for unlinking and deleting temporary snapshot sessions - :param array: the array serial - :param session: the snapvx session + :param session: snapvx session + :param array: the array serial number :param extra_specs: extra specifications - :param force_unlink: force unlink even if not expired """ - is_legacy = 'EMC_SMI' in session['snap_name'] - is_temp = utils.CLONE_SNAPSHOT_NAME in session['snap_name'] - is_expired = session['expired'] - if force_unlink: - is_expired = True - is_valid = True if is_legacy or (is_temp and is_expired) else False - if is_valid: - try: - self._unlink_targets_and_delete_temp_snapvx( - session, array, extra_specs) - except exception.VolumeBackendAPIException as e: - # Ignore and continue as snapshot has been unlinked - # successfully with incorrect status code returned - if ('404' and session['snap_name'] and - 'does not exist' in six.text_type(e)): - pass + try: + session_unlinked = self._unlink_snapshot( + session, array, extra_specs) + if session_unlinked: + self._delete_temp_snapshot(session, array) + except exception.VolumeBackendAPIException as e: + # Ignore and continue as snapshot has been unlinked + # successfully with incorrect status code returned + if ('404' and session['snap_name'] and + 'does not exist' in six.text_type(e)): + pass + + def _unlink_snapshot(self, session, array, extra_specs): + """Helper for unlinking temporary snapshot during cleanup. 
+ + :param session: session that contains snapshot + :param array: the array serial number + :param extra_specs: extra specifications + :return: + """ + snap_name = session.get('snap_name') + source = session.get('source_vol_id') + snap_id = session.get('snapid') + + snap_info = self.rest.get_volume_snap( + array, source, snap_name, snap_id) + is_linked = snap_info.get('linkedDevices') + + target, cm_enabled = None, False + if session.get('target_vol_id'): + target = session.get('target_vol_id') + cm_enabled = session.get('copy_mode') + + if target and snap_name and is_linked: + loop = True if cm_enabled else False + LOG.debug( + "Unlinking source from target. Source: %(vol)s, Target: " + "%(tgt)s, Snap id: %(snapid)s.", + {'vol': source, 'tgt': target, 'snapid': snap_id}) + self.provision.unlink_snapvx_tgt_volume( + array, target, source, snap_name, extra_specs, snap_id, + loop) + + is_unlinked = True + snap_info = self.rest.get_volume_snap( + array, source, snap_name, snap_id) + if snap_info and snap_info.get('linkedDevices'): + is_unlinked = False + return is_unlinked + + def _delete_temp_snapshot(self, session, array): + """Helper for deleting temporary snapshot during cleanup. + + :param session: Session that contains snapshot + :param array: the array serial number + """ + snap_name = session.get('snap_name') + source = session.get('source_vol_id') + snap_id = session.get('snapid') + LOG.debug( + "Deleting temp snapshot if it exists. Snap name is: " + "%(snap_name)s, Source is: %(source)s, " + "Snap id: %(snap_id)s.", + {'snap_name': snap_name, 'source': source, + 'snap_id': snap_id}) + is_legacy = 'EMC_SMI' in snap_name if snap_name else False + is_temp = ( + utils.CLONE_SNAPSHOT_NAME in snap_name if snap_name else False) + snap_info = self.rest.get_volume_snap( + array, source, snap_name, snap_id) + is_linked = snap_info.get('linkedDevices') if snap_info else False + + # Candidates for deletion: + # 1. If legacy snapshot with 'EMC_SMI' in snapshot name + # 2. If snapVX snapshot is temporary + # 3. Snapshot is unlinked. Call _unlink_snapshot before delete. + if (is_legacy or is_temp) and not is_linked: + LOG.debug( + "Deleting temporary snapshot. Source: %(vol)s, snap name: " + "%(name)s, snap id: %(snapid)s.", { + 'vol': source, 'name': snap_name, 'snapid': snap_id}) + self.provision.delete_temp_volume_snap( + array, snap_name, source, snap_id) def manage_existing(self, volume, external_ref): """Manages an existing PowerMax/VMAX Volume (import to Cinder). 
@@ -3367,7 +3325,7 @@ class PowerMaxCommon(object): {'id': volume_id}) else: # Check if volume is snap source - self._clone_check(array, device_id, extra_specs) + self._cleanup_device_snapvx(array, device_id, extra_specs) snapvx_tgt, snapvx_src, __ = self.rest.is_vol_in_rep_session( array, device_id) if snapvx_src or snapvx_tgt: @@ -3933,13 +3891,7 @@ class PowerMaxCommon(object): :param extra_specs: the extra specifications :returns: bool """ - model_update, success = None, False - rep_info_dict, rep_extra_specs, rep_mode, rep_status = ( - None, None, None, False) - resume_target_sg, resume_original_sg = False, False - resume_original_sg_dict = dict() - orig_mgmt_sg_name = '' - is_partitioned = False + orig_mgmt_sg_name = None target_extra_specs = dict(new_type['extra_specs']) target_extra_specs.update({ @@ -3952,30 +3904,10 @@ class PowerMaxCommon(object): target_extra_specs) target_extra_specs.update( {utils.DISABLECOMPRESSION: compression_disabled}) - - was_rep_enabled = self.utils.is_replication_enabled(extra_specs) - if self.utils.is_replication_enabled(target_extra_specs): - target_backend_id = target_extra_specs.get( - utils.REPLICATION_DEVICE_BACKEND_ID, - utils.BACKEND_ID_LEGACY_REP) - target_rep_config = self.utils.get_rep_config( - target_backend_id, self.rep_configs) - rep_mode = target_rep_config['mode'] - target_extra_specs[utils.REP_MODE] = rep_mode - target_extra_specs[utils.REP_CONFIG] = target_rep_config - is_rep_enabled = True - else: - is_rep_enabled = False - - backend_ids_differ = False - if was_rep_enabled and is_rep_enabled: - curr_backend_id = extra_specs.get( - utils.REPLICATION_DEVICE_BACKEND_ID, - utils.BACKEND_ID_LEGACY_REP) - tgt_backend_id = target_extra_specs.get( - utils.REPLICATION_DEVICE_BACKEND_ID, - utils.BACKEND_ID_LEGACY_REP) - backend_ids_differ = curr_backend_id != tgt_backend_id + (was_rep_enabled, is_rep_enabled, backend_ids_differ, rep_mode, + target_extra_specs) = ( + self._get_replication_flags( + extra_specs, target_extra_specs)) if was_rep_enabled and not self.promotion: self._validate_rdfg_status(array, extra_specs) @@ -3992,51 +3924,26 @@ class PowerMaxCommon(object): rdf_pair_broken, rdf_pair_created, vol_retyped, remote_retyped = ( False, False, False, False) + self._perform_snapshot_cleanup( + array, device_id, was_rep_enabled, is_rep_enabled, + backend_ids_differ, extra_specs, target_extra_specs) + try: # Scenario 1: Rep -> Non-Rep # Scenario 2: Cleanup for Rep -> Diff Rep type - if (was_rep_enabled and not is_rep_enabled) or backend_ids_differ: - if self.promotion: - resume_original_sg = False - rdf_group = extra_specs['rdf_group_no'] - is_partitioned = self._rdf_vols_partitioned( - array, [volume], rdf_group) - if not is_partitioned: - self.break_rdf_device_pair_session_promotion( - array, device_id, volume_name, extra_specs) - else: - rep_extra_specs, resume_original_sg = ( - self.break_rdf_device_pair_session( - array, device_id, volume_name, extra_specs, - volume)) - status = (REPLICATION_ERROR if self.promotion else - REPLICATION_DISABLED) - model_update = { - 'replication_status': status, - 'replication_driver_data': None} - rdf_pair_broken = True - if resume_original_sg: - resume_original_sg_dict = { - utils.ARRAY: array, - utils.SG_NAME: rep_extra_specs['mgmt_sg_name'], - utils.RDF_GROUP_NO: rep_extra_specs['rdf_group_no'], - utils.EXTRA_SPECS: rep_extra_specs} + (model_update, resume_original_sg_dict, rdf_pair_broken, + resume_original_sg, is_partitioned) = ( + self._prep_rep_to_non_rep( + array, device_id, volume_name, 
volume, was_rep_enabled, + is_rep_enabled, backend_ids_differ, extra_specs)) # Scenario 1: Non-Rep -> Rep # Scenario 2: Rep -> Diff Rep type - if (not was_rep_enabled and is_rep_enabled) or backend_ids_differ: - self._sync_check(array, device_id, extra_specs) - (rep_status, rep_driver_data, rep_info_dict, - rep_extra_specs, resume_target_sg) = ( - self.configure_volume_replication( - array, volume, device_id, target_extra_specs)) - if rep_status != 'first_vol_in_rdf_group': - rdf_pair_created = True - model_update = { - 'replication_status': rep_status, - 'replication_driver_data': six.text_type( - {'device_id': rep_info_dict['target_device_id'], - 'array': rep_info_dict['remote_array']})} + (model_update, rdf_pair_created, rep_status, rep_driver_data, + rep_info_dict, rep_extra_specs, resume_target_sg) = ( + self._prep_non_rep_to_rep( + array, device_id, volume, was_rep_enabled, + is_rep_enabled, backend_ids_differ, target_extra_specs)) success, target_sg_name = self._retype_volume( array, srp, device_id, volume, volume_name, extra_specs, @@ -4128,6 +4035,185 @@ class PowerMaxCommon(object): finally: raise e + def _get_replication_flags(self, extra_specs, target_extra_specs): + """Get replication flags from extra specifications. + + :param extra_specs: extra specification -- dict + :param target_extra_specs: target extra specification -- dict + :returns: was_rep_enabled -- bool, is_rep_enabled -- bool, + backend_ids_differ -- bool, rep_mode -- str, + target_extra_specs -- dict + """ + rep_mode = None + was_rep_enabled = self.utils.is_replication_enabled(extra_specs) + if self.utils.is_replication_enabled(target_extra_specs): + target_backend_id = target_extra_specs.get( + utils.REPLICATION_DEVICE_BACKEND_ID, + utils.BACKEND_ID_LEGACY_REP) + target_rep_config = self.utils.get_rep_config( + target_backend_id, self.rep_configs) + rep_mode = target_rep_config['mode'] + target_extra_specs[utils.REP_MODE] = rep_mode + target_extra_specs[utils.REP_CONFIG] = target_rep_config + is_rep_enabled = True + else: + is_rep_enabled = False + + backend_ids_differ = False + if was_rep_enabled and is_rep_enabled: + curr_backend_id = extra_specs.get( + utils.REPLICATION_DEVICE_BACKEND_ID, + utils.BACKEND_ID_LEGACY_REP) + tgt_backend_id = target_extra_specs.get( + utils.REPLICATION_DEVICE_BACKEND_ID, + utils.BACKEND_ID_LEGACY_REP) + backend_ids_differ = curr_backend_id != tgt_backend_id + + return (was_rep_enabled, is_rep_enabled, backend_ids_differ, rep_mode, + target_extra_specs) + + def _prep_non_rep_to_rep( + self, array, device_id, volume, was_rep_enabled, + is_rep_enabled, backend_ids_differ, target_extra_specs): + """Prepare for non rep to rep retype. 
+ + :param array: the array serial number -- str + :param device_id: the device id -- str + :param volume: the volume object -- objects.Volume + :param was_rep_enabled: flag -- bool + :param is_rep_enabled: flag -- bool + :param backend_ids_differ: flag -- bool + :param target_extra_specs: target extra specs -- dict + :returns: model_update -- dict, rdf_pair_created -- bool, + rep_status -- str, rep_driver_data -- dict, + rep_info_dict -- dict, rep_extra_specs -- dict, + resume_target_sg -- bool + """ + model_update, rep_status = None, None + resume_target_sg = False + rdf_pair_created = False + rep_driver_data, rep_info_dict = dict(), dict() + rep_extra_specs = dict() + if (not was_rep_enabled and is_rep_enabled) or backend_ids_differ: + (rep_status, rep_driver_data, rep_info_dict, + rep_extra_specs, resume_target_sg) = ( + self.configure_volume_replication( + array, volume, device_id, target_extra_specs)) + if rep_status != 'first_vol_in_rdf_group': + rdf_pair_created = True + model_update = { + 'replication_status': rep_status, + 'replication_driver_data': six.text_type( + {'device_id': rep_info_dict['target_device_id'], + 'array': rep_info_dict['remote_array']})} + + return (model_update, rdf_pair_created, rep_status, rep_driver_data, + rep_info_dict, rep_extra_specs, resume_target_sg) + + def _prep_rep_to_non_rep( + self, array, device_id, volume_name, volume, was_rep_enabled, + is_rep_enabled, backend_ids_differ, extra_specs): + """Preparation for replication to non-replicated. + + :param array: the array serial number -- str + :param device_id: device_id: the device id -- str + :param volume_name: the volume name -- str + :param volume: the volume object -- objects.Volume + :param was_rep_enabled: flag -- bool + :param is_rep_enabled: flag -- bool + :param backend_ids_differ: flag -- bool + :param extra_specs: extra specs -- dict + :returns: model_update --dict , resume_original_sg_dict -- dict, + rdf_pair_broken -- bool, resume_original_sg -- bool, + is_partitioned -- bool + """ + model_update = dict() + resume_original_sg_dict = dict() + rdf_pair_broken = False + resume_original_sg = False + is_partitioned = False + if (was_rep_enabled and not is_rep_enabled) or backend_ids_differ: + if self.promotion: + resume_original_sg = False + rdf_group = extra_specs['rdf_group_no'] + is_partitioned = self._rdf_vols_partitioned( + array, [volume], rdf_group) + if not is_partitioned: + self.break_rdf_device_pair_session_promotion( + array, device_id, volume_name, extra_specs) + else: + rep_extra_specs, resume_original_sg = ( + self.break_rdf_device_pair_session( + array, device_id, volume_name, extra_specs, + volume)) + status = (REPLICATION_ERROR if self.promotion else + REPLICATION_DISABLED) + model_update = { + 'replication_status': status, + 'replication_driver_data': None} + rdf_pair_broken = True + if resume_original_sg: + resume_original_sg_dict = { + utils.ARRAY: array, + utils.SG_NAME: rep_extra_specs['mgmt_sg_name'], + utils.RDF_GROUP_NO: rep_extra_specs['rdf_group_no'], + utils.EXTRA_SPECS: rep_extra_specs} + return (model_update, resume_original_sg_dict, rdf_pair_broken, + resume_original_sg, is_partitioned) + + def _perform_snapshot_cleanup( + self, array, device_id, was_rep_enabled, is_rep_enabled, + backend_ids_differ, extra_specs, target_extra_specs): + """Perform snapshot cleanup. + + Perform snapshot cleanup before any other changes. If retyping + to either async or metro then there should be no linked snapshots + on the volume. 
+        :param array: the array serial number -- str
+        :param device_id: the device id -- str
+        :param was_rep_enabled: flag -- bool
+        :param is_rep_enabled: flag -- bool
+        :param backend_ids_differ: flag -- bool
+        :param extra_specs: extra specs -- dict
+        :param target_extra_specs: target extra specs -- dict
+        """
+        if (not was_rep_enabled and is_rep_enabled) or backend_ids_differ:
+            target_rep_mode = target_extra_specs.get(utils.REP_MODE)
+            target_is_async = target_rep_mode == utils.REP_ASYNC
+            target_is_metro = target_rep_mode == utils.REP_METRO
+            if target_is_async or target_is_metro:
+                self._cleanup_device_snapvx(array, device_id, extra_specs)
+                snapshots = self.rest.get_volume_snapshot_list(
+                    array, device_id)
+                __, snapvx_target_details = self.rest.find_snap_vx_sessions(
+                    array, device_id, tgt_only=True)
+
+                linked_snapshots = list()
+                for snapshot in snapshots:
+                    linked_devices = snapshot.get('linkedDevices')
+                    if linked_devices:
+                        snapshot_name = snapshot.get('snapshotName')
+                        linked_snapshots.append(snapshot_name)
+                if linked_snapshots:
+                    snapshot_names = ', '.join(linked_snapshots)
+                    raise exception.VolumeBackendAPIException(_(
+                        'Unable to complete retype as volume has active '
+                        'snapvx links. Cannot retype to Asynchronous or '
+                        'Metro modes while the volume has active links. '
+                        'Please wait until these snapvx operations have '
+                        'completed and try again. Snapshots: '
+                        '%s') % snapshot_names)
+                if snapvx_target_details:
+                    source_vol_id = snapvx_target_details.get('source_vol_id')
+                    snap_name = snapvx_target_details.get('snap_name')
+                    raise exception.VolumeBackendAPIException(_(
+                        'Unable to complete retype as volume is a snapvx '
+                        'target. Cannot retype to Asynchronous or Metro '
+                        'modes in this state. Please wait until these snapvx '
+                        'operations complete and try again. Volume %s is '
+                        'currently a target of snapshot %s with source device '
+                        '%s') % (device_id, snap_name, source_vol_id))
+
     def _cleanup_on_migrate_failure(
             self, rdf_pair_broken, rdf_pair_created, vol_retyped,
             remote_retyped, extra_specs, target_extra_specs, volume,
@@ -5168,7 +5254,7 @@ class PowerMaxCommon(object):
                 {'sourceName': volume_name})
         device_id = volume_dict['device_id']
         # Check if volume is snap target (e.g. if clone volume)
-        self._sync_check(array, device_id, extra_specs)
+        self._cleanup_device_snapvx(array, device_id, extra_specs)
         # Remove from any storage groups and cleanup replication
         self._remove_vol_and_cleanup_replication(
             array, device_id, volume_name, extra_specs, volume)
@@ -5571,6 +5657,61 @@ class PowerMaxCommon(object):
             array, vol_grp_name)
         deleted_volume_device_ids = []
+        # If volumes are being deleted along with the group, ensure snapshot
+        # cleanup completes before doing any replication/storage group cleanup.
+        remaining_device_snapshots = list()
+        remaining_snapvx_targets = list()
+        for vol in volumes:
+            extra_specs = self._initial_setup(vol)
+            device_id = self._find_device_on_array(vol, extra_specs)
+            self._cleanup_device_snapvx(array, device_id, extra_specs)
+            snapshots = self.rest.get_volume_snapshot_list(array, device_id)
+            __, snapvx_target_details = self.rest.find_snap_vx_sessions(
+                array, device_id, tgt_only=True)
+            if snapshots:
+                snapshot_names = ', '.join(
+                    snap.get('snapshotName') for snap in snapshots)
+                snap_details = {
+                    'device_id': device_id, 'snapshot_names': snapshot_names}
+                remaining_device_snapshots.append(snap_details)
+            if snapvx_target_details:
+                source_vol_id = snapvx_target_details.get('source_vol_id')
+                snap_name = snapvx_target_details.get('snap_name')
+                target_details = {
+                    'device_id': device_id, 'source_vol_id': source_vol_id,
+                    'snapshot_name': snap_name}
+                remaining_snapvx_targets.append(target_details)
+
+        # Fail out if volumes to be deleted still have snapshots.
+        if remaining_device_snapshots:
+            for details in remaining_device_snapshots:
+                device_id = details.get('device_id')
+                snapshot_names = details.get('snapshot_names')
+                LOG.error('Cannot delete device %s, it has the '
+                          'following active snapshots, %s.',
+                          device_id, snapshot_names)
+            raise exception.VolumeBackendAPIException(_(
+                'Group volumes have active snapshots. Cannot perform group '
+                'delete. Wait for snapvx sessions to complete their '
+                'processes or remove volumes from group before attempting '
+                'to delete again. Please see previously logged error '
+                'message for device and snapshot details.'))
+
+        if remaining_snapvx_targets:
+            for details in remaining_snapvx_targets:
+                device_id = details.get('device_id')
+                snap_name = details.get('snapshot_name')
+                source_vol_id = details.get('source_vol_id')
+                LOG.error('Cannot delete device %s, it is currently a target '
+                          'of snapshot %s with source device id %s',
+                          device_id, snap_name, source_vol_id)
+            raise exception.VolumeBackendAPIException(_(
+                'Some group volumes are targets of a snapvx session. Cannot '
+                'perform group delete. Wait for snapvx sessions to complete '
+                'their processes or remove volumes from group before '
+                'attempting to delete again. Please see previously logged '
+                'error message for device and snapshot details.'))
+
         # Remove replication for group, if applicable
         if group.is_replicated:
             vt_extra_specs = self.utils.get_volumetype_extra_specs(
@@ -5591,11 +5732,8 @@ class PowerMaxCommon(object):
                 interval_retries_dict)
         for vol in volumes:
             extra_specs = self._initial_setup(vol)
-            device_id = self._find_device_on_array(
-                vol, extra_specs)
+            device_id = self._find_device_on_array(vol, extra_specs)
             if device_id in volume_device_ids:
-                self._clone_check(
-                    array, device_id, extra_specs, force_unlink=True)
                 self.masking.remove_and_reset_members(
                     array, vol, device_id, vol.name, extra_specs, False)
@@ -6706,7 +6844,7 @@ class PowerMaxCommon(object):
                 message=exception_message)
         else:
             snap_id = snap_id_list[0]
-        self._clone_check(array, sourcedevice_id, extra_specs)
+        self._cleanup_device_snapvx(array, sourcedevice_id, extra_specs)
         try:
             LOG.info("Reverting device: %(deviceid)s "
                      "to snapshot: %(snapname)s.",
diff --git a/cinder/volume/drivers/dell_emc/powermax/utils.py b/cinder/volume/drivers/dell_emc/powermax/utils.py
index fdaff5d910f..aeed7b9d332 100644
--- a/cinder/volume/drivers/dell_emc/powermax/utils.py
+++ b/cinder/volume/drivers/dell_emc/powermax/utils.py
@@ -132,7 +132,6 @@ POWERMAX_ARRAY = 'powermax_array'
 POWERMAX_SRP = 'powermax_srp'
 POWERMAX_SERVICE_LEVEL = 'powermax_service_level'
 POWERMAX_PORT_GROUPS = 'powermax_port_groups'
-POWERMAX_SNAPVX_UNLINK_LIMIT = 'powermax_snapvx_unlink_limit'
 POWERMAX_ARRAY_TAG_LIST = 'powermax_array_tag_list'
 POWERMAX_SHORT_HOST_NAME_TEMPLATE = 'powermax_short_host_name_template'
 POWERMAX_PORT_GROUP_NAME_TEMPLATE = 'powermax_port_group_name_template'