Merge "PowerMax Driver - Failover abilities promotion"
This commit is contained in:
commit
61e24eb8ab
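
Note on the workflow this change introduces: the driver gains a two-step "failover
promotion" on top of the existing failover support. The sequence below is a sketch
inferred from the new tests in this diff; utils.PMAX_FAILOVER_START_ARRAY_PROMOTION
is the sentinel backend_id added by this change, and the operator-facing string
behind it is not shown in this excerpt.

    # Step 1: while already failed over, start promotion. The driver sets
    # common.promotion = True and returns empty volume/group update lists.
    common.failover_host(volumes, utils.PMAX_FAILOVER_START_ARRAY_PROMOTION, groups)
    # Step 2: retype/migrate volumes off the replicated volume types; RDF
    # pairs are torn down via break_rdf_device_pair_session_promotion().
    # Step 3: fail over to 'default' to finalize; the promotion flag clears
    # and the usual update lists are populated again.
    common.failover_host(volumes, 'default', groups)
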
@@ -468,6 +468,10 @@ class PowerMaxData(object):
     rep_extra_specs_rep_config = deepcopy(rep_extra_specs6)
     rep_extra_specs_rep_config[utils.REP_CONFIG] = rep_config_sync

+    rep_extra_specs_rep_config_metro = deepcopy(rep_extra_specs6)
+    rep_extra_specs_rep_config_metro[utils.REP_CONFIG] = rep_config_metro
+    rep_extra_specs_rep_config_metro[utils.REP_MODE] = utils.REP_METRO
+
     extra_specs_tags = deepcopy(extra_specs)
     extra_specs_tags.update({utils.STORAGE_GROUP_TAGS: sg_tags})

@@ -577,6 +577,95 @@ class PowerMaxCommonTest(test.TestCase):
             extra_specs, connector, False,
             async_grp=None, host_template=None)

+    @mock.patch.object(utils.PowerMaxUtils, 'is_metro_device',
+                       return_value=True)
+    @mock.patch.object(provision.PowerMaxProvision, 'verify_slo_workload')
+    @mock.patch.object(common.PowerMaxCommon, '_remove_members')
+    @mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id',
+                       return_value=(tpd.PowerMaxData.iscsi_device_info,
+                                     False))
+    @mock.patch.object(
+        common.PowerMaxCommon, '_get_replication_extra_specs',
+        return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro)
+    @mock.patch.object(
+        common.PowerMaxCommon, '_initial_setup',
+        return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro)
+    def test_unmap_lun_replication_metro(
+            self, mck_setup, mck_rep, mck_find, mck_rem, mck_slo, mck_metro):
+        volume = deepcopy(self.data.test_volume)
+        connector = deepcopy(self.data.connector)
+        volume.volume_attachment.objects = [
+            deepcopy(self.data.test_volume_attachment)]
+        extra_specs = deepcopy(self.data.rep_extra_specs_rep_config)
+        extra_specs[utils.FORCE_VOL_REMOVE] = True
+        self.common._unmap_lun(volume, connector)
+        self.assertEqual(2, mck_rem.call_count)
+
+    @mock.patch.object(utils.PowerMaxUtils, 'is_metro_device',
+                       return_value=True)
+    @mock.patch.object(provision.PowerMaxProvision, 'verify_slo_workload')
+    @mock.patch.object(common.PowerMaxCommon, '_remove_members')
+    @mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id',
+                       return_value=(tpd.PowerMaxData.iscsi_device_info,
+                                     False))
+    @mock.patch.object(
+        common.PowerMaxCommon, '_get_replication_extra_specs',
+        return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro)
+    @mock.patch.object(
+        common.PowerMaxCommon, '_initial_setup',
+        return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro)
+    def test_unmap_lun_replication_metro_promotion(
+            self, mck_setup, mck_rep, mck_find, mck_rem, mck_slo, mck_metro):
+        volume = deepcopy(self.data.test_volume)
+        connector = deepcopy(self.data.connector)
+        volume.volume_attachment.objects = [
+            deepcopy(self.data.test_volume_attachment)]
+        extra_specs = deepcopy(self.data.rep_extra_specs_rep_config)
+        extra_specs[utils.FORCE_VOL_REMOVE] = True
+        self.common.promotion = True
+        self.common._unmap_lun(volume, connector)
+        self.common.promotion = False
+        self.assertEqual(1, mck_rem.call_count)
+
+    @mock.patch.object(common.PowerMaxCommon, '_unmap_lun')
+    @mock.patch.object(metadata.PowerMaxVolumeMetadata, 'capture_detach_info')
+    def test_unmap_lun_promotion_non_replicated_volume(
+            self, mck_unmap, mck_info):
+        volume = deepcopy(self.data.test_volume)
+        connector = deepcopy(self.data.connector)
+        ret = self.common._unmap_lun_promotion(volume, connector)
+        self.assertIsNone(ret)
+        self.assertEqual(0, mck_unmap.call_count)
+        self.assertEqual(0, mck_info.call_count)
+
+    @mock.patch.object(common.PowerMaxCommon, '_unmap_lun')
+    @mock.patch.object(
+        common.PowerMaxCommon, '_initial_setup',
+        return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro)
+    def test_unmap_lun_promotion_replicated_metro_volume(
+            self, mck_setup, mck_unmap):
+        volume = deepcopy(self.data.test_rep_volume)
+        connector = deepcopy(self.data.connector)
+        self.common._unmap_lun_promotion(volume, connector)
+        mck_setup.assert_called_once_with(volume)
+        mck_unmap.assert_called_once_with(volume, connector)
+
+    @mock.patch.object(metadata.PowerMaxVolumeMetadata, 'capture_detach_info')
+    @mock.patch.object(
+        common.PowerMaxCommon, '_initial_setup',
+        return_value=tpd.PowerMaxData.rep_extra_specs_rep_config)
+    def test_unmap_lun_promotion_replicated_non_metro_volume(
+            self, mck_setup, mck_capture):
+        volume = deepcopy(self.data.test_rep_volume)
+        connector = deepcopy(self.data.connector)
+        extra_specs = self.data.rep_extra_specs_rep_config
+        device_id = self.data.device_id
+        promotion_key = [utils.PMAX_FAILOVER_START_ARRAY_PROMOTION]
+        self.common._unmap_lun_promotion(volume, connector)
+        mck_setup.assert_called_once_with(volume)
+        mck_capture.assert_called_once_with(
+            volume, extra_specs, device_id, promotion_key, promotion_key)
+
     def test_initialize_connection_already_mapped(self):
         volume = self.data.test_volume
         connector = self.data.connector
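
Why the two metro unmap tests above expect different _remove_members call
counts: a schematic only (not driver code; remove_members stands in for
PowerMaxCommon._remove_members, per the guard added at @@ -850,7 +853,8 @@):

    # if is_metro_device(...) and not self.promotion:
    #     remove_members(local_view)    # first call
    #     remove_members(remote_view)   # second call -> call_count == 2
    # else:
    #     remove_members(local_view)    # promotion skips the remote
    #                                   # masking view -> call_count == 1
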
@@ -712,6 +801,17 @@ class PowerMaxCommonTest(test.TestCase):
         mock_unmap.assert_called_once_with(
             volume, connector)

+    def test_terminate_connection_promotion(self):
+        volume = self.data.test_volume
+        connector = self.data.connector
+        with mock.patch.object(
+                self.common, '_unmap_lun_promotion') as mock_unmap:
+            self.common.promotion = True
+            self.common.terminate_connection(volume, connector)
+            mock_unmap.assert_called_once_with(
+                volume, connector)
+            self.common.promotion = False
+
     @mock.patch.object(provision.PowerMaxProvision, 'extend_volume')
     @mock.patch.object(common.PowerMaxCommon, '_extend_vol_validation_checks')
     def test_extend_vol_no_rep_success(self, mck_val_chk, mck_extend):
@@ -1975,6 +2075,30 @@ class PowerMaxCommonTest(test.TestCase):
         host = self.data.new_host
         self.assertFalse(self.common.retype(volume, new_type, host))

+    @mock.patch.object(
+        common.PowerMaxCommon, '_initial_setup',
+        return_value=tpd.PowerMaxData.rep_extra_specs_rep_config)
+    @mock.patch.object(provision.PowerMaxProvision, 'verify_slo_workload',
+                       return_value=(True, True))
+    @mock.patch.object(common.PowerMaxCommon, '_slo_workload_migration')
+    def test_retype_promotion_extra_spec_update(
+            self, mck_migrate, mck_slo, mck_setup):
+        device_id = self.data.device_id
+        volume_name = self.data.test_rep_volume.name
+        extra_specs = deepcopy(self.data.rep_extra_specs_rep_config)
+        rep_config = extra_specs[utils.REP_CONFIG]
+        rep_extra_specs = self.common._get_replication_extra_specs(
+            extra_specs, rep_config)
+        extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
+        volume = self.data.test_rep_volume
+        new_type = {'extra_specs': {}}
+        host = {'host': self.data.new_host}
+        self.common.promotion = True
+        self.common.retype(volume, new_type, host)
+        self.common.promotion = False
+        mck_migrate.assert_called_once_with(
+            device_id, volume, host, volume_name, new_type, rep_extra_specs)
+
     def test_slo_workload_migration_valid(self):
         device_id = self.data.device_id
         volume_name = self.data.test_volume.name
@@ -2222,20 +2346,23 @@ class PowerMaxCommonTest(test.TestCase):
         ref_return = (True, 'Silver', 'OLTP')
         return_val = self.common._is_valid_for_storage_assisted_migration(
             device_id, host, self.data.array,
-            self.data.srp, volume_name, False, False)
+            self.data.srp, volume_name, False, False, self.data.slo,
+            self.data.workload, False)
         self.assertEqual(ref_return, return_val)
         # No current sgs found
         with mock.patch.object(self.rest, 'get_storage_groups_from_volume',
                                return_value=None):
             return_val = self.common._is_valid_for_storage_assisted_migration(
                 device_id, host, self.data.array, self.data.srp,
-                volume_name, False, False)
+                volume_name, False, False, self.data.slo, self.data.workload,
+                False)
             self.assertEqual(ref_return, return_val)
         host = {'host': 'HostX@Backend#Silver+SRP_1+000197800123'}
         ref_return = (True, 'Silver', 'NONE')
         return_val = self.common._is_valid_for_storage_assisted_migration(
             device_id, host, self.data.array,
-            self.data.srp, volume_name, False, False)
+            self.data.srp, volume_name, False, False, self.data.slo,
+            self.data.workload, False)
         self.assertEqual(ref_return, return_val)

     def test_is_valid_for_storage_assisted_migration_false(self):
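
The three extra trailing arguments exercised above come from the signature
change made to _is_valid_for_storage_assisted_migration later in this diff
(@@ -4416,7 +4510,8 @@); the new call shape is:

    is_valid, target_slo, target_workload = (
        self.common._is_valid_for_storage_assisted_migration(
            device_id, host, array, srp, volume_name,
            do_change_compression, do_change_replication,
            source_slo, source_workload, is_tgt_rep))
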
@@ -2246,25 +2373,29 @@ class PowerMaxCommonTest(test.TestCase):
         host = {'host': 'HostX@Backend#Silver+SRP_1+000197800123+dummy+data'}
         return_val = self.common._is_valid_for_storage_assisted_migration(
             device_id, host, self.data.array,
-            self.data.srp, volume_name, False, False)
+            self.data.srp, volume_name, False, False, self.data.slo,
+            self.data.workload, False)
         self.assertEqual(ref_return, return_val)
         # Wrong array
         host2 = {'host': 'HostX@Backend#Silver+OLTP+SRP_1+00012345678'}
         return_val = self.common._is_valid_for_storage_assisted_migration(
             device_id, host2, self.data.array,
-            self.data.srp, volume_name, False, False)
+            self.data.srp, volume_name, False, False, self.data.slo,
+            self.data.workload, False)
         self.assertEqual(ref_return, return_val)
         # Wrong srp
         host3 = {'host': 'HostX@Backend#Silver+OLTP+SRP_2+000197800123'}
         return_val = self.common._is_valid_for_storage_assisted_migration(
             device_id, host3, self.data.array,
-            self.data.srp, volume_name, False, False)
+            self.data.srp, volume_name, False, False, self.data.slo,
+            self.data.workload, False)
         self.assertEqual(ref_return, return_val)
         # Already in correct sg
         host4 = {'host': self.data.fake_host}
         return_val = self.common._is_valid_for_storage_assisted_migration(
             device_id, host4, self.data.array,
-            self.data.srp, volume_name, False, False)
+            self.data.srp, volume_name, False, False, self.data.slo,
+            self.data.workload, False)
         self.assertEqual(ref_return, return_val)

     def test_is_valid_for_storage_assisted_migration_next_gen(self):
@@ -2276,9 +2407,66 @@ class PowerMaxCommonTest(test.TestCase):
                                return_value=True):
             return_val = self.common._is_valid_for_storage_assisted_migration(
                 device_id, host, self.data.array,
-                self.data.srp, volume_name, False, False)
+                self.data.srp, volume_name, False, False, self.data.slo,
+                self.data.workload, False)
             self.assertEqual(ref_return, return_val)

+    def test_is_valid_for_storage_assisted_migration_promotion_change_comp(
+            self):
+        device_id = self.data.device_id
+        host = {'host': self.data.new_host}
+        volume_name = self.data.test_volume.name
+        ref_return = (False, None, None)
+        self.common.promotion = True
+        return_val = self.common._is_valid_for_storage_assisted_migration(
+            device_id, host, self.data.array,
+            self.data.srp, volume_name, True, False, self.data.slo_silver,
+            self.data.workload, False)
+        self.common.promotion = False
+        self.assertEqual(ref_return, return_val)
+
+    def test_is_valid_for_storage_assisted_migration_promotion_change_slo(
+            self):
+        device_id = self.data.device_id
+        host = {'host': self.data.new_host}
+        volume_name = self.data.test_volume.name
+        ref_return = (False, None, None)
+        self.common.promotion = True
+        return_val = self.common._is_valid_for_storage_assisted_migration(
+            device_id, host, self.data.array,
+            self.data.srp, volume_name, False, False, self.data.slo,
+            self.data.workload, False)
+        self.common.promotion = False
+        self.assertEqual(ref_return, return_val)
+
+    def test_is_valid_for_storage_assisted_migration_promotion_change_workload(
+            self):
+        device_id = self.data.device_id
+        host = {'host': self.data.new_host}
+        volume_name = self.data.test_volume.name
+        ref_return = (False, None, None)
+        self.common.promotion = True
+        return_val = self.common._is_valid_for_storage_assisted_migration(
+            device_id, host, self.data.array,
+            self.data.srp, volume_name, False, False, self.data.slo_silver,
+            'fail_workload', False)
+        self.common.promotion = False
+        self.assertEqual(ref_return, return_val)
+
+    def test_is_valid_for_storage_assisted_migration_promotion_target_not_rep(
+            self):
+        device_id = self.data.device_id
+        host = {'host': self.data.new_host}
+        volume_name = self.data.test_volume.name
+        ref_return = (False, None, None)
+        self.common.promotion = True
+        return_val = self.common._is_valid_for_storage_assisted_migration(
+            device_id, host, self.data.array,
+            self.data.srp, volume_name, False, False, self.data.slo_silver,
+            'OLTP', True)
+        self.common.promotion = False
+        self.assertEqual(ref_return, return_val)
+
     def test_find_volume_group(self):
         group = self.data.test_group_1
         array = self.data.array
@@ -2442,9 +2630,8 @@ class PowerMaxCommonTest(test.TestCase):
         add_vols = [self.data.test_volume]
         remove_vols = []
         ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
-        model_update, __, __ = self.common.update_group(group,
-                                                        add_vols,
-                                                        remove_vols)
+        model_update, __, __ = self.common.update_group(
+            group, add_vols, remove_vols)
         self.assertEqual(ref_model_update, model_update)

     @mock.patch.object(common.PowerMaxCommon, '_find_volume_group',
@@ -2476,13 +2663,100 @@ class PowerMaxCommonTest(test.TestCase):
         with mock.patch.object(
                 rest.PowerMaxRest, 'is_volume_in_storagegroup',
                 return_value=False) as mock_exists:
-            model_update, __, __ = self.common.update_group(group,
-                                                            add_vols,
-                                                            remove_vols)
+            model_update, __, __ = self.common.update_group(
+                group, add_vols, remove_vols)
             mock_exists.assert_called_once()

         self.assertEqual(ref_model_update, model_update)

+    @mock.patch.object(volume_utils, 'is_group_a_type',
+                       return_value=False)
+    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
+                       return_value=True)
+    def test_update_group_failover_failure(
+            self, mock_cg_type, mock_type_check):
+        group = self.data.test_group_1
+        add_vols = []
+        remove_vols = [self.data.test_volume_group_member]
+        self.common.failover = True
+        self.assertRaises(
+            exception.VolumeBackendAPIException, self.common.update_group,
+            group, add_vols, remove_vols)
+        self.common.failover = False
+
+    @mock.patch.object(volume_utils, 'is_group_a_type',
+                       return_value=False)
+    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
+                       return_value=True)
+    @mock.patch.object(common.PowerMaxCommon, '_update_group_promotion')
+    def test_update_group_during_promotion(
+            self, mck_update, mock_cg_type, mock_type_check):
+        group = self.data.test_group_1
+        add_vols = []
+        remove_vols = [self.data.test_volume_group_member]
+        ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
+        self.common.promotion = True
+        model_update, __, __ = self.common.update_group(
+            group, add_vols, remove_vols)
+        self.common.promotion = False
+        mck_update.assert_called_once_with(group, add_vols, remove_vols)
+        self.assertEqual(ref_model_update, model_update)
+
+    @mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup',
+                       return_value=True)
+    @mock.patch.object(
+        common.PowerMaxCommon, '_get_replication_extra_specs',
+        return_value=tpd.PowerMaxData.rep_extra_specs_rep_config)
+    @mock.patch.object(
+        common.PowerMaxCommon, '_initial_setup',
+        return_value=tpd.PowerMaxData.ex_specs_rep_config)
+    @mock.patch.object(volume_utils, 'is_group_a_type',
+                       return_value=True)
+    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
+                       return_value=True)
+    @mock.patch.object(
+        masking.PowerMaxMasking, 'remove_volumes_from_storage_group')
+    def test_update_group_promotion(
+            self, mck_rem, mock_cg_type, mock_type_check, mck_setup, mck_rep,
+            mck_in_sg):
+        group = self.data.test_rep_group
+        add_vols = []
+        remove_vols = [self.data.test_volume_group_member]
+        remote_array = self.data.remote_array
+        device_id = [self.data.device_id]
+        group_name = self.data.storagegroup_name_source
+        interval_retries_dict = {utils.INTERVAL: 1,
+                                 utils.RETRIES: 1,
+                                 utils.FORCE_VOL_REMOVE: True}
+        self.common._update_group_promotion(group, add_vols, remove_vols)
+        mck_rem.assert_called_once_with(
+            remote_array, device_id, group_name, interval_retries_dict)
+
+    @mock.patch.object(volume_utils, 'is_group_a_type',
+                       return_value=False)
+    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
+                       return_value=True)
+    def test_update_group_promotion_non_replicated(
+            self, mock_cg_type, mock_type_check):
+        group = self.data.test_group_failed
+        add_vols = []
+        remove_vols = [self.data.test_volume_group_member]
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.common._update_group_promotion,
+                          group, add_vols, remove_vols)
+
+    @mock.patch.object(volume_utils, 'is_group_a_type',
+                       return_value=True)
+    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
+                       return_value=True)
+    def test_update_group_promotion_add_volumes(
+            self, mock_cg_type, mock_type_check):
+        group = self.data.test_rep_group
+        add_vols = [self.data.test_volume_group_member]
+        remove_vols = []
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.common._update_group_promotion,
+                          group, add_vols, remove_vols)
+
     @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False)
     def test_delete_group(self, mock_check):
         group = self.data.test_group_1
@@ -3702,7 +3976,7 @@ class PowerMaxCommonTest(test.TestCase):
         device_id = self.data.device_id
         volume = self.data.test_attached_volume
         volume_name = self.data.volume_id
-        extra_specs = self.data.rep_extra_specs
+        extra_specs = self.data.rep_extra_specs_rep_config
         target_slo = self.data.slo_silver
         target_workload = self.data.workload
         target_extra_specs = deepcopy(self.data.rep_extra_specs)
@@ -3738,7 +4012,7 @@ class PowerMaxCommonTest(test.TestCase):
         device_id = self.data.device_id
         volume = self.data.test_attached_volume
         volume_name = self.data.volume_id
-        extra_specs = self.data.rep_extra_specs
+        extra_specs = self.data.rep_extra_specs_rep_config
         target_slo = self.data.slo_silver
         target_workload = self.data.workload
         target_extra_specs = deepcopy(self.data.rep_extra_specs)

@@ -269,7 +269,7 @@ class PowerMaxReplicationTest(test.TestCase):
         secondary_id, volume_update_list, group_update_list = (
             self.common.failover_host(volumes, backend_id, groups))
         mck_validate.assert_called_once_with(
-            False, backend_id, rep_configs, self.data.array, ['123'])
+            False, backend_id, rep_configs, self.data.array, ['123'], False)
         mck_populate.assert_called_once_with(volumes, groups, None)
         self.assertEqual(backend_id, secondary_id)
         self.assertEqual('vol_list', volume_update_list)
@@ -286,7 +286,56 @@ class PowerMaxReplicationTest(test.TestCase):
         self.assertRaises(exception.InvalidReplicationTarget,
                           self.common.failover_host, volumes, backend_id)
         mck_validate.assert_called_once_with(
-            False, backend_id, rep_configs, self.data.array, ['123'])
+            False, backend_id, rep_configs, self.data.array, ['123'], False)

+    @mock.patch.object(
+        common.PowerMaxCommon, '_populate_volume_and_group_update_lists')
+    @mock.patch.object(utils.PowerMaxUtils, 'validate_failover_request',
+                       return_value=(True, 'val'))
+    @mock.patch.object(rest.PowerMaxRest, 'get_arrays_list',
+                       return_value=['123'])
+    def test_failover_host_start_promotion(
+            self, mck_arrays, mck_validate, mck_populate):
+        volumes = [self.data.test_volume, self.data.test_clone_volume]
+        groups = [self.data.test_group]
+        backend_id = utils.PMAX_FAILOVER_START_ARRAY_PROMOTION
+        rep_configs = self.common.rep_configs
+        secondary_id, volume_update_list, group_update_list = (
+            self.common.failover_host(volumes, backend_id, groups))
+        self.assertEqual(0, mck_populate.call_count)
+        self.assertEqual(backend_id, secondary_id)
+        self.assertEqual(list(), volume_update_list)
+        self.assertEqual(list(), group_update_list)
+        self.assertEqual(self.common.promotion, True)
+        self.common.promotion = False
+        mck_validate.assert_called_once_with(
+            False, backend_id, rep_configs, self.data.array, ['123'], False)
+
+    @mock.patch.object(
+        common.PowerMaxCommon, '_populate_volume_and_group_update_lists',
+        return_value=(list(), list()))
+    @mock.patch.object(utils.PowerMaxUtils, 'validate_failover_request',
+                       return_value=(True, 'val'))
+    @mock.patch.object(rest.PowerMaxRest, 'get_arrays_list',
+                       return_value=['123'])
+    def test_failover_host_complete_promotion(
+            self, mck_arrays, mck_validate, mck_populate):
+        volume = deepcopy(self.data.test_rep_volume)
+        volume.replication_status = fields.ReplicationStatus.ERROR
+        volumes = [volume]
+        groups = [self.data.test_group]
+        backend_id = 'default'
+        rep_configs = self.common.rep_configs
+        self.common.promotion = True
+        secondary_id, volume_update_list, group_update_list = (
+            self.common.failover_host(volumes, backend_id, groups))
+        mck_populate.assert_called_once_with(volumes, groups, None)
+        mck_validate.assert_called_once_with(
+            False, backend_id, rep_configs, self.data.array, ['123'], True)
+        self.assertEqual(backend_id, secondary_id)
+        self.assertEqual(list(), volume_update_list)
+        self.assertEqual(list(), group_update_list)
+        self.assertEqual(self.common.promotion, False)
+
     @mock.patch.object(common.PowerMaxCommon,
                        '_update_volume_list_from_sync_vol_list',
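
The two tests above pin the promotion state machine around failover_host; as
a comment sketch (states inferred from the assertions, not from driver docs):

    # failed over --failover_host(PMAX_FAILOVER_START_ARRAY_PROMOTION)-->
    #     promotion == True, empty update lists returned
    # promotion   --failover_host('default')-->
    #     promotion == False, update lists populated, volumes left in
    #     replication_status ERROR are processed for finalization
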
@@ -316,6 +365,23 @@ class PowerMaxReplicationTest(test.TestCase):
                            'updates': 'grp_updates'}]
         self.assertEqual(group_updates_ref, group_updates)

+    @mock.patch.object(common.PowerMaxCommon, '_initial_setup',
+                       return_value=tpd.PowerMaxData.extra_specs)
+    def test_populate_volume_and_group_update_lists_promotion_non_rep(
+            self, mck_setup):
+        volumes = [self.data.test_volume]
+        groups = []
+        ref_model_update = {
+            'volume_id': volumes[0].id,
+            'updates': {
+                'replication_status': fields.ReplicationStatus.DISABLED}}
+        self.common.promotion = True
+        volume_updates, group_updates = (
+            self.common._populate_volume_and_group_update_lists(
+                volumes, groups, None))
+        self.common.promotion = False
+        self.assertEqual(ref_model_update, volume_updates[0])
+
     def test_failover_replication_empty_group(self):
         with mock.patch.object(volume_utils, 'is_group_a_type',
                                return_value=True):
@@ -370,6 +436,25 @@ class PowerMaxReplicationTest(test.TestCase):
         self.assertEqual(fields.ReplicationStatus.ERROR,
                          model_update['replication_status'])

+    @mock.patch.object(common.PowerMaxCommon, '_rdf_vols_partitioned',
+                       return_value=True)
+    @mock.patch.object(rest.PowerMaxRest, 'srdf_failover_group',
+                       return_value=tpd.PowerMaxData.rdf_group_no_1)
+    @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
+                       return_value=tpd.PowerMaxData.rdf_group_no_1)
+    @mock.patch.object(common.PowerMaxCommon, '_find_volume_group',
+                       return_value=tpd.PowerMaxData.test_group)
+    def test_failover_replication_failover_partitioned(
+            self, mck_find_vol_grp, mck_get_rdf_grp, mck_failover, mck_part):
+        volumes = [self.data.test_volume_group_member]
+        vol_group = self.data.test_group
+        vol_grp_name = self.data.test_group.name
+        model_update, __ = self.common._failover_replication(
+            volumes, vol_group, vol_grp_name, host=True)
+        self.assertEqual(fields.ReplicationStatus.FAILED_OVER,
+                         model_update['replication_status'])
+        self.assertEqual(0, mck_failover.call_count)
+
     @mock.patch.object(common.PowerMaxCommon, '_failover_replication',
                        return_value=({}, {}))
     @mock.patch.object(common.PowerMaxCommon, '_sync_check')
@@ -438,6 +523,60 @@ class PowerMaxReplicationTest(test.TestCase):
             extra_specs, rep_config)
         self.assertEqual(rep_specs, rep_extra_specs)

+    @mock.patch.object(
+        rest.PowerMaxRest, 'get_rdf_pair_volume',
+        return_value={utils.RDF_PAIR_STATE: utils.RDF_PARTITIONED_STATE})
+    def test_rdf_vols_partitioned_true_partitioned(self, mck_pair):
+        array = self.data.array
+        volumes = [self.data.test_rep_volume]
+        rdfg = self.data.rdf_group_no_1
+        device_id = self.data.device_id2
+        is_partitioned = self.common._rdf_vols_partitioned(
+            array, volumes, rdfg)
+        self.assertTrue(is_partitioned)
+        mck_pair.assert_called_once_with(array, rdfg, device_id)
+
+    @mock.patch.object(
+        rest.PowerMaxRest, 'get_rdf_pair_volume',
+        return_value={utils.RDF_PAIR_STATE: utils.RDF_TRANSIDLE_STATE})
+    def test_rdf_vols_partitioned_true_transidle(self, mck_pair):
+        array = self.data.array
+        volumes = [self.data.test_rep_volume]
+        rdfg = self.data.rdf_group_no_1
+        device_id = self.data.device_id2
+        is_partitioned = self.common._rdf_vols_partitioned(
+            array, volumes, rdfg)
+        self.assertTrue(is_partitioned)
+        mck_pair.assert_called_once_with(array, rdfg, device_id)
+
+    @mock.patch.object(
+        rest.PowerMaxRest, 'get_rdf_pair_volume',
+        return_value={utils.RDF_PAIR_STATE: utils.RDF_SUSPENDED_STATE})
+    def test_rdf_vols_partitioned_false(self, mck_pair):
+        array = self.data.array
+        volumes = [self.data.test_rep_volume]
+        rdfg = self.data.rdf_group_no_1
+        device_id = self.data.device_id2
+        is_partitioned = self.common._rdf_vols_partitioned(
+            array, volumes, rdfg)
+        self.assertFalse(is_partitioned)
+        mck_pair.assert_called_once_with(array, rdfg, device_id)
+
+    @mock.patch.object(
+        rest.PowerMaxRest, 'get_rdf_pair_volume',
+        return_value={utils.RDF_PAIR_STATE: utils.RDF_PARTITIONED_STATE})
+    def test_rdf_vols_partitioned_true_promotion(self, mck_pair):
+        self.common.promotion = True
+        array = self.data.array
+        volumes = [self.data.test_rep_volume]
+        rdfg = self.data.rdf_group_no_1
+        device_id = self.data.device_id
+        is_partitioned = self.common._rdf_vols_partitioned(
+            array, volumes, rdfg)
+        self.assertTrue(is_partitioned)
+        self.common.promotion = False
+        mck_pair.assert_called_once_with(array, rdfg, device_id)
+
     def test_get_secondary_stats(self):
         rep_config = self.data.rep_config_sync
         array_map = self.common.get_attributes_from_cinder_config()
@@ -1001,6 +1140,86 @@ class PowerMaxReplicationTest(test.TestCase):
         self.assertTrue(success)
         self.assertEqual(self.data.replication_model, model_update)

+    @mock.patch.object(
+        provision.PowerMaxProvision, 'verify_slo_workload',
+        return_value=(True, True))
+    @mock.patch.object(
+        common.PowerMaxCommon, 'break_rdf_device_pair_session_promotion')
+    @mock.patch.object(
+        common.PowerMaxCommon, 'get_volume_metadata', return_value='')
+    @mock.patch.object(
+        common.PowerMaxCommon, '_retype_volume',
+        return_value=(True, tpd.PowerMaxData.defaultstoragegroup_name))
+    def test_migrate_volume_success_rep_promotion(
+            self, mck_retype, mck_get, mck_break, mck_valid):
+        array_id = self.data.array
+        volume = self.data.test_rep_volume
+        device_id = self.data.device_id
+        srp = self.data.srp
+        target_slo = self.data.slo_silver
+        target_workload = self.data.workload
+        volume_name = volume.name
+        new_type = {'extra_specs': {}}
+        extra_specs = self.data.rep_extra_specs_rep_config
+        self.common.promotion = True
+        target_extra_specs = {
+            utils.SRP: srp, utils.ARRAY: array_id, utils.SLO: target_slo,
+            utils.WORKLOAD: target_workload,
+            utils.INTERVAL: extra_specs[utils.INTERVAL],
+            utils.RETRIES: extra_specs[utils.RETRIES],
+            utils.DISABLECOMPRESSION: False}
+        success, model_update = self.common._migrate_volume(
+            array_id, volume, device_id, srp, target_slo, target_workload,
+            volume_name, new_type, extra_specs)
+        mck_break.assert_called_once_with(
+            array_id, device_id, volume_name, extra_specs)
+        mck_retype.assert_called_once_with(
+            array_id, srp, device_id, volume, volume_name, extra_specs,
+            target_slo, target_workload, target_extra_specs)
+        self.assertTrue(success)
+        self.common.promotion = False
+
+    @mock.patch.object(
+        common.PowerMaxCommon, '_rdf_vols_partitioned',
+        return_value=True)
+    @mock.patch.object(
+        provision.PowerMaxProvision, 'verify_slo_workload',
+        return_value=(True, True))
+    @mock.patch.object(
+        common.PowerMaxCommon, 'break_rdf_device_pair_session_promotion')
+    @mock.patch.object(
+        common.PowerMaxCommon, 'get_volume_metadata', return_value='')
+    @mock.patch.object(
+        common.PowerMaxCommon, '_retype_volume',
+        return_value=(True, tpd.PowerMaxData.defaultstoragegroup_name))
+    def test_migrate_volume_success_rep_partitioned(
+            self, mck_retype, mck_get, mck_break, mck_valid, mck_partitioned):
+        array_id = self.data.array
+        volume = self.data.test_rep_volume
+        device_id = self.data.device_id
+        srp = self.data.srp
+        target_slo = self.data.slo_silver
+        target_workload = self.data.workload
+        volume_name = volume.name
+        new_type = {'extra_specs': {}}
+        extra_specs = self.data.rep_extra_specs_rep_config
+        self.common.promotion = True
+        target_extra_specs = {
+            utils.SRP: srp, utils.ARRAY: array_id, utils.SLO: target_slo,
+            utils.WORKLOAD: target_workload,
+            utils.INTERVAL: extra_specs[utils.INTERVAL],
+            utils.RETRIES: extra_specs[utils.RETRIES],
+            utils.DISABLECOMPRESSION: False}
+        success, model_update = self.common._migrate_volume(
+            array_id, volume, device_id, srp, target_slo, target_workload,
+            volume_name, new_type, extra_specs)
+        self.assertEqual(0, mck_break.call_count)
+        mck_retype.assert_called_once_with(
+            array_id, srp, device_id, volume, volume_name, extra_specs,
+            target_slo, target_workload, target_extra_specs)
+        self.assertTrue(success)
+        self.common.promotion = False
+
     @mock.patch.object(masking.PowerMaxMasking, 'add_volume_to_storage_group')
     @mock.patch.object(provision.PowerMaxProvision, 'get_or_create_group')
     @mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name',
@@ -1414,6 +1633,26 @@ class PowerMaxReplicationTest(test.TestCase):
         self.assertEqual(extra_specs[utils.REP_CONFIG], rep_extra_specs)
         self.assertTrue(resume_rdf)

+    @mock.patch.object(masking.PowerMaxMasking, 'remove_volume_from_sg')
+    @mock.patch.object(rest.PowerMaxRest, 'srdf_delete_device_pair')
+    @mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name',
+                       return_value=tpd.PowerMaxData.rdf_managed_async_grp)
+    def test_break_rdf_device_pair_session_promotion_metro(
+            self, mck_get, mck_del, mck_rem):
+        array = self.data.array
+        device_id = self.data.device_id
+        volume_name = self.data.test_rep_volume.name
+        extra_specs = self.data.ex_specs_rep_config
+        rep_config = extra_specs[utils.REP_CONFIG]
+        mgmt_group = self.data.rdf_managed_async_grp
+        rdfg_no = extra_specs['rdf_group_no']
+        self.common.break_rdf_device_pair_session_promotion(
+            array, device_id, volume_name, extra_specs)
+        mck_get.assert_called_once_with(rep_config)
+        mck_del.assert_called_once_with(array, rdfg_no, device_id)
+        mck_rem.assert_called_once_with(
+            array, device_id, volume_name, mgmt_group, extra_specs)
+
     @mock.patch.object(rest.PowerMaxRest, 'get_rdf_group',
                        return_value=tpd.PowerMaxData.rdf_group_details)
     @mock.patch.object(

@@ -354,6 +354,23 @@ class PowerMaxUtilsTest(test.TestCase):
                      {'pool_name': 'Diamond+SRP_1+000197800111'}]
         self.assertEqual(ref_pools, new_pools)

+    def test_add_promotion_pools(self):
+        array = self.data.array
+        pools = [{'pool_name': 'Diamond+None+SRP_1+000197800111',
+                  'location_info': '000197800111#SRP_1#None#Diamond'},
+                 {'pool_name': 'Gold+OLTP+SRP_1+000197800111',
+                  'location_info': '000197800111#SRP_1#OLTP#Gold'}]
+        new_pools = self.utils.add_promotion_pools(pools, array)
+        ref_pools = [{'pool_name': 'Diamond+None+SRP_1+000197800111',
+                      'location_info': '000197800111#SRP_1#None#Diamond'},
+                     {'pool_name': 'Gold+OLTP+SRP_1+000197800111',
+                      'location_info': '000197800111#SRP_1#OLTP#Gold'},
+                     {'pool_name': 'Diamond+None+SRP_1+000197800123',
+                      'location_info': '000197800123#SRP_1#None#Diamond'},
+                     {'pool_name': 'Gold+OLTP+SRP_1+000197800123',
+                      'location_info': '000197800123#SRP_1#OLTP#Gold'}]
+        self.assertEqual(ref_pools, new_pools)
+
     def test_update_volume_group_name(self):
         group = self.data.test_group_1
         ref_group_name = self.data.test_vol_grp_name
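
add_promotion_pools itself is not shown in this excerpt; below is a minimal
sketch that is consistent with the reference values asserted above (an assumed
implementation, not necessarily the shipped one):

    def add_promotion_pools(pools, primary_array):
        # Duplicate every reported pool so it is also advertised under the
        # configured primary array serial, keeping the originals first.
        new_pools = list(pools)
        for pool in pools:
            name_parts = pool['pool_name'].split('+')
            name_parts[-1] = primary_array
            loc_parts = pool['location_info'].split('#')
            loc_parts[0] = primary_array
            new_pools.append({'pool_name': '+'.join(name_parts),
                              'location_info': '#'.join(loc_parts)})
        return new_pools
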
@@ -1277,6 +1294,15 @@ class PowerMaxUtilsTest(test.TestCase):
                           self.utils.validate_multiple_rep_device,
                           rep_devices)

+    def test_validate_multiple_rep_device_promotion_start_backend_id(self):
+        backend_id = utils.PMAX_FAILOVER_START_ARRAY_PROMOTION
+        rep_devices = deepcopy(self.data.multi_rep_device)
+        rep_devices[0][utils.BACKEND_ID] = backend_id
+        self.assertRaises(
+            exception.InvalidConfigurationValue,
+            self.utils.validate_multiple_rep_device,
+            rep_devices)
+
     def test_validate_multiple_rep_device_missing_backend_id(self):
         rep_devices = deepcopy(self.data.multi_rep_device)
         rep_devices[0].pop(utils.BACKEND_ID)
@@ -1357,6 +1383,12 @@ class PowerMaxUtilsTest(test.TestCase):
             excep_msg = str(e)
         self.assertIn(expected_str, excep_msg)

+    def test_get_rep_config_promotion_stats(self):
+        rep_configs = self.data.multi_rep_config_list
+        backend_id = 'testing'
+        rep_device = self.utils.get_rep_config(backend_id, rep_configs, True)
+        self.assertEqual(rep_configs[0], rep_device)
+
     def test_get_replication_targets(self):
         rep_targets_expected = [self.data.remote_array]
         rep_configs = self.data.multi_rep_config_list
@@ -1365,25 +1397,27 @@ class PowerMaxUtilsTest(test.TestCase):

     def test_validate_failover_request_success(self):
         is_failed_over = False
+        is_promoted = False
         failover_backend_id = self.data.rep_backend_id_sync
         rep_configs = self.data.multi_rep_config_list
         primary_array = self.data.array
         array_list = [self.data.array]
         is_valid, msg = self.utils.validate_failover_request(
             is_failed_over, failover_backend_id, rep_configs,
-            primary_array, array_list)
+            primary_array, array_list, is_promoted)
         self.assertTrue(is_valid)
         self.assertEqual("", msg)

     def test_validate_failover_request_already_failed_over(self):
         is_failed_over = True
+        is_promoted = False
         failover_backend_id = self.data.rep_backend_id_sync
         rep_configs = self.data.multi_rep_config_list
         primary_array = self.data.array
         array_list = [self.data.array]
         is_valid, msg = self.utils.validate_failover_request(
             is_failed_over, failover_backend_id, rep_configs,
-            primary_array, array_list)
+            primary_array, array_list, is_promoted)
         self.assertFalse(is_valid)
         expected_msg = ('Cannot failover, the backend is already in a failed '
                         'over state, if you meant to failback, please add '
@@ -1392,13 +1426,14 @@ class PowerMaxUtilsTest(test.TestCase):

     def test_validate_failover_request_failback_missing_array(self):
         is_failed_over = True
+        is_promoted = False
         failover_backend_id = 'default'
         rep_configs = self.data.multi_rep_config_list
         primary_array = self.data.array
         array_list = [self.data.remote_array]
         is_valid, msg = self.utils.validate_failover_request(
             is_failed_over, failover_backend_id, rep_configs,
-            primary_array, array_list)
+            primary_array, array_list, is_promoted)
         self.assertFalse(is_valid)
         expected_msg = ('Cannot failback, the configured primary array is '
                         'not currently available to perform failback to. '
@@ -1406,15 +1441,33 @@ class PowerMaxUtilsTest(test.TestCase):
                         'Unisphere.') % primary_array
         self.assertEqual(expected_msg, msg)

+    def test_validate_failover_request_promotion_finalize(self):
+        is_failed_over = True
+        is_promoted = True
+        failover_backend_id = utils.PMAX_FAILOVER_START_ARRAY_PROMOTION
+        rep_configs = self.data.multi_rep_config_list
+        primary_array = self.data.array
+        array_list = [self.data.array]
+        is_valid, msg = self.utils.validate_failover_request(
+            is_failed_over, failover_backend_id, rep_configs,
+            primary_array, array_list, is_promoted)
+        self.assertFalse(is_valid)
+        expected_msg = ('Failover promotion currently in progress, please '
+                        'finish the promotion process and issue a failover '
+                        'using the "default" backend_id to complete this '
+                        'process.')
+        self.assertEqual(expected_msg, msg)
+
     def test_validate_failover_request_invalid_failback(self):
         is_failed_over = False
+        is_promoted = False
         failover_backend_id = 'default'
         rep_configs = self.data.multi_rep_config_list
         primary_array = self.data.array
         array_list = [self.data.array]
         is_valid, msg = self.utils.validate_failover_request(
             is_failed_over, failover_backend_id, rep_configs,
-            primary_array, array_list)
+            primary_array, array_list, is_promoted)
         self.assertFalse(is_valid)
         expected_msg = ('Cannot failback, backend is not in a failed over '
                         'state. If you meant to failover, please either omit '
@@ -1424,13 +1477,14 @@ class PowerMaxUtilsTest(test.TestCase):

     def test_validate_failover_request_no_backend_id_multi_rep(self):
         is_failed_over = False
+        is_promoted = False
         failover_backend_id = None
         rep_configs = self.data.multi_rep_config_list
         primary_array = self.data.array
         array_list = [self.data.array]
         is_valid, msg = self.utils.validate_failover_request(
             is_failed_over, failover_backend_id, rep_configs,
-            primary_array, array_list)
+            primary_array, array_list, is_promoted)
         self.assertFalse(is_valid)
         expected_msg = ('Cannot failover, no backend_id provided while '
                         'multiple replication devices are defined in '
@@ -1441,6 +1495,7 @@ class PowerMaxUtilsTest(test.TestCase):

     def test_validate_failover_request_incorrect_backend_id_multi_rep(self):
         is_failed_over = False
+        is_promoted = False
         failover_backend_id = 'invalid_id'
         rep_configs = self.data.multi_rep_config_list
         primary_array = self.data.array
@@ -1448,7 +1503,23 @@ class PowerMaxUtilsTest(test.TestCase):
         self.assertRaises(exception.InvalidInput,
                           self.utils.validate_failover_request,
                           is_failed_over, failover_backend_id, rep_configs,
-                          primary_array, array_list)
+                          primary_array, array_list, is_promoted)
+
+    def test_validate_failover_request_promotion_before_failover(self):
+        is_failed_over = False
+        is_promoted = False
+        failover_backend_id = utils.PMAX_FAILOVER_START_ARRAY_PROMOTION
+        rep_configs = self.data.multi_rep_config_list
+        primary_array = self.data.array
+        array_list = [self.data.array]
+        is_valid, msg = self.utils.validate_failover_request(
+            is_failed_over, failover_backend_id, rep_configs,
+            primary_array, array_list, is_promoted)
+        self.assertFalse(is_valid)
+        expected_msg = ('Cannot start failover promotion. The backend must '
+                        'already be in a failover state to perform this'
+                        'action.')
+        self.assertEqual(expected_msg, msg)

     def test_validate_replication_group_config_success(self):
         rep_configs = deepcopy(self.data.multi_rep_config_list)
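
Taken together, the validate_failover_request tests above pin this decision
table (a summary of the assertions, not separate documentation):

    # failed_over=False, backend_id=<rep device id>       -> valid
    # failed_over=True,  backend_id=<rep device id>       -> invalid: already failed over
    # failed_over=True,  backend_id='default', primary array unavailable -> invalid
    # is_promoted=True,  backend_id=START_ARRAY_PROMOTION -> invalid: finish promotion
    #                                                        with 'default' first
    # failed_over=False, backend_id=None (multi-rep)      -> invalid: backend_id required
    # failed_over=False, backend_id=START_ARRAY_PROMOTION -> invalid: must already be
    #                                                        failed over
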
@@ -219,9 +219,12 @@ class PowerMaxCommon(object):
         self.replication_enabled = False
         self.rep_devices = []
         self.failover = True if active_backend_id else False
+        self.promotion = False
         self.powermax_array_tag_list = None
         self.powermax_short_host_name_template = None
         self.powermax_port_group_name_template = None
+        if active_backend_id == utils.PMAX_FAILOVER_START_ARRAY_PROMOTION:
+            self.promotion = True

         # Gather environment info
         self._get_replication_info()
@@ -850,7 +853,8 @@ class PowerMaxCommon(object):
                 array, volume, device_info['device_id'], extra_specs,
                 connector, is_multiattach, async_grp=mgmt_sg_name,
                 host_template=self.powermax_short_host_name_template)
-            if self.utils.is_metro_device(rep_config, extra_specs):
+            if (self.utils.is_metro_device(rep_config, extra_specs) and
+                    not self.promotion):
                 # Need to remove from remote masking view
                 device_info, __ = (self.find_host_lun_id(
                     volume, host_name, extra_specs, rep_extra_specs))
@@ -875,6 +879,31 @@ class PowerMaxCommon(object):
                 volume, extra_specs, device_info['device_id'], mv_list,
                 sg_list)

+    def _unmap_lun_promotion(self, volume, connector):
+        """Unmaps a volume from the host during promotion.
+
+        :param volume: the volume Object
+        :param connector: the connector Object
+        """
+        extra_specs = self._initial_setup(volume)
+        if not self.utils.is_replication_enabled(extra_specs):
+            LOG.error('Unable to terminate connections for non-replicated '
+                      'volumes during promotion failover. Could not unmap '
+                      'volume %s', volume.id)
+        else:
+            mode = extra_specs[utils.REP_MODE]
+            if mode == utils.REP_METRO:
+                self._unmap_lun(volume, connector)
+            else:
+                # During a promotion scenario only Metro volumes will have
+                # connections present on their remote volumes.
+                loc = ast.literal_eval(volume.provider_location)
+                device_id = loc.get('device_id')
+                promotion_key = [utils.PMAX_FAILOVER_START_ARRAY_PROMOTION]
+                self.volume_metadata.capture_detach_info(
+                    volume, extra_specs, device_id, promotion_key,
+                    promotion_key)
+
     def initialize_connection(self, volume, connector):
         """Initializes the connection and returns device and connection info.

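Usage note: _unmap_lun_promotion is only reached through the
terminate_connection dispatch added below (@@ -1116,7 +1145,10 @@). For
non-Metro replicated volumes it records detach metadata tagged with the
promotion sentinel instead of touching the array, since only Metro volumes
hold connections on their remote devices during promotion.
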
@@ -1116,7 +1145,10 @@ class PowerMaxCommon(object):
         volume_name = volume.name
         LOG.info("Terminate connection: %(volume)s.",
                  {'volume': volume_name})
-        self._unmap_lun(volume, connector)
+        if self.promotion:
+            self._unmap_lun_promotion(volume, connector)
+        else:
+            self._unmap_lun(volume, connector)

     def extend_volume(self, volume, new_size):
         """Extends an existing volume.
@@ -1333,7 +1365,7 @@ class PowerMaxCommon(object):
         for array_info in array_info_list:
             if self.failover:
                 rep_config = self.utils.get_rep_config(
-                    self.active_backend_id, self.rep_configs)
+                    self.active_backend_id, self.rep_configs, True)
                 array_info = self.get_secondary_stats_info(
                     rep_config, array_info)
             # Add both SLO & Workload name in the pool name
@@ -1393,6 +1425,9 @@ class PowerMaxCommon(object):

             pools.append(pool)
         pools = self.utils.add_legacy_pools(pools)
+        if self.promotion:
+            primary_array = self.configuration.safe_get('powermax_array')
+            pools = self.utils.add_promotion_pools(pools, primary_array)
         data = {'vendor_name': "Dell EMC",
                 'driver_version': self.version,
                 'storage_protocol': 'unknown',
@@ -3798,6 +3833,11 @@ class PowerMaxCommon(object):
                  {'volume': volume_name})

         extra_specs = self._initial_setup(volume)
+        if self.utils.is_replication_enabled(extra_specs) and self.promotion:
+            rep_config = extra_specs.get('rep_config')
+            extra_specs = self._get_replication_extra_specs(
+                extra_specs, rep_config)
+
         if not self.utils.is_retype_supported(volume, extra_specs,
                                               new_type['extra_specs'],
                                               self.rep_configs):
@@ -3839,17 +3879,21 @@ class PowerMaxCommon(object):
         # Check if old type and new type have different compression types
         do_change_compression = (self.utils.change_compression_type(
             is_compression_disabled, new_type))
+        is_tgt_rep = self.utils.is_replication_enabled(
+            new_type[utils.EXTRA_SPECS])
         is_valid, target_slo, target_workload = (
             self._is_valid_for_storage_assisted_migration(
                 device_id, host, extra_specs[utils.ARRAY],
                 extra_specs[utils.SRP], volume_name,
-                do_change_compression, do_change_replication))
+                do_change_compression, do_change_replication,
+                extra_specs[utils.SLO], extra_specs[utils.WORKLOAD],
+                is_tgt_rep))

         if not is_valid:
             # Check if this is multiattach retype case
             do_change_multiattach = self.utils.change_multiattach(
                 extra_specs, new_type['extra_specs'])
-            if do_change_multiattach:
+            if do_change_multiattach and not self.promotion:
                 return True
             else:
                 LOG.error(
@@ -3934,7 +3978,7 @@ class PowerMaxCommon(object):
             utils.BACKEND_ID_LEGACY_REP)
         backend_ids_differ = curr_backend_id != tgt_backend_id

-        if was_rep_enabled:
+        if was_rep_enabled and not self.promotion:
             self._validate_rdfg_status(array, extra_specs)
             orig_mgmt_sg_name = self.utils.get_rdf_management_group_name(
                 extra_specs[utils.REP_CONFIG])
@@ -3953,11 +3997,23 @@ class PowerMaxCommon(object):
         # Scenario 1: Rep -> Non-Rep
         # Scenario 2: Cleanup for Rep -> Diff Rep type
         if (was_rep_enabled and not is_rep_enabled) or backend_ids_differ:
-            rep_extra_specs, resume_original_sg = (
-                self.break_rdf_device_pair_session(
-                    array, device_id, volume_name, extra_specs, volume))
+            if self.promotion:
+                resume_original_sg = False
+                rdf_group = extra_specs['rdf_group_no']
+                is_partitioned = self._rdf_vols_partitioned(
+                    array, [volume], rdf_group)
+                if not is_partitioned:
+                    self.break_rdf_device_pair_session_promotion(
+                        array, device_id, volume_name, extra_specs)
+            else:
+                rep_extra_specs, resume_original_sg = (
+                    self.break_rdf_device_pair_session(
+                        array, device_id, volume_name, extra_specs,
+                        volume))
+            status = (REPLICATION_ERROR if self.promotion else
+                      REPLICATION_DISABLED)
             model_update = {
-                'replication_status': REPLICATION_DISABLED,
+                'replication_status': status,
                 'replication_driver_data': None}
             rdf_pair_broken = True
             if resume_original_sg:
@@ -4014,7 +4070,8 @@ class PowerMaxCommon(object):
             self.rest.srdf_resume_replication(
                 array, rep_extra_specs['mgmt_sg_name'],
                 rep_extra_specs['rdf_group_no'], rep_extra_specs)
-        if resume_original_sg and resume_original_sg_dict:
+        if (resume_original_sg and resume_original_sg_dict and
+                not self.promotion):
             self.rest.srdf_resume_replication(
                 resume_original_sg_dict[utils.ARRAY],
                 resume_original_sg_dict[utils.SG_NAME],
|
||||
model_update, volume.metadata,
|
||||
self.get_volume_metadata(array, device_id))
|
||||
|
||||
if self.promotion:
|
||||
previous_host = volume.get('host')
|
||||
host_details = previous_host.split('+')
|
||||
array_index = len(host_details) - 1
|
||||
host_details[array_index] = array
|
||||
updated_host = '+'.join(host_details)
|
||||
model_update['host'] = updated_host
|
||||
|
||||
target_backend_id = None
|
||||
if is_rep_enabled:
|
||||
target_backend_id = target_extra_specs.get(
|
||||
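
The host rewrite above swaps only the trailing array serial in the Cinder host
string; a worked example using serials from the test data in this diff
(illustrative values, not output captured from the driver):

    # previous_host = 'HostX@Backend#Silver+SRP_1+000197800111'
    # previous_host.split('+') -> ['HostX@Backend#Silver', 'SRP_1', '000197800111']
    # last element replaced with the current array serial and rejoined:
    # model_update['host'] = 'HostX@Backend#Silver+SRP_1+000197800123'
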
@@ -4054,6 +4119,19 @@ class PowerMaxCommon(object):
             self, rdf_pair_broken, rdf_pair_created, vol_retyped,
             remote_retyped, extra_specs, target_extra_specs, volume,
             volume_name, device_id, source_sg):
+        """Attempt rollback to previous volume state before migrate exception.
+
+        :param rdf_pair_broken: was the rdf pair broken during migration
+        :param rdf_pair_created: was a new rdf pair created during migration
+        :param vol_retyped: was the local volume retyped during migration
+        :param remote_retyped: was the remote volume retyped during migration
+        :param extra_specs: extra specs
+        :param target_extra_specs: target extra specs
+        :param volume: volume
+        :param volume_name: volume name
+        :param device_id: local device id
+        :param source_sg: local device pre-migrate storage group name
+        """
         array = extra_specs[utils.ARRAY]
         srp = extra_specs[utils.SRP]
         slo = extra_specs[utils.SLO]
|
||||
parent_sg = None
|
||||
if self.utils.is_replication_enabled(target_extra_specs):
|
||||
is_re, rep_mode = True, target_extra_specs['rep_mode']
|
||||
if self.utils.is_replication_enabled(extra_specs):
|
||||
mgmt_sg_name = self.utils.get_rdf_management_group_name(
|
||||
target_extra_specs[utils.REP_CONFIG])
|
||||
extra_specs[utils.REP_CONFIG])
|
||||
|
||||
device_info = self.rest.get_volume(array, device_id)
|
||||
|
||||
@@ -4242,6 +4321,21 @@ class PowerMaxCommon(object):
             self, created_child_sg, add_sg_to_parent, got_default_sg,
             moved_between_sgs, array, source_sg, parent_sg, target_sg_name,
             extra_specs, device_id, volume, volume_name):
+        """Attempt to rollback to previous volume state on retype exception.
+
+        :param created_child_sg: was a child sg created during retype
+        :param add_sg_to_parent: was a child sg added to parent during retype
+        :param got_default_sg: was a default sg possibly created during retype
+        :param moved_between_sgs: was the volume moved between storage groups
+        :param array: array
+        :param source_sg: volumes originating storage group name
+        :param parent_sg: parent storage group name
+        :param target_sg_name: storage group volume was to be moved to
+        :param extra_specs: extra specs
+        :param device_id: device id
+        :param volume: volume
+        :param volume_name: volume name
+        """
         if moved_between_sgs:
             LOG.debug('Volume retype cleanup - Attempt to revert move between '
                       'storage groups.')
@@ -4416,7 +4510,8 @@ class PowerMaxCommon(object):

     def _is_valid_for_storage_assisted_migration(
             self, device_id, host, source_array, source_srp, volume_name,
-            do_change_compression, do_change_replication):
+            do_change_compression, do_change_replication, source_slo,
+            source_workload, is_tgt_rep):
         """Check if volume is suitable for storage assisted (pool) migration.

         :param device_id: the volume device id
@@ -4426,6 +4521,9 @@ class PowerMaxCommon(object):
         :param volume_name: the name of the volume to be migrated
         :param do_change_compression: do change compression
         :param do_change_replication: flag indicating replication change
+        :param source_slo: slo setting for source volume type
+        :param source_workload: workload setting for source volume type
+        :param is_tgt_rep: is the target volume type replication enabled
         :returns: boolean -- True/False
         :returns: string -- targetSlo
         :returns: string -- targetWorkload
@@ -4460,24 +4558,53 @@ class PowerMaxCommon(object):
             LOG.error("Error parsing array, pool, SLO and workload.")
             return false_ret

-        if target_array_serial not in source_array:
-            LOG.error(
-                "The source array: %(source_array)s does not "
-                "match the target array: %(target_array)s - "
-                "skipping storage-assisted migration.",
-                {'source_array': source_array,
-                 'target_array': target_array_serial})
-            return false_ret
+        if self.promotion:
+            if do_change_compression:
+                LOG.error(
+                    "When retyping during array promotion, compression "
+                    "changes should not occur during the retype operation. "
+                    "Please ensure the same compression settings are defined "
+                    "in the source and target volume types.")
+                return false_ret

-        if target_srp not in source_srp:
-            LOG.error(
-                "Only SLO/workload migration within the same SRP Pool is "
-                "supported in this version. The source pool: "
-                "%(source_pool_name)s does not match the target array: "
-                "%(target_pool)s. Skipping storage-assisted migration.",
-                {'source_pool_name': source_srp,
-                 'target_pool': target_srp})
-            return false_ret
+            if source_slo != target_slo:
+                LOG.error(
+                    "When retyping during array promotion, the SLO setting "
+                    "for the source and target volume types should match. "
+                    "Found %s SLO for the source volume type and %s SLO for "
+                    "the target volume type.", source_slo, target_slo)
+                return false_ret
+
+            if source_workload != target_workload:
+                LOG.error(
+                    "When retyping during array promotion, the workload "
+                    "setting for the source and target volume types should "
+                    "match. Found %s workload for the source volume type "
+                    "and %s workload for the target volume type.",
+                    source_workload, target_workload)
+                return false_ret
+
+            if is_tgt_rep:
+                LOG.error(
+                    "When retyping during array promotion, the target volume "
+                    "type should not have replication enabled. Please ensure "
+                    "replication is disabled on the target volume type.")
+                return false_ret
+
+        if not self.promotion:
+            if target_array_serial not in source_array:
+                LOG.error("The source array: %s does not match the target "
+                          "array: %s - skipping storage-assisted "
+                          "migration.", source_array, target_array_serial)
+                return false_ret
+
+            if target_srp not in source_srp:
+                LOG.error(
+                    "Only SLO/workload migration within the same SRP Pool is "
+                    "supported in this version. The source pool: %s does not "
+                    "match the target array: %s. Skipping storage-assisted "
+                    "migration.", source_srp, target_srp)
+                return false_ret

         found_storage_group_list = self.rest.get_storage_groups_from_volume(
             source_array, device_id)
@@ -4608,6 +4735,23 @@ class PowerMaxCommon(object):
             add_to_mgmt_sg, r1_device_id, r2_device_id,
             mgmt_sg_name, array, remote_array, rdf_group_no, extra_specs,
             rep_extra_specs, volume, tgt_sg_name):
+        """Attempt rollback to previous volume state on setup rep exception.
+
+        :param resume_rdf: does the rdfg need to be resumed
+        :param rdf_pair_created: was an rdf pair created
+        :param remote_sg_get: was a remote storage group possibly created
+        :param add_to_mgmt_sg: was the volume added to a management group
+        :param r1_device_id: local device id
+        :param r2_device_id: remote device id
+        :param mgmt_sg_name: rdf management storage group name
+        :param array: array
+        :param remote_array: remote array
+        :param rdf_group_no: rdf group number
+        :param extra_specs: extra specs
+        :param rep_extra_specs: rep extra specs
+        :param volume: volume
+        :param tgt_sg_name: remote replication storage group name
+        """
         if resume_rdf and not rdf_pair_created:
             LOG.debug('Configure volume replication cleanup - Attempt to '
                       'resume replication.')
@ -4748,7 +4892,7 @@ class PowerMaxCommon(object):
|
||||
sg_name = r1_sg_names[0]
|
||||
rdf_pair = self.rest.get_rdf_pair_volume(
|
||||
array, rdfg_no, device_id)
|
||||
rdf_pair_state = rdf_pair['rdfpairState']
|
||||
rdf_pair_state = rdf_pair[utils.RDF_PAIR_STATE]
|
||||
if rdf_pair_state.lower() not in utils.RDF_SYNCED_STATES:
|
||||
self.rest.wait_for_rdf_pair_sync(
|
||||
array, rdfg_no, device_id, rep_extra_specs)
|
||||
@ -4804,6 +4948,23 @@ class PowerMaxCommon(object):
|
||||
management_sg, rdf_group_no, extra_specs, r2_sg_names, device_id,
|
||||
remote_array, remote_device_id, volume, volume_name,
|
||||
rep_extra_specs):
|
||||
"""Attempt rollback to previous volume state on remove rep exception.
|
||||
|
||||
:param rdfg_suspended: was the rdf group suspended
|
||||
:param pair_deleted: was the rdf pair deleted
|
||||
:param r2_sg_remove: was the remote volume removed from its sg
|
||||
:param array: array
|
||||
:param management_sg: rdf management storage group name
|
||||
:param rdf_group_no: rdf group number
|
||||
:param extra_specs: extra specs
|
||||
:param r2_sg_names: remote volume storage group names
|
||||
:param device_id: device id
|
||||
:param remote_array: remote array sid
|
||||
:param remote_device_id: remote device id
|
||||
:param volume: volume
|
||||
:param volume_name: volume name
|
||||
:param rep_extra_specs: rep extra specs
|
||||
"""
|
||||
if rdfg_suspended and not pair_deleted:
|
||||
LOG.debug('Break RDF pair cleanup - Attempt to resume RDFG.')
|
||||
self.rest.srdf_resume_replication(
|
||||
@ -4844,6 +5005,54 @@ class PowerMaxCommon(object):
|
||||
LOG.debug('Break RDF pair cleanup - Revert to original rdf '
|
||||
'pair successful.')
|
||||
|
||||
def break_rdf_device_pair_session_promotion(
|
||||
self, array, device_id, volume_name, extra_specs):
|
||||
"""Delete RDF device pair deleting R2 volume but leaving R1 in place.
|
||||
|
||||
:param array: the array serial number
|
||||
:param device_id: the device id
|
||||
:param volume_name: the volume name
|
||||
:param extra_specs: the volume extra specifications
|
||||
"""
|
||||
LOG.debug('Starting promotion replication cleanup for RDF pair '
|
||||
'source device: %(d_id)s.', {'d_id': device_id})
|
||||
|
||||
mgmt_sg_name = None
|
||||
rep_config = extra_specs[utils.REP_CONFIG]
|
||||
rdfg_no = extra_specs['rdf_group_no']
|
||||
extra_specs['force_vol_remove'] = True
|
||||
if rep_config['mode'] in [utils.REP_ASYNC, utils.REP_METRO]:
|
||||
mgmt_sg_name = self.utils.get_rdf_management_group_name(
|
||||
rep_config)
|
||||
|
||||
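        # Metro pairs report an active RDF state and cannot be deleted
        # until the RDF group is suspended, so check for active states
        # before attempting the delete below.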
        if rep_config['mode'] == utils.REP_METRO:
            group_states = self.rest.get_storage_group_rdf_group_state(
                array, mgmt_sg_name, rdfg_no)
            group_states = set([x.lower() for x in group_states])
            metro_active_states = {
                utils.RDF_ACTIVE, utils.RDF_ACTIVEACTIVE, utils.RDF_ACTIVEBIAS}
            active_state_found = (
                bool(group_states.intersection(metro_active_states)))
            if active_state_found:
                LOG.debug('Found Metro RDF in active state during promotion, '
                          'attempting to suspend.')
                try:
                    self.rest.srdf_suspend_replication(
                        array, mgmt_sg_name, rdfg_no, extra_specs)
                except exception.VolumeBackendAPIException:
                    LOG.error(
                        'Found Metro rdf pair in active state during '
                        'promotion. Attempt to suspend this group using '
                        'storage group %s failed. Please move the rdf pairs '
                        'in this storage group to a non-active state and '
                        'retry the retype operation.', mgmt_sg_name)
                    raise
        self.rest.srdf_delete_device_pair(array, rdfg_no, device_id)
        # Remove the volume from the R1 RDFG management SG
        if rep_config['mode'] in [utils.REP_ASYNC, utils.REP_METRO]:
            self.masking.remove_volume_from_sg(
                array, device_id, volume_name, mgmt_sg_name, extra_specs)

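For reference, the Metro active-state test above reduces to a set intersection; a standalone sketch, with state strings mirroring the RDF_* constants added to utils.py further down:

METRO_ACTIVE_STATES = {'active', 'activeactive', 'activebias'}

def needs_suspend(group_states):
    # True if any RDF group state reported for the storage group is active.
    return bool({s.lower() for s in group_states} & METRO_ACTIVE_STATES)

assert needs_suspend(['ActiveBias'])
assert not needs_suspend(['Suspended', 'SyncInProg'])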
    @coordination.synchronized('emc-{rdf_group}-rdf')
    def _cleanup_remote_target(
            self, array, volume, remote_array, device_id, target_device,
@ -4963,12 +5172,14 @@ class PowerMaxCommon(object):
        :returns: secondary_id, volume_update_list, group_update_list
        :raises: VolumeBackendAPIException
        """
        volume_update_list = list()
        group_update_list = list()
        primary_array = self._get_configuration_value(
            utils.VMAX_ARRAY, utils.POWERMAX_ARRAY)
        array_list = self.rest.get_arrays_list()
        is_valid, msg = self.utils.validate_failover_request(
            self.failover, secondary_id, self.rep_configs, primary_array,
            array_list)
            array_list, self.promotion)
        if not is_valid:
            LOG.error(msg)
            raise exception.InvalidReplicationTarget(msg)
@ -4977,13 +5188,21 @@ class PowerMaxCommon(object):
        if not self.failover:
            self.failover = True
            self.active_backend_id = secondary_id if secondary_id else None
        else:
        elif secondary_id == 'default':
            self.failover = False
            group_fo = 'default'

        volume_update_list, group_update_list = (
            self._populate_volume_and_group_update_lists(
                volumes, groups, group_fo))
        if secondary_id == utils.PMAX_FAILOVER_START_ARRAY_PROMOTION:
            self.promotion = True
            LOG.info("Enabled array promotion.")
        else:
            volume_update_list, group_update_list = (
                self._populate_volume_and_group_update_lists(
                    volumes, groups, group_fo))

        if secondary_id == 'default' and self.promotion:
            self.promotion = False
            LOG.info("Disabled array promotion.")

        LOG.info("Failover host complete.")
        return secondary_id, volume_update_list, group_update_list
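Read together, the failover and promotion flags form a small state machine keyed on secondary_id; a minimal standalone sketch of the transitions above (function name illustrative):

def next_state(failed_over, promotion, secondary_id):
    # Failing over sets the failover flag; the reserved promotion token
    # additionally enables promotion; failing back to 'default' clears both.
    if not failed_over:
        failed_over = True
    elif secondary_id == 'default':
        failed_over = False
    if secondary_id == 'pmax_failover_start_array_promotion':
        promotion = True
    elif secondary_id == 'default' and promotion:
        promotion = False
    return failed_over, promotion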
@ -5061,7 +5280,17 @@ class PowerMaxCommon(object):
                volume_update_list += vol_updates

        if len(non_rep_vol_list) > 0:
            if self.failover:
            if self.promotion:
                # Volumes that were promoted will have a replication state
                # of error with no other replication metadata. Use this to
                # determine which volumes should be updated to have a
                # replication state of disabled.
                for vol in non_rep_vol_list:
                    volume_update_list.append({
                        'volume_id': vol.id,
                        'updates': {
                            'replication_status': REPLICATION_DISABLED}})
            elif self.failover:
                # Since the array has been failed-over,
                # volumes without replication should be in error.
                for vol in non_rep_vol_list:
@ -5633,67 +5862,133 @@ class PowerMaxCommon(object):
                and not group.is_replicated):
            raise NotImplementedError()

        array, interval_retries_dict = self._get_volume_group_info(group)
        model_update = {'status': fields.GroupStatus.AVAILABLE}
        add_vols = [vol for vol in add_volumes] if add_volumes else []
        add_device_ids = self._get_volume_device_ids(add_vols, array)
        remove_vols = [vol for vol in remove_volumes] if remove_volumes else []
        remove_device_ids = self._get_volume_device_ids(remove_vols, array)
        vol_grp_name = None
        try:
            volume_group = self._find_volume_group(array, group)
            if volume_group:
                if 'name' in volume_group:
                    vol_grp_name = volume_group['name']
            if vol_grp_name is None:
                raise exception.GroupNotFound(group_id=group.id)
            # Add volume(s) to the group
            if add_device_ids:
                self.utils.check_rep_status_enabled(group)
                for vol in add_vols:
                    extra_specs = self._initial_setup(vol)
                    self.utils.check_replication_matched(vol, extra_specs)
                self.masking.add_volumes_to_storage_group(
                    array, add_device_ids, vol_grp_name, interval_retries_dict)
                if group.is_replicated:
                    # Add remote volumes to remote storage group
                    self.masking.add_remote_vols_to_volume_group(
                        add_vols, group, interval_retries_dict)
            # Remove volume(s) from the group
            if remove_device_ids:
                if group.is_replicated:
                    # Need force flag when manipulating RDF enabled SGs
                    interval_retries_dict[utils.FORCE_VOL_REMOVE] = True
                # Check if the volumes exist in the storage group
                temp_list = deepcopy(remove_device_ids)
                for device_id in temp_list:
                    if not self.rest.is_volume_in_storagegroup(
                            array, device_id, vol_grp_name):
                        remove_device_ids.remove(device_id)
        if self.promotion:
            self._update_group_promotion(
                group, add_volumes, remove_volumes)
        elif self.failover:
            msg = _('Cannot perform group updates during failover, please '
                    'either failback or perform a promotion operation.')
            raise exception.VolumeBackendAPIException(msg)
        else:
            array, interval_retries_dict = self._get_volume_group_info(group)
            add_vols = [vol for vol in add_volumes] if add_volumes else []
            add_device_ids = self._get_volume_device_ids(add_vols, array)
            remove_vols = [
                vol for vol in remove_volumes] if remove_volumes else []
            remove_device_ids = self._get_volume_device_ids(remove_vols, array)
            vol_grp_name = None
            try:
                volume_group = self._find_volume_group(array, group)
                if volume_group:
                    if 'name' in volume_group:
                        vol_grp_name = volume_group['name']
                if vol_grp_name is None:
                    raise exception.GroupNotFound(group_id=group.id)
                # Add volume(s) to the group
                if add_device_ids:
                    self.utils.check_rep_status_enabled(group)
                    for vol in add_vols:
                        extra_specs = self._initial_setup(vol)
                        self.utils.check_replication_matched(vol, extra_specs)
                    self.masking.add_volumes_to_storage_group(
                        array, add_device_ids, vol_grp_name,
                        interval_retries_dict)
                    if group.is_replicated:
                        # Add remote volumes to remote storage group
                        self.masking.add_remote_vols_to_volume_group(
                            add_vols, group, interval_retries_dict)
                # Remove volume(s) from the group
                if remove_device_ids:
                    self.masking.remove_volumes_from_storage_group(
                        array, remove_device_ids,
                        vol_grp_name, interval_retries_dict)
                    if group.is_replicated:
                        # Remove remote volumes from the remote storage group
                        self._remove_remote_vols_from_volume_group(
                            array, remove_vols, group, interval_retries_dict)
        except exception.GroupNotFound:
            raise
        except Exception as ex:
            exception_message = (_("Failed to update volume group:"
                                   " %(volGrpName)s. Exception: %(ex)s.")
                                 % {'volGrpName': group.id,
                                    'ex': ex})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(
                message=exception_message)
            if group.is_replicated:
                # Need force flag when manipulating RDF enabled SGs
                interval_retries_dict[utils.FORCE_VOL_REMOVE] = True
            # Check if the volumes exist in the storage group
            temp_list = deepcopy(remove_device_ids)
            for device_id in temp_list:
                if not self.rest.is_volume_in_storagegroup(
                        array, device_id, vol_grp_name):
                    remove_device_ids.remove(device_id)
            if remove_device_ids:
                self.masking.remove_volumes_from_storage_group(
                    array, remove_device_ids,
                    vol_grp_name, interval_retries_dict)
                if group.is_replicated:
                    # Remove remote volumes from the remote storage group
                    self._remove_remote_vols_from_volume_group(
                        array, remove_vols, group, interval_retries_dict)
            except exception.GroupNotFound:
                raise
            except Exception as ex:
                exception_message = (_("Failed to update volume group:"
                                       " %(volGrpName)s. Exception: %(ex)s.")
                                     % {'volGrpName': group.id,
                                        'ex': ex})
                LOG.error(exception_message)
                raise exception.VolumeBackendAPIException(
                    message=exception_message)

        self.volume_metadata.capture_modify_group(
            vol_grp_name, group.id, add_vols, remove_volumes, array)
            self.volume_metadata.capture_modify_group(
                vol_grp_name, group.id, add_vols, remove_volumes, array)

        return model_update, None, None

    def _update_group_promotion(self, group, add_volumes, remove_volumes):
        """Updates LUNs in a generic volume group during array promotion.

        :param group: the group object
        :param add_volumes: the volume uuids to add to the vol grp
        :param remove_volumes: the volume uuids to remove from the vol grp
        :returns: model_update
        :raises: VolumeBackendAPIException
        """
        if not group.is_replicated:
            msg = _('Group updates are only supported on replicated volume '
                    'groups during failover promotion.')
            raise exception.VolumeBackendAPIException(msg)
        if add_volumes:
            msg = _('Unable to add volumes to a group, only volume '
                    'removal is supported during promotion.')
            raise exception.VolumeBackendAPIException(msg)

        # Either add_volumes or remove_volumes must be provided; if
        # add_volumes is set an exception is raised above, so at this
        # point remove_volumes must be present.
        volume = remove_volumes[0]
        extra_specs = self._initial_setup(volume, volume.volume_type_id)
        rep_extra_specs = self._get_replication_extra_specs(
            extra_specs, extra_specs[utils.REP_CONFIG])
        remote_array = rep_extra_specs['array']

        vol_grp_name = None
        volume_group = self._find_volume_group(remote_array, group)
        if volume_group:
            if 'name' in volume_group:
                vol_grp_name = volume_group['name']
        if vol_grp_name is None:
            raise exception.GroupNotFound(group_id=group.id)

        interval_retries_dict = {
            utils.INTERVAL: self.interval, utils.RETRIES: self.retries}
        # Volumes have already failed over and had their provider_location
        # updated, do not get remote device IDs here
        remove_device_ids = self._get_volume_device_ids(
            remove_volumes, remote_array)
        if remove_device_ids:
            interval_retries_dict[utils.FORCE_VOL_REMOVE] = True
            # Check if the volumes exist in the storage group
            temp_list = deepcopy(remove_device_ids)
            for device_id in temp_list:
                if not self.rest.is_volume_in_storagegroup(
                        remote_array, device_id, vol_grp_name):
                    remove_device_ids.remove(device_id)
        if remove_device_ids:
            self.masking.remove_volumes_from_storage_group(
                remote_array, remove_device_ids,
                vol_grp_name, interval_retries_dict)
        self.volume_metadata.capture_modify_group(
            vol_grp_name, group.id, list(), remove_volumes, remote_array)

    def _remove_remote_vols_from_volume_group(
            self, array, volumes, group, extra_specs):
        """Remove the remote volumes from their volume group.
@ -6166,7 +6461,10 @@ class PowerMaxCommon(object):
        if vol_grp_name is None:
            raise exception.GroupNotFound(group_id=group.id)

        if not is_metro:
            is_partitioned = self._rdf_vols_partitioned(
                remote_array, volumes, rdf_group_no)

        if not is_metro and not is_partitioned:
            if failover:
                self.rest.srdf_failover_group(
                    remote_array, vol_grp_name, rdf_group_no, extra_specs)
@ -6217,6 +6515,29 @@ class PowerMaxCommon(object):
        LOG.debug("Volume model updates: %s", vol_model_updates)
        return model_update, vol_model_updates

    def _rdf_vols_partitioned(self, array, volumes, rdfg):
        """Check if RDF volumes have been failed over by the PowerMax array.

        :param array: remote array
        :param volumes: rdf volumes
        :param rdfg: rdf group
        :return: True if any device is in a partitioned state -- bool
        """
        is_partitioned = False
        for volume in volumes:
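            # After promotion the volume's provider_location already
            # refers to the promoted (remote) device; otherwise the RDF
            # metadata is kept in replication_driver_data.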
            if self.promotion:
                vol_data = volume.provider_location
            else:
                vol_data = volume.replication_driver_data
            vol_data = ast.literal_eval(vol_data)
            device_id = vol_data.get(utils.DEVICE_ID)
            vol_details = self.rest.get_rdf_pair_volume(array, rdfg, device_id)
            rdf_pair_state = vol_details.get(utils.RDF_PAIR_STATE, '').lower()
            if rdf_pair_state in utils.RDF_PARTITIONED_STATES:
                is_partitioned = True
                break
        return is_partitioned

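provider_location and replication_driver_data are stored as stringified dicts, which is why the loop above round-trips them through ast.literal_eval; a minimal example with hypothetical field values:

import ast

vol_data = ast.literal_eval("{'array': '000197800123', 'device_id': '0012F'}")
print(vol_data['device_id'])  # 0012F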
    def get_attributes_from_cinder_config(self):
        """Get all attributes from the configuration file.

@ -80,12 +80,16 @@ RDF_FAILEDOVER_STATE = 'failed over'
RDF_ACTIVE = 'active'
RDF_ACTIVEACTIVE = 'activeactive'
RDF_ACTIVEBIAS = 'activebias'
RDF_PARTITIONED_STATE = 'partitioned'
RDF_TRANSIDLE_STATE = 'transidle'
RDF_PAIR_STATE = 'rdfpairState'
RDF_VALID_STATES_SYNC = [RDF_SYNC_STATE, RDF_SUSPENDED_STATE,
                         RDF_SYNCINPROG_STATE]
RDF_VALID_STATES_ASYNC = [RDF_CONSISTENT_STATE, RDF_SUSPENDED_STATE,
                          RDF_SYNCINPROG_STATE]
RDF_VALID_STATES_METRO = [RDF_ACTIVEBIAS, RDF_ACTIVEACTIVE,
                          RDF_SUSPENDED_STATE, RDF_SYNCINPROG_STATE]
RDF_PARTITIONED_STATES = [RDF_PARTITIONED_STATE, RDF_TRANSIDLE_STATE]
RDF_CONS_EXEMPT = 'exempt'
RDF_ALLOW_METRO_DELETE = 'allow_delete_metro'
RDF_GROUP_NO = 'rdf_group_number'
@ -102,6 +106,7 @@ USED_HOST_NAME = "used_host_name"
RDF_SYNCED_STATES = [RDF_SYNC_STATE, RDF_CONSISTENT_STATE,
                     RDF_ACTIVEACTIVE, RDF_ACTIVEBIAS]
FORCE_VOL_REMOVE = 'force_vol_remove'
PMAX_FAILOVER_START_ARRAY_PROMOTION = 'pmax_failover_start_array_promotion'

# Multiattach constants
IS_MULTIATTACH = 'multiattach'
@ -794,6 +799,35 @@ class PowerMaxUtils(object):
            pools.append(new_pool)
        return pools

    @staticmethod
    def add_promotion_pools(pools, primary_array):
        """Add duplicate pools with the primary SID for use during promotion.

        :param pools: the pool list
        :param primary_array: the original primary array
        :returns: pools -- the updated pool list
        """
        i_pools = deepcopy(pools)
        for pool in i_pools:
            # pool name
            pool_name = pool['pool_name']
            split_name = pool_name.split('+')
            array_pos = 3 if len(split_name) == 4 else 2
            array_sid = split_name[array_pos]
            updated_pool_name = re.sub(array_sid, primary_array, pool_name)

            # location info
            loc = pool['location_info']
            split_loc = loc.split('#')
            split_loc[0] = primary_array  # Replace the array SID
            updated_loc = '#'.join(split_loc)

            new_pool = deepcopy(pool)
            new_pool['pool_name'] = updated_pool_name
            new_pool['location_info'] = updated_loc
            pools.append(new_pool)
        return pools

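A worked example of the duplication above, assuming the pool_name layout slo+workload+srp+arraySID and the location_info layout arraySID#srp#slo#workload implied by the split logic (array SIDs hypothetical):

pools = [{'pool_name': 'Diamond+OLTP+SRP_1+000197800124',
          'location_info': '000197800124#SRP_1#Diamond#OLTP'}]
pools = PowerMaxUtils.add_promotion_pools(pools, '000197800123')
# pools now also contains the duplicate entry:
# {'pool_name': 'Diamond+OLTP+SRP_1+000197800123',
#  'location_info': '000197800123#SRP_1#Diamond#OLTP'}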
    def check_replication_matched(self, volume, extra_specs):
        """Check volume type and group type.

@ -1204,6 +1238,14 @@ class PowerMaxUtils(object):
                        'are defined in cinder.conf, backend_id %s is '
                        'defined more than once.') % backend_id)
                raise exception.InvalidConfigurationValue(msg)
            elif backend_id == PMAX_FAILOVER_START_ARRAY_PROMOTION:
                msg = (_('Invalid Backend ID found. Defining a '
                         'replication device with a Backend ID of %s is '
                         'currently not supported. Please update '
                         'the Backend ID of the related replication '
                         'device in cinder.conf to use a valid '
                         'Backend ID value.') % backend_id)
                raise exception.InvalidConfigurationValue(msg)
            else:
                msg = _('Backend IDs must be assigned for each rep_device '
                        'when multiple replication devices are defined in '
@ -1787,11 +1829,12 @@ class PowerMaxUtils(object):
        return False

    @staticmethod
    def get_rep_config(backend_id, rep_configs):
    def get_rep_config(backend_id, rep_configs, promotion_vol_stats=False):
        """Get rep_config for given backend_id.

        :param backend_id: rep config search key -- str
        :param rep_configs: backend rep_configs -- list
        :param promotion_vol_stats: get rep config for vol stats -- bool
        :returns: rep_config -- dict
        """
        if len(rep_configs) == 1:
@ -1802,20 +1845,25 @@ class PowerMaxUtils(object):
            if rep_config[BACKEND_ID] == backend_id:
                rep_device = rep_config
        if rep_device is None:
            msg = (_('Could not find a replication_device with a '
                     'backend_id of "%s" in cinder.conf. Please confirm '
                     'that the replication_device_backend_id extra spec '
                     'for this volume type matches the backend_id of the '
                     'intended replication_device in '
                     'cinder.conf.') % backend_id)
            if BACKEND_ID_LEGACY_REP in msg:
                msg = (_('Could not find replication_device. Legacy '
                         'replication_device key found, please ensure the '
                         'backend_id for the legacy replication_device in '
                         'cinder.conf has been changed to '
                         '"%s".') % BACKEND_ID_LEGACY_REP)
            LOG.error(msg)
            raise exception.InvalidInput(msg)
            if promotion_vol_stats:
                # Stat collection only needs the remote array and srp; any
                # of the available replication_devices can provide this.
                rep_device = rep_configs[0]
            else:
                msg = (_('Could not find a replication_device with a '
                         'backend_id of "%s" in cinder.conf. Please '
                         'confirm that the replication_device_backend_id '
                         'extra spec for this volume type matches the '
                         'backend_id of the intended replication_device '
                         'in cinder.conf.') % backend_id)
                if BACKEND_ID_LEGACY_REP in msg:
                    msg = (_('Could not find replication_device. Legacy '
                             'replication_device key found, please ensure '
                             'the backend_id for the legacy '
                             'replication_device in cinder.conf has been '
                             'changed to "%s".') % BACKEND_ID_LEGACY_REP)
                LOG.error(msg)
                raise exception.InvalidInput(msg)
        return rep_device

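An illustrative call showing the new promotion_vol_stats path; the dict contents are hypothetical, real rep_configs carry full replication_device settings:

rep_configs = [{'backend_id': 'rep_sync_1'}, {'backend_id': 'rep_metro_1'}]
device = PowerMaxUtils.get_rep_config(
    'unknown-id', rep_configs, promotion_vol_stats=True)
# device is rep_configs[0]; without promotion_vol_stats the same call
# raises InvalidInput because no backend_id matches.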
    @staticmethod
@ -1834,7 +1882,8 @@ class PowerMaxUtils(object):
        return list(replication_targets)

    def validate_failover_request(self, is_failed_over, failover_backend_id,
                                  rep_configs, primary_array, arrays_list):
                                  rep_configs, primary_array, arrays_list,
                                  is_promoted):
        """Validate failover_host request's parameters.

        Validate that a failover_host operation can be performed with
@ -1845,23 +1894,32 @@ class PowerMaxUtils(object):
        :param rep_configs: backend rep_configs -- list
        :param primary_array: configured primary array SID -- string
        :param arrays_list: list of U4P symmetrix IDs -- list
        :param is_promoted: current promotion state -- bool
        :return: (bool, str) -- is valid, reason if invalid
        """
        is_valid = True
        msg = ""
        if is_failed_over:
            if failover_backend_id != 'default':
            valid_backend_ids = [
                'default', PMAX_FAILOVER_START_ARRAY_PROMOTION]
            if failover_backend_id not in valid_backend_ids:
                is_valid = False
                msg = _('Cannot failover, the backend is already in a failed '
                        'over state, if you meant to failback, please add '
                        '--backend_id default to the command.')
            elif primary_array not in arrays_list:
            elif (failover_backend_id == 'default' and
                    primary_array not in arrays_list):
                is_valid = False
                msg = _('Cannot failback, the configured primary array is '
                        'not currently available to perform failback to. '
                        'Please ensure array %s is visible in '
                        'Unisphere.') % primary_array
            elif is_promoted and failover_backend_id != 'default':
                is_valid = False
                msg = _('Failover promotion currently in progress, please '
                        'finish the promotion process and issue a failover '
                        'using the "default" backend_id to complete this '
                        'process.')
        else:
            if failover_backend_id == 'default':
                is_valid = False
@ -1869,6 +1927,11 @@ class PowerMaxUtils(object):
                        'state. If you meant to failover, please either omit '
                        'the --backend_id parameter or use the --backend_id '
                        'parameter with a valid backend id.')
            elif failover_backend_id == PMAX_FAILOVER_START_ARRAY_PROMOTION:
                is_valid = False
                msg = _('Cannot start failover promotion. The backend must '
                        'already be in a failover state to perform this '
                        'action.')
            elif len(rep_configs) > 1:
                if failover_backend_id is None:
                    is_valid = False