PowerMax Driver - Storage-assisted in-use retype support

The PowerMax Cinder driver now supports storage-assisted in-use retype
of volumes, including volumes that are part of a replication session.

Change-Id: If0d1262f0a400f1ab66615bead751591a9a969e9
Implements: blueprint powermax-storage-assisted-inuse-retype
Michael McAleer 2019-02-07 15:48:54 +00:00
parent 9fc3b1ac03
commit 9638cc1774
7 changed files with 599 additions and 81 deletions
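
For reviewers who want to exercise the change, a minimal usage sketch follows.
It relies on the standard python-cinderclient API rather than anything added
by this patch; the endpoint, credentials, volume ID and volume type name are
placeholders.

# Sketch only: retype an attached (in-use) volume to another PowerMax-backed
# volume type. All names below are illustrative.
from cinderclient import client

cinder = client.Client('3', 'admin', 'secret', 'admin_project',
                       'http://controller:5000/v3')

# migration_policy 'never' keeps the operation storage assisted; with this
# patch the PowerMax driver performs the retype itself even while the volume
# is attached, instead of rejecting attached volumes.
cinder.volumes.retype('my-volume-id', 'PowerMax_Diamond_Rep', 'never')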


@@ -171,6 +171,9 @@ class PowerMaxCommonData(object):
provider_location4 = {'array': six.text_type(uni_array),
'device_id': device_id}
provider_location5 = {'array': remote_array,
'device_id': device_id}
legacy_provider_location = {
'classname': 'Symm_StorageVolume',
'keybindings': {'CreationClassName': u'Symm_StorageVolume',
@@ -196,9 +199,10 @@ class PowerMaxCommonData(object):
replication_driver_data=six.text_type(provider_location3))
test_attached_volume = fake_volume.fake_volume_obj(
id='4732de9b-98a4-4b6d-ae4b-3cafb3d34220', context=ctx, name='vol1',
size=0, provider_auth=None, attach_status='attached',
provider_location=six.text_type(provider_location), host=fake_host,
volume_type=test_volume_type,
replication_driver_data=six.text_type(provider_location3))
test_legacy_vol = fake_volume.fake_volume_obj(
@@ -279,6 +283,10 @@ class PowerMaxCommonData(object):
'array': array,
'interval': 3,
'retries': 120}
extra_specs_migrate = deepcopy(extra_specs)
extra_specs_migrate[utils.PORTGROUPNAME] = port_group_name_f
extra_specs_disable_compression = deepcopy(extra_specs)
extra_specs_disable_compression[utils.DISABLECOMPRESSION] = "true"
extra_specs_intervals_set = deepcopy(extra_specs)
@@ -294,10 +302,16 @@ class PowerMaxCommonData(object):
rep_extra_specs['rep_mode'] = 'Synchronous'
rep_extra_specs2 = deepcopy(rep_extra_specs)
rep_extra_specs2[utils.PORTGROUPNAME] = port_group_name_f
rep_extra_specs3 = deepcopy(rep_extra_specs)
rep_extra_specs3['slo'] = slo
rep_extra_specs3['workload'] = workload
rep_extra_specs4 = deepcopy(rep_extra_specs3)
rep_extra_specs4['rdf_group_label'] = rdf_group_name
test_volume_type_1 = volume_type.VolumeType(
id='2b06255d-f5f0-4520-a953-b029196add6a', name='abc',
extra_specs=extra_specs)
test_volume_type_list = volume_type.VolumeTypeList(
objects=[test_volume_type_1])
@@ -584,6 +598,24 @@ class PowerMaxCommonData(object):
"random_sg_2"]},
]
volume_details_attached = {"cap_gb": 2,
"num_of_storage_groups": 1,
"volumeId": device_id,
"volume_identifier": "OS-%s" % test_volume.id,
"wwn": volume_wwn,
"snapvx_target": 'false',
"snapvx_source": 'false',
"storageGroupId": [storagegroup_name_f]}
volume_details_no_sg = {"cap_gb": 2,
"num_of_storage_groups": 1,
"volumeId": device_id,
"volume_identifier": "OS-%s" % test_volume.id,
"wwn": volume_wwn,
"snapvx_target": 'false',
"snapvx_source": 'false',
"storageGroupId": []}
volume_list = [
{"resultList": {"result": [{"volumeId": device_id}]}},
{"resultList": {"result": [{"volumeId": device_id2}]}},
@@ -1798,6 +1830,16 @@ class PowerMaxUtilsTest(test.TestCase):
self.assertFalse(
self.utils.is_snapshot_manageable(volume))

def test_get_volume_attached_hostname(self):
device_info_pass = self.data.volume_details_attached
# Success
hostname = self.utils.get_volume_attached_hostname(device_info_pass)
self.assertEqual('HostX', hostname)
# Fail
device_info_fail = self.data.volume_details_no_sg
hostname = self.utils.get_volume_attached_hostname(device_info_fail)
self.assertIsNone(hostname)

class PowerMaxRestTest(test.TestCase):
def setUp(self):
@@ -2252,17 +2294,16 @@ class PowerMaxRestTest(test.TestCase):
def test_add_child_sg_to_parent_sg(self):
payload = {"editStorageGroupActionParam": {
"addExistingStorageGroupParam": {
"storageGroupId": [self.data.storagegroup_name_f]}}}
with mock.patch.object(
self.rest, 'modify_storage_group',
return_value=(202, self.data.job_list[0])) as mck_mod_sg:
self.rest.add_child_sg_to_parent_sg(
self.data.array, self.data.storagegroup_name_f,
self.data.parent_sg_f, self.data.extra_specs)
mck_mod_sg.assert_called_once_with(
self.data.array, self.data.parent_sg_f, payload, version='83')

def test_remove_child_sg_from_parent_sg(self):
payload = {"editStorageGroupActionParam": {
@@ -4747,7 +4788,8 @@ class PowerMaxCommonTest(test.TestCase):
volume_name, volume_size, extra_specs)
mock_get.assert_called_once_with(
extra_specs['array'], extra_specs[utils.SRP],
extra_specs[utils.SLO], 'NONE', extra_specs, True,
False, None)

def test_create_volume_failed(self):
volume_name = self.data.test_volume.name
@@ -4786,6 +4828,27 @@ class PowerMaxCommonTest(test.TestCase):
self.common._create_volume,
volume_name, volume_size, extra_specs)

@mock.patch.object(rest.PowerMaxRest, 'is_next_gen_array',
return_value=False)
@mock.patch.object(provision.PowerMaxProvision, 'verify_slo_workload',
return_value=(True, True))
@mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg')
def test_create_volume_in_use_replication_enabled(self, mock_create,
mock_verify,
mock_nextgen):
volume_name = '1'
volume_size = self.data.test_volume.size
rep_extra_specs = self.data.rep_extra_specs3
with mock.patch.object(
self.masking,
'get_or_create_default_storage_group') as mck_sg:
self.common._create_volume(
volume_name, volume_size, rep_extra_specs, in_use=True)
mck_sg.assert_called_once_with(
rep_extra_specs['array'], rep_extra_specs['srp'],
rep_extra_specs['slo'], rep_extra_specs['workload'],
rep_extra_specs, False, True, rep_extra_specs['rep_mode'])

def test_set_vmax_extra_specs(self):
srp_record = self.common.get_attributes_from_cinder_config()
extra_specs = self.common._set_vmax_extra_specs(
@@ -5343,15 +5406,160 @@ class PowerMaxCommonTest(test.TestCase):
self.common.retype(volume, new_type, host)
mock_migrate.assert_called_once_with(
device_id, volume, host, volume_name, new_type, extra_specs)
with mock.patch.object(
self.common, '_find_device_on_array', return_value=None):
self.assertFalse(self.common.retype(volume, new_type, host))

def test_retype_attached_vol(self):
host = {'host': self.data.new_host}
new_type = {'extra_specs': {}}
with mock.patch.object(
self.common, '_find_device_on_array', return_value=True):
with mock.patch.object(self.common,
'_slo_workload_migration') as mock_retype:
self.common.retype(self.data.test_attached_volume,
new_type, host)
mock_retype.assert_called_once()
@mock.patch.object(rest.PowerMaxRest, 'get_volume',
return_value=PowerMaxCommonData.volume_details_attached)
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group',
return_value=PowerMaxCommonData.sg_details[1])
@mock.patch.object(utils.PowerMaxUtils, 'get_child_sg_name',
return_value=('OS-Test-SG', '', '', ''))
@mock.patch.object(rest.PowerMaxRest, 'is_child_sg_in_parent_sg',
return_value=True)
@mock.patch.object(masking.PowerMaxMasking,
'move_volume_between_storage_groups')
@mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup',
return_value=True)
def test_retype_inuse_volume_tgt_sg_exist(self, mck_vol_in_sg, mck_sg_move,
mck_child_sg_in_sg,
mck_get_sg_name,
mck_get_sg, mck_get_vol):
array = self.data.array
srp = self.data.srp
slo = self.data.slo
workload = self.data.workload
device_id = self.data.device_id
volume = self.data.test_attached_volume
rep_mode = 'Synchronous'
src_extra_specs = self.data.extra_specs_migrate
interval = src_extra_specs['interval']
retries = src_extra_specs['retries']
tgt_extra_specs = {
'srp': srp, 'array': array, 'slo': slo, 'workload': workload,
'interval': interval, 'retries': retries, 'rep_mode': rep_mode}
success = self.common._retype_inuse_volume(
array, srp, volume, device_id, src_extra_specs, slo, workload,
tgt_extra_specs, False)[0]
self.assertTrue(success)
mck_sg_move.assert_called()
mck_vol_in_sg.assert_called()
@mock.patch.object(rest.PowerMaxRest, 'get_volume',
return_value=PowerMaxCommonData.volume_details_attached)
@mock.patch.object(utils.PowerMaxUtils, 'get_child_sg_name',
return_value=('OS-Test-SG', '', '', ''))
@mock.patch.object(provision.PowerMaxProvision, 'create_storage_group')
@mock.patch.object(masking.PowerMaxMasking, 'add_child_sg_to_parent_sg')
@mock.patch.object(rest.PowerMaxRest, 'is_child_sg_in_parent_sg',
return_value=True)
@mock.patch.object(masking.PowerMaxMasking,
'move_volume_between_storage_groups')
@mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup',
return_value=True)
def test_retype_inuse_volume_no_tgt_sg(self, mck_vol_in_sg, mck_move_vol,
mck_sg_in_sg, mck_add_sg_to_sg,
mck_create_sg, mck_get_csg_name,
mck_get_vol):
array = self.data.array
srp = self.data.srp
slo = self.data.slo
workload = self.data.workload
device_id = self.data.device_id
volume = self.data.test_attached_volume
rep_mode = 'Synchronous'
src_extra_specs = self.data.extra_specs_migrate
interval = src_extra_specs['interval']
retries = src_extra_specs['retries']
tgt_extra_specs = {
'srp': srp, 'array': array, 'slo': slo, 'workload': workload,
'interval': interval, 'retries': retries, 'rep_mode': rep_mode}
with mock.patch.object(self.rest, 'get_storage_group',
side_effect=[PowerMaxCommonData.sg_details[1],
None,
PowerMaxCommonData.sg_details[1]]):
success = self.common._retype_inuse_volume(
array, srp, volume, device_id, src_extra_specs, slo, workload,
tgt_extra_specs, False)[0]
mck_create_sg.assert_called()
mck_add_sg_to_sg.assert_called()
self.assertTrue(success)
@mock.patch.object(rest.PowerMaxRest, 'get_volume',
return_value=PowerMaxCommonData.volume_details_attached)
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group',
return_value=PowerMaxCommonData.sg_details[1])
@mock.patch.object(utils.PowerMaxUtils, 'get_child_sg_name',
return_value=('OS-Test-SG', '', '', ''))
@mock.patch.object(rest.PowerMaxRest, 'is_child_sg_in_parent_sg',
return_value=False)
@mock.patch.object(masking.PowerMaxMasking,
'move_volume_between_storage_groups')
@mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup',
return_value=False)
def test_retype_inuse_volume_fail(self, mck_vol_in_sg, mck_sg_move,
mck_child_sg_in_sg, mck_get_sg_name,
mck_get_sg, mck_get_vol):
array = self.data.array
srp = self.data.srp
slo = self.data.slo
workload = self.data.workload
device_id = self.data.device_id
volume = self.data.test_attached_volume
rep_mode = 'Synchronous'
src_extra_specs = self.data.extra_specs_migrate
interval = src_extra_specs['interval']
retries = src_extra_specs['retries']
tgt_extra_specs = {
'srp': srp, 'array': array, 'slo': slo, 'workload': workload,
'interval': interval, 'retries': retries, 'rep_mode': rep_mode}
success = self.common._retype_inuse_volume(
array, srp, volume, device_id, src_extra_specs, slo, workload,
tgt_extra_specs, False)[0]
self.assertFalse(success)
mck_vol_in_sg.assert_not_called()
mck_sg_move.assert_not_called()
@mock.patch.object(rest.PowerMaxRest, 'get_volume',
return_value=PowerMaxCommonData.volume_details_attached)
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group',
return_value=PowerMaxCommonData.sg_details[1])
@mock.patch.object(utils.PowerMaxUtils, 'get_volume_attached_hostname',
return_value=None)
def test_retype_inuse_volume_fail_no_attached_host(self, mck_get_hostname,
mck_get_sg,
mck_get_vol):
array = self.data.array
srp = self.data.srp
slo = self.data.slo
workload = self.data.workload
device_id = self.data.device_id
volume = self.data.test_attached_volume
rep_mode = 'Synchronous'
src_extra_specs = self.data.extra_specs_migrate
interval = src_extra_specs['interval']
retries = src_extra_specs['retries']
tgt_extra_specs = {
'srp': srp, 'array': array, 'slo': slo, 'workload': workload,
'interval': interval, 'retries': retries, 'rep_mode': rep_mode}
success = self.common._retype_inuse_volume(
array, srp, volume, device_id, src_extra_specs, slo, workload,
tgt_extra_specs, False)[0]
self.assertFalse(success)

def test_slo_workload_migration_valid(self):
device_id = self.data.device_id
@@ -5447,6 +5655,105 @@ class PowerMaxCommonTest(test.TestCase):
self.assertTrue(migrate_status)
mock_remove.assert_not_called()

@mock.patch.object(common.PowerMaxCommon, 'cleanup_lun_replication')
@mock.patch.object(common.PowerMaxCommon, '_retype_inuse_volume',
return_value=(True, 'Test'))
@mock.patch.object(common.PowerMaxCommon,
'setup_inuse_volume_replication',
return_value=('Status', 'Data', 'Info'))
@mock.patch.object(common.PowerMaxCommon, '_retype_remote_volume',
return_value=True)
def test_migrate_in_use_volume(self, mck_remote_retype, mck_setup,
mck_retype, mck_cleanup):
# Array/Volume info
array = self.data.array
srp = self.data.srp
slo = self.data.slo
workload = self.data.workload
device_id = self.data.device_id
volume = self.data.test_attached_volume
volume_name = self.data.test_attached_volume.name
# Rep Config
rep_mode = 'Synchronous'
self.common.rep_config = {'mode': rep_mode}
# Extra Specs
new_type = {'extra_specs': {}}
src_extra_specs = self.data.extra_specs_migrate
interval = src_extra_specs['interval']
retries = src_extra_specs['retries']
tgt_extra_specs = {
'srp': srp, 'array': array, 'slo': slo, 'workload': workload,
'interval': interval, 'retries': retries, 'rep_mode': rep_mode}
def _reset_mocks():
mck_cleanup.reset_mock()
mck_setup.reset_mock()
mck_retype.reset_mock()
mck_remote_retype.reset_mock()
# Scenario 1: no_rep => no_rep
with mock.patch.object(self.utils, 'is_replication_enabled',
side_effect=[False, False]):
success = self.common._migrate_volume(
array, volume, device_id, srp, slo, workload, volume_name,
new_type, src_extra_specs)[0]
mck_retype.assert_called_once_with(
array, srp, volume, device_id, src_extra_specs, slo, workload,
tgt_extra_specs, False)
mck_cleanup.assert_not_called()
mck_setup.assert_not_called()
mck_remote_retype.assert_not_called()
self.assertTrue(success)
_reset_mocks()
# Scenario 2: rep => no_rep
with mock.patch.object(self.utils, 'is_replication_enabled',
side_effect=[True, False]):
success = self.common._migrate_volume(
array, volume, device_id, srp, slo, workload, volume_name,
new_type, src_extra_specs)[0]
mck_cleanup.assert_called_once_with(
volume, volume_name, device_id, src_extra_specs)
mck_retype.assert_called_once_with(
array, srp, volume, device_id, src_extra_specs, slo, workload,
tgt_extra_specs, False)
mck_setup.assert_not_called()
mck_remote_retype.assert_not_called()
self.assertTrue(success)
_reset_mocks()
# Scenario 3: no_rep => rep
with mock.patch.object(self.utils, 'is_replication_enabled',
side_effect=[False, True]):
success = self.common._migrate_volume(
array, volume, device_id, srp, slo, workload, volume_name,
new_type, src_extra_specs)[0]
mck_setup.assert_called_once_with(
self.data.array, volume, device_id, src_extra_specs)
mck_retype.assert_called_once_with(
array, srp, volume, device_id, src_extra_specs, slo,
workload, tgt_extra_specs, False)
mck_cleanup.assert_not_called()
mck_remote_retype.assert_not_called()
self.assertTrue(success)
_reset_mocks()
# Scenario 4: rep => rep
with mock.patch.object(self.utils, 'is_replication_enabled',
side_effect=[True, True]):
success = self.common._migrate_volume(
array, volume, device_id, srp, slo, workload, volume_name,
new_type, src_extra_specs)[0]
mck_retype.assert_called_once_with(
array, srp, volume, device_id, src_extra_specs, slo, workload,
tgt_extra_specs, False)
mck_remote_retype.assert_called_once_with(
array, volume, device_id, volume_name, rep_mode, True,
tgt_extra_specs)
mck_cleanup.assert_not_called()
mck_setup.assert_not_called()
self.assertTrue(success)

@mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members')
def test_migrate_volume_failed_get_new_sg_failed(self, mock_remove):
device_id = self.data.device_id
@@ -6444,9 +6751,9 @@ class PowerMaxFCTest(test.TestCase):
host = {'host': self.data.new_host}
new_type = {'extra_specs': {}}
with mock.patch.object(self.common, 'retype',
return_value=True) as mck_retype:
self.driver.retype({}, self.data.test_volume, new_type, '', host)
mck_retype.assert_called_once_with(
self.data.test_volume, new_type, host)

def test_failover_host(self):
@@ -6759,9 +7066,9 @@ class PowerMaxISCSITest(test.TestCase):
host = {'host': self.data.new_host}
new_type = {'extra_specs': {}}
with mock.patch.object(self.common, 'retype',
return_value=True) as mck_retype:
self.driver.retype({}, self.data.test_volume, new_type, '', host)
mck_retype.assert_called_once_with(
self.data.test_volume, new_type, host)

def test_failover_host(self):
@@ -8163,6 +8470,33 @@ class PowerMaxCommonReplicationTest(test.TestCase):
'device_id': self.data.device_id2}, rep_data)
mock_create.assert_not_called()

@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=(PowerMaxCommonData.rdf_group_no,
PowerMaxCommonData.remote_array))
@mock.patch.object(rest.PowerMaxRest, 'get_size_of_device_on_array',
return_value=2)
@mock.patch.object(common.PowerMaxCommon, '_get_replication_extra_specs',
return_value=PowerMaxCommonData.rep_extra_specs)
@mock.patch.object(common.PowerMaxCommon, '_create_volume',
return_value=PowerMaxCommonData.provider_location)
@mock.patch.object(common.PowerMaxCommon, '_sync_check')
@mock.patch.object(rest.PowerMaxRest, 'create_rdf_device_pair',
return_value=PowerMaxCommonData.rdf_group_details)
def test_setup_inuse_volume_replication(self, mck_create_rdf_pair,
mck_sync_chk, mck_create_vol,
mck_rep_specs, mck_get_vol_size,
mck_get_rdf_info):
array = self.data.array
device_id = self.data.device_id
volume = self.data.test_attached_volume
extra_specs = self.data.extra_specs_migrate
self.rep_config = self.data.rep_extra_specs4
rep_status, rep_data, __ = (
self.common.setup_inuse_volume_replication(
array, volume, device_id, extra_specs))
self.assertEqual('enabled', rep_status)
self.assertEqual(self.data.rdf_group_details, rep_data)

@mock.patch.object(common.PowerMaxCommon, '_cleanup_remote_target')
def test_cleanup_lun_replication_success(self, mock_clean):
rep_extra_specs = deepcopy(self.data.rep_extra_specs)


@@ -1641,18 +1641,19 @@ class PowerMaxCommon(object):
return volume_name

def _create_volume(
self, volume_name, volume_size, extra_specs, in_use=False):
"""Create a volume.

:param volume_name: the volume name
:param volume_size: the volume size
:param extra_specs: extra specifications
:param in_use: if the volume is in 'in-use' state
:return: volume_dict --dict
:raises: VolumeBackendAPIException:
"""
array = extra_specs[utils.ARRAY]
next_gen = self.rest.is_next_gen_array(array)
if next_gen:
extra_specs[utils.WORKLOAD] = 'NONE'
is_valid_slo, is_valid_workload = self.provision.verify_slo_workload(
array, extra_specs[utils.SLO],
@@ -1678,10 +1679,17 @@ class PowerMaxCommon(object):
do_disable_compression = self.utils.is_compression_disabled(
extra_specs)

# If the volume is in-use, set replication config for correct SG
# creation
if in_use and self.utils.is_replication_enabled(extra_specs):
is_re, rep_mode = True, extra_specs['rep_mode']
else:
is_re, rep_mode = False, None
storagegroup_name = self.masking.get_or_create_default_storage_group(
array, extra_specs[utils.SRP], extra_specs[utils.SLO],
extra_specs[utils.WORKLOAD], extra_specs,
do_disable_compression, is_re, rep_mode)
try:
volume_dict = self.provision.create_volume_from_sg(
array, volume_name, storagegroup_name,
@@ -2787,7 +2795,6 @@ class PowerMaxCommon(object):
{'volume': volume_name})
extra_specs = self._initial_setup(volume)
device_id = self._find_device_on_array(volume, extra_specs)
if device_id is None:
LOG.error("Volume %(name)s not found on the array. "
@@ -2795,18 +2802,6 @@ class PowerMaxCommon(object):
{'name': volume_name})
return False

# If the volume is attached, we can't support retype.
# Need to explicitly check this after the code change,
# as 'move' functionality will cause the volume to appear
# as successfully retyped, but will remove it from the masking view.
if volume.attach_status == 'attached':
LOG.error(
"Volume %(name)s is not suitable for storage "
"assisted migration using retype "
"as it is attached.",
{'name': volume_name})
return False
return self._slo_workload_migration(device_id, volume, host,
volume_name, new_type, extra_specs)
@@ -2883,7 +2878,7 @@
:param extra_specs: the extra specifications
:returns: bool
"""
model_update, rep_mode, move_target, success = None, None, False, False
target_extra_specs = new_type['extra_specs']
target_extra_specs[utils.SRP] = srp
target_extra_specs[utils.ARRAY] = array
@@ -2893,52 +2888,92 @@
target_extra_specs[utils.RETRIES] = extra_specs[utils.RETRIES]
is_compression_disabled = self.utils.is_compression_disabled(
target_extra_specs)

if self.rep_config and self.rep_config.get('mode'):
rep_mode = self.rep_config['mode']
target_extra_specs[utils.REP_MODE] = rep_mode
was_rep_enabled = self.utils.is_replication_enabled(extra_specs)
is_rep_enabled = self.utils.is_replication_enabled(target_extra_specs)

if volume.attach_status == 'attached':
# Scenario: Rep was enabled, target VT has rep disabled, need to
# disable replication
if was_rep_enabled and not is_rep_enabled:
self.cleanup_lun_replication(volume, volume_name,
device_id, extra_specs)
model_update = {
'replication_status': REPLICATION_DISABLED,
'replication_driver_data': None}

# Scenario: Rep was not enabled, target VT has rep enabled, need to
# enable replication
elif not was_rep_enabled and is_rep_enabled:
rep_status, rep_driver_data, rep_info_dict = (
self.setup_inuse_volume_replication(
array, volume, device_id, extra_specs))
model_update = {
'replication_status': rep_status,
'replication_driver_data': six.text_type(rep_driver_data)}

# Retype the device on the source array
success, target_sg_name = self._retype_inuse_volume(
array, srp, volume, device_id, extra_specs,
target_slo, target_workload, target_extra_specs,
is_compression_disabled)

# If the volume was replication enabled both before and after
# retype, the volume needs to be retyped on the remote array also
if was_rep_enabled and is_rep_enabled:
success = self._retype_remote_volume(
array, volume, device_id, volume_name,
rep_mode, is_rep_enabled, target_extra_specs)

# Volume is not attached, retype as normal
elif volume.attach_status != 'attached':
if was_rep_enabled:
if not is_rep_enabled:
# Disable replication is True
self._remove_vol_and_cleanup_replication(
array, device_id, volume_name, extra_specs, volume)
model_update = {'replication_status': REPLICATION_DISABLED,
'replication_driver_data': None}
else:
# Ensure both source and target volumes are retyped
move_target = True
else:
if is_rep_enabled:
# Setup_volume_replication will put volume in correct sg
rep_status, rdf_dict, __ = self.setup_volume_replication(
array, volume, device_id, target_extra_specs)
model_update = {
'replication_status': rep_status,
'replication_driver_data': six.text_type(rdf_dict)}
return True, model_update
try:
target_sg_name = (
self.masking.get_or_create_default_storage_group(
array, srp, target_slo, target_workload, extra_specs,
is_compression_disabled, is_rep_enabled, rep_mode))
except Exception as e:
LOG.error("Failed to get or create storage group. "
"Exception received was %(e)s.", {'e': e})
return False
success = self._retype_volume(
array, device_id, volume_name, target_sg_name,
volume, target_extra_specs)
if move_target:
success = self._retype_remote_volume(
array, volume, device_id, volume_name,
rep_mode, is_rep_enabled, target_extra_specs)
if success:
self.volume_metadata.capture_retype_info(
volume, device_id, array, srp, target_slo,
target_workload, target_sg_name, is_rep_enabled, rep_mode,
is_compression_disabled)

return success, model_update
@@ -2988,6 +3023,75 @@
return True

def _retype_inuse_volume(self, array, srp, volume, device_id, extra_specs,
target_slo, target_workload, target_extra_specs,
is_compression_disabled):
"""Retype an in-use volume using storage assisted migration.
:param array: the array serial
:param srp: the SRP ID
:param volume: the volume object
:param device_id: the device id
:param extra_specs: the source volume type extra specs
:param target_slo: the service level of the target volume type
:param target_workload: the workload of the target volume type
:param target_extra_specs: the target extra specs
:param is_compression_disabled: if compression is disabled in the
target volume type
:return: if the retype was successful -- bool,
the storage group the volume has moved to --str
"""
success = False
device_info = self.rest.get_volume(array, device_id)
source_sg_name = device_info['storageGroupId'][0]
source_sg = self.rest.get_storage_group(array, source_sg_name)
target_extra_specs[utils.PORTGROUPNAME] = extra_specs[
utils.PORTGROUPNAME]
attached_host = self.utils.get_volume_attached_hostname(device_info)
if not attached_host:
LOG.error(
"There was an issue retrieving attached host from volume "
"%(volume_name)s, aborting storage-assisted migration.",
{'volume_name': device_id})
return False, None
target_sg_name, __, __, __ = self.utils.get_child_sg_name(
attached_host, target_extra_specs)
target_sg = self.rest.get_storage_group(array, target_sg_name)
if not target_sg:
self.provision.create_storage_group(array, target_sg_name, srp,
target_slo,
target_workload,
target_extra_specs,
is_compression_disabled)
parent_sg = source_sg['parent_storage_group'][0]
self.masking.add_child_sg_to_parent_sg(
array, target_sg_name, parent_sg, target_extra_specs)
target_sg = self.rest.get_storage_group(array, target_sg_name)
target_in_parent = self.rest.is_child_sg_in_parent_sg(
array, target_sg_name, target_sg['parent_storage_group'][0])
if target_sg and target_in_parent:
self.masking.move_volume_between_storage_groups(
array, device_id, source_sg_name, target_sg_name,
target_extra_specs)
success = self.rest.is_volume_in_storagegroup(
array, device_id, target_sg_name)
if not success:
LOG.error(
"Volume: %(volume_name)s has not been "
"added to target storage group %(storageGroup)s.",
{'volume_name': device_id,
'storageGroup': target_sg_name})
else:
LOG.info("Move successful: %(success)s", {'success': success})
return success, target_sg_name

def _retype_remote_volume(self, array, volume, device_id,
volume_name, rep_mode, is_re, extra_specs):
"""Retype the remote volume.
@@ -3194,6 +3298,64 @@
return replication_status, replication_driver_data, rep_info_dict

def setup_inuse_volume_replication(self, array, volume, device_id,
extra_specs):
"""Setup replication for in-use volume.
:param array: the array serial number
:param volume: the volume object
:param device_id: the device id
:param extra_specs: the extra specifications
:return: replication_status -- str, replication_driver_data -- dict
rep_info_dict -- dict
"""
source_name = volume.name
LOG.debug('Starting replication setup '
'for volume: %s.', source_name)
rdf_group_no, remote_array = self.get_rdf_details(array)
extra_specs['replication_enabled'] = '<is> True'
extra_specs['rep_mode'] = self.rep_config['mode']
rdf_vol_size = volume.size
if rdf_vol_size == 0:
rdf_vol_size = self.rest.get_size_of_device_on_array(
array, device_id)
target_name = self.utils.get_volume_element_name(volume.id)
rep_extra_specs = self._get_replication_extra_specs(
extra_specs, self.rep_config)
volume_dict = self._create_volume(
target_name, rdf_vol_size, rep_extra_specs, in_use=True)
target_device_id = volume_dict['device_id']
LOG.debug("Create volume replica: Target device: %(target)s "
"Source Device: %(source)s "
"Volume identifier: %(name)s.",
{'target': target_device_id,
'source': device_id,
'name': target_name})
self._sync_check(array, device_id, extra_specs, tgt_only=True)
rdf_dict = self.rest.create_rdf_device_pair(
array, device_id, rdf_group_no, target_device_id, remote_array,
extra_specs)
LOG.info('Successfully setup replication for %s.',
target_name)
replication_status = REPLICATION_ENABLED
replication_driver_data = rdf_dict
rep_info_dict = self.volume_metadata.gather_replication_info(
volume.id, 'replication', False,
rdf_group_no=rdf_group_no,
target_name=target_name, remote_array=remote_array,
target_device_id=target_device_id,
replication_status=replication_status,
rep_mode=rep_extra_specs['rep_mode'],
rdf_group_label=self.rep_config['rdf_group_label'])
return replication_status, replication_driver_data, rep_info_dict

def _add_volume_to_async_rdf_managed_grp(
self, array, device_id, volume_name, remote_array,
target_device_id, extra_specs):


@@ -106,6 +106,8 @@ class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver):
- Rebrand from VMAX to PowerMax(bp/vmax-powermax-rebrand)
- Change from 84 to 90 REST endpoints (bug #1808539)
- Fix for PowerMax OS replication settings (bug #1812685)
- Support for storage-assisted in-use retype
(bp/powermax-storage-assisted-inuse-retype)
""" """
VERSION = "4.0.0" VERSION = "4.0.0"


@@ -111,6 +111,8 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
- Rebrand from VMAX to PowerMax(bp/vmax-powermax-rebrand)
- Change from 84 to 90 REST endpoints (bug #1808539)
- Fix for PowerMax OS replication settings (bug #1812685)
- Support for storage-assisted in-use retype
(bp/powermax-storage-assisted-inuse-retype)
""" """
VERSION = "4.0.0" VERSION = "4.0.0"


@@ -731,10 +731,10 @@ class PowerMaxRest(object):
:param extra_specs: the extra specifications
"""
payload = {"editStorageGroupActionParam": {
"addExistingStorageGroupParam": {
"storageGroupId": [child_sg]}}}
sc, job = self.modify_storage_group(array, parent_sg, payload,
version="83")
self.wait_for_job('Add child sg to parent sg', sc, job, extra_specs)

def remove_child_sg_from_parent_sg(


@@ -887,3 +887,16 @@ class PowerMaxUtils(object):
return False
return True

@staticmethod
def get_volume_attached_hostname(device_info):
"""Parse a hostname from a storage group ID.
:param device_info: the device info dict
:return: str -- the attached hostname
"""
try:
sg_id = device_info.get("storageGroupId")[0]
return sg_id.split('-')[1]
except IndexError:
return None
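
As context for the helper above, a small illustrative sketch of the naming
assumption it relies on: the attached host's short name is expected to be the
second dash-separated token of the volume's first storage group name (roughly
"OS-<host>-<srp>-<slo/workload>-<port group>"). The group name below is made
up for the example.

# Sketch only: mirrors the parsing done by get_volume_attached_hostname().
device_info = {"storageGroupId": ["OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG"]}
sg_id = device_info.get("storageGroupId")[0]
print(sg_id.split('-')[1])  # prints: HostX
# An empty "storageGroupId" list raises IndexError, which the helper turns
# into None, matching the new unit test above.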


@@ -0,0 +1,5 @@
---
features:
- |
The PowerMax Cinder driver now supports storage-assisted in-use retype
for volumes, including volumes in replication sessions.