VMAX driver - Implement SRDF Metro feature

SRDF/Metro makes a remotely replicated (SRDF) secondary device
read/write accessible to the host, taking on the external identity
of the primary device (geometry, device WWN, and so on). The primary
and secondary devices then appear to the host as a single virtual
device spanning the two SRDF-paired arrays. Because both devices are
accessible, the host can read from and write to either copy, with
SRDF/Metro keeping each copy current and consistent and resolving
any write conflicts that occur.
This patch adds SRDF/Metro support to the VMAX cinder driver.

Change-Id: Ib31763ea5759f7f9c0d2e2db68ba3820188245db
Partially-Implements: blueprint vmax-replication-enhancements
Ciara Stacke 2017-11-01 02:34:14 +00:00 committed by Helen Walsh
parent f4ef603610
commit 925bdfbb06
9 changed files with 982 additions and 293 deletions
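
For orientation, the metro-specific options exercised in the tests below map onto
a 'metro' replication_device entry for the backend. The following is a rough,
hypothetical sketch only: the option names are taken from the metro_rep_device
dict and the get_replication_config() tests added in this patch, while the values
are placeholders rather than a verified working configuration.

    # Hypothetical 'metro' replication_device entry (values are placeholders).
    # Option names mirror those used in the tests added by this patch.
    metro_rep_device = {
        'target_device_id': '000197800999',   # remote (R2) array serial
        'remote_port_group': 'OS-remote-PG',  # port group on the remote array
        'remote_pool': 'SRP_2',               # SRP on the remote array
        'rdf_group_label': 'Metro_grp',       # RDF group used for the pairing
        'allow_extend': 'True',
        'mode': 'metro',
        # Optional SRDF/Metro flags handled by get_replication_config():
        'metro_use_bias': 'true',       # sets metro_bias in the parsed config
        'allow_delete_metro': 'true',   # permit cleanup of Metro-paired volumes
    }

In the parsed configuration, mode 'metro' selects REP_METRO, and both flags
default to False when omitted (see the rep_config6/rep_config7 assertions below).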


@ -68,6 +68,7 @@ class VMAXCommonData(object):
storagegroup_name_f = 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG' storagegroup_name_f = 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG'
storagegroup_name_i = 'OS-HostX-SRP_1-Diamond-DSS-OS-iscsi-PG' storagegroup_name_i = 'OS-HostX-SRP_1-Diamond-DSS-OS-iscsi-PG'
defaultstoragegroup_name = 'OS-SRP_1-Diamond-DSS-SG' defaultstoragegroup_name = 'OS-SRP_1-Diamond-DSS-SG'
storagegroup_list = [defaultstoragegroup_name]
default_sg_no_slo = 'OS-no_SLO-SG' default_sg_no_slo = 'OS-no_SLO-SG'
default_sg_compr_disabled = 'OS-SRP_1-Diamond-DSS-CD-SG' default_sg_compr_disabled = 'OS-SRP_1-Diamond-DSS-CD-SG'
default_sg_re_enabled = 'OS-SRP_1-Diamond-DSS-RE-SG' default_sg_re_enabled = 'OS-SRP_1-Diamond-DSS-RE-SG'
@ -88,15 +89,16 @@ class VMAXCommonData(object):
group_snapshot_name = 'Grp_snapshot' group_snapshot_name = 'Grp_snapshot'
target_group_name = 'Grp_target' target_group_name = 'Grp_target'
storagegroup_name_with_id = 'GrpId_group_name' storagegroup_name_with_id = 'GrpId_group_name'
rdf_managed_async_grp = "OS-%s-async-rdf-sg" % rdf_group_name rdf_managed_async_grp = "OS-%s-Asynchronous-rdf-sg" % rdf_group_name
# connector info # connector info
wwpn1 = "123456789012345" wwpn1 = "123456789012345"
wwpn2 = "123456789054321" wwpn2 = "123456789054321"
wwnn1 = "223456789012345" wwnn1 = "223456789012345"
initiator = 'iqn.1993-08.org.debian: 01: 222' initiator = 'iqn.1993-08.org.debian: 01: 222'
ip = u'123.456.7.8' ip, ip2 = u'123.456.7.8', u'123.456.7.9'
iqn = u'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000001,t,0x0001' iqn = u'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000001,t,0x0001'
iqn2 = u'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000002,t,0x0001'
connector = {'ip': ip, connector = {'ip': ip,
'initiator': initiator, 'initiator': initiator,
'wwpns': [wwpn1, wwpn2], 'wwpns': [wwpn1, wwpn2],
@ -113,6 +115,10 @@ class VMAXCommonData(object):
'initiator_group': initiatorgroup_name_f, 'initiator_group': initiatorgroup_name_f,
'port_group': port_group_name_f, 'port_group': port_group_name_f,
'target_wwns': target_wwns} 'target_wwns': target_wwns}
zoning_mappings_metro = deepcopy(zoning_mappings)
zoning_mappings_metro.update({'metro_port_group': port_group_name_f,
'metro_ig': initiatorgroup_name_f,
'metro_array': remote_array})
device_map = {} device_map = {}
for wwn in connector['wwpns']: for wwn in connector['wwpns']:
@ -131,6 +137,10 @@ class VMAXCommonData(object):
'array': array, 'array': array,
'controller': {'host': '10.00.00.00'}, 'controller': {'host': '10.00.00.00'},
'hostlunid': 3} 'hostlunid': 3}
iscsi_device_info_metro = deepcopy(iscsi_device_info)
iscsi_device_info_metro['metro_ip_and_iqn'] = [{'ip': ip2, 'iqn': iqn2}]
iscsi_device_info_metro['metro_hostlunid'] = 2
fc_device_info = {'maskingview': masking_view_name_f, fc_device_info = {'maskingview': masking_view_name_f,
'array': array, 'array': array,
'controller': {'host': '10.00.00.00'}, 'controller': {'host': '10.00.00.00'},
@ -246,7 +256,8 @@ class VMAXCommonData(object):
rep_extra_specs['retries'] = 0 rep_extra_specs['retries'] = 0
rep_extra_specs['srp'] = srp2 rep_extra_specs['srp'] = srp2
rep_extra_specs['rep_mode'] = 'Synchronous' rep_extra_specs['rep_mode'] = 'Synchronous'
rep_extra_specs2 = deepcopy(rep_extra_specs)
rep_extra_specs2[utils.PORTGROUPNAME] = port_group_name_f
test_volume_type_1 = volume_type.VolumeType( test_volume_type_1 = volume_type.VolumeType(
id='2b06255d-f5f0-4520-a953-b029196add6a', name='abc', id='2b06255d-f5f0-4520-a953-b029196add6a', name='abc',
extra_specs=extra_specs extra_specs=extra_specs
@ -1285,6 +1296,19 @@ class VMAXUtilsTest(test.TestCase):
rep_device_list5[0]['mode'] = 'async' rep_device_list5[0]['mode'] = 'async'
rep_config5 = self.utils.get_replication_config(rep_device_list5) rep_config5 = self.utils.get_replication_config(rep_device_list5)
self.assertEqual(utils.REP_ASYNC, rep_config5['mode']) self.assertEqual(utils.REP_ASYNC, rep_config5['mode'])
# Success, mode is metro - no other options set
rep_device_list6 = rep_device_list5
rep_device_list6[0]['mode'] = 'metro'
rep_config6 = self.utils.get_replication_config(rep_device_list6)
self.assertFalse(rep_config6['metro_bias'])
self.assertFalse(rep_config6['allow_delete_metro'])
# Success, mode is metro - metro options true
rep_device_list7 = rep_device_list6
rep_device_list6[0].update(
{'allow_delete_metro': 'true', 'metro_use_bias': 'true'})
rep_config7 = self.utils.get_replication_config(rep_device_list7)
self.assertTrue(rep_config7['metro_bias'])
self.assertTrue(rep_config7['allow_delete_metro'])
def test_is_volume_failed_over(self): def test_is_volume_failed_over(self):
vol = deepcopy(self.data.test_volume) vol = deepcopy(self.data.test_volume)
@ -1407,12 +1431,33 @@ class VMAXUtilsTest(test.TestCase):
self.assertEqual('-RA', async_prefix) self.assertEqual('-RA', async_prefix)
sync_prefix = self.utils.get_replication_prefix(utils.REP_SYNC) sync_prefix = self.utils.get_replication_prefix(utils.REP_SYNC)
self.assertEqual('-RE', sync_prefix) self.assertEqual('-RE', sync_prefix)
metro_prefix = self.utils.get_replication_prefix(utils.REP_METRO)
self.assertEqual('-RM', metro_prefix)
def test_get_async_rdf_managed_grp_name(self): def test_get_async_rdf_managed_grp_name(self):
rep_config = {'rdf_group_label': self.data.rdf_group_name} rep_config = {'rdf_group_label': self.data.rdf_group_name,
'mode': utils.REP_ASYNC}
grp_name = self.utils.get_async_rdf_managed_grp_name(rep_config) grp_name = self.utils.get_async_rdf_managed_grp_name(rep_config)
self.assertEqual(self.data.rdf_managed_async_grp, grp_name) self.assertEqual(self.data.rdf_managed_async_grp, grp_name)
def test_is_metro_device(self):
rep_config = {'mode': utils.REP_METRO}
is_metro = self.utils.is_metro_device(
rep_config, self.data.rep_extra_specs)
self.assertTrue(is_metro)
rep_config2 = {'mode': utils.REP_ASYNC}
is_metro2 = self.utils.is_metro_device(
rep_config2, self.data.rep_extra_specs)
self.assertFalse(is_metro2)
def test_does_vol_need_rdf_management_group(self):
self.assertFalse(self.utils.does_vol_need_rdf_management_group(
self.data.rep_extra_specs))
extra_specs = deepcopy(self.data.rep_extra_specs)
extra_specs[utils.REP_MODE] = utils.REP_ASYNC
self.assertTrue(self.utils.does_vol_need_rdf_management_group(
extra_specs))
class VMAXRestTest(test.TestCase): class VMAXRestTest(test.TestCase):
def setUp(self): def setUp(self):
@ -1626,18 +1671,6 @@ class VMAXRestTest(test.TestCase):
self.data.array, self.data.defaultstoragegroup_name) self.data.array, self.data.defaultstoragegroup_name)
self.assertEqual(ref_details, sg_details) self.assertEqual(ref_details, sg_details)
def test_get_storage_group_list(self):
ref_details = self.data.sg_list['storageGroupId']
sg_list = self.rest.get_storage_group_list(
self.data.array, {})
self.assertEqual(ref_details, sg_list)
def test_get_storage_group_list_none(self):
with mock.patch.object(self.rest, 'get_resource', return_value=None):
sg_list = self.rest.get_storage_group_list(
self.data.array, {})
self.assertEqual([], sg_list)
def test_create_storage_group(self): def test_create_storage_group(self):
with mock.patch.object(self.rest, 'create_resource'): with mock.patch.object(self.rest, 'create_resource'):
payload = {'someKey': 'someValue'} payload = {'someKey': 'someValue'}
@ -2575,10 +2608,11 @@ class VMAXRestTest(test.TestCase):
def test_create_rdf_device_pair(self): def test_create_rdf_device_pair(self):
ref_dict = {'array': self.data.remote_array, ref_dict = {'array': self.data.remote_array,
'device_id': self.data.device_id2} 'device_id': self.data.device_id2}
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.REP_MODE] = utils.REP_SYNC
rdf_dict = self.rest.create_rdf_device_pair( rdf_dict = self.rest.create_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no, self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.device_id2, self.data.remote_array, self.data.device_id2, self.data.remote_array, extra_specs)
self.data.extra_specs)
self.assertEqual(ref_dict, rdf_dict) self.assertEqual(ref_dict, rdf_dict)
def test_create_rdf_device_pair_async(self): def test_create_rdf_device_pair_async(self):
@ -2591,6 +2625,35 @@ class VMAXRestTest(test.TestCase):
self.data.device_id2, self.data.remote_array, extra_specs) self.data.device_id2, self.data.remote_array, extra_specs)
self.assertEqual(ref_dict, rdf_dict) self.assertEqual(ref_dict, rdf_dict)
def test_create_rdf_device_pair_metro(self):
ref_dict = {'array': self.data.remote_array,
'device_id': self.data.device_id2}
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.REP_MODE] = utils.REP_METRO
extra_specs[utils.METROBIAS] = True
rdf_dict = self.rest.create_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.device_id2, self.data.remote_array, extra_specs)
self.assertEqual(ref_dict, rdf_dict)
@mock.patch.object(rest.VMAXRest, 'get_rdf_group',
side_effect=[{'numDevices': 0}, {'numDevices': 0},
{'numDevices': 1}])
def test_get_metro_payload_info(self, mock_rdfg):
ref_payload = {"establish": 'true', "rdfType": 'RDF1'}
payload1 = self.rest.get_metro_payload_info(
self.data.array, ref_payload, self.data.rdf_group_no, {})
self.assertEqual(ref_payload, payload1)
payload2 = self.rest.get_metro_payload_info(
self.data.array, ref_payload, self.data.rdf_group_no,
{'metro_bias': True})
self.assertEqual('true', payload2['metroBias'])
ref_payload2 = {"establish": 'true', "rdfType": 'RDF1'}
payload3 = self.rest.get_metro_payload_info(
self.data.array, ref_payload2, self.data.rdf_group_no, {})
ref_payload3 = {"rdfType": 'NA', "format": 'true'}
self.assertEqual(ref_payload3, payload3)
def test_modify_rdf_device_pair(self): def test_modify_rdf_device_pair(self):
resource_name = "70/volume/00001" resource_name = "70/volume/00001"
common_opts = {"force": 'false', common_opts = {"force": 'false',
@ -2598,11 +2661,9 @@ class VMAXRestTest(test.TestCase):
"star": 'false', "star": 'false',
"hop2": 'false', "hop2": 'false',
"bypass": 'false'} "bypass": 'false'}
split_opts = deepcopy(common_opts) suspend_payload = {"action": "Suspend",
split_opts.update({"immediate": 'false'}) 'executionOption': 'ASYNCHRONOUS',
split_payload = {"action": "Split", "suspend": common_opts}
'executionOption': 'ASYNCHRONOUS',
"split": split_opts}
failover_opts = deepcopy(common_opts) failover_opts = deepcopy(common_opts)
failover_opts.update({"establish": 'true', failover_opts.update({"establish": 'true',
@ -2617,20 +2678,26 @@ class VMAXRestTest(test.TestCase):
return_value=(200, self.data.job_list[0])) as mock_mod: return_value=(200, self.data.job_list[0])) as mock_mod:
self.rest.modify_rdf_device_pair( self.rest.modify_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no, self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.extra_specs, split=True) self.data.extra_specs, suspend=True)
mock_mod.assert_called_once_with( mock_mod.assert_called_once_with(
self.data.array, 'replication', 'rdf_group', self.data.array, 'replication', 'rdf_group',
split_payload, resource_name=resource_name, suspend_payload, resource_name=resource_name,
private='/private') private='/private')
mock_mod.reset_mock() mock_mod.reset_mock()
self.rest.modify_rdf_device_pair( self.rest.modify_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no, self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.extra_specs, split=False) self.data.extra_specs, suspend=False)
mock_mod.assert_called_once_with( mock_mod.assert_called_once_with(
self.data.array, 'replication', 'rdf_group', self.data.array, 'replication', 'rdf_group',
failover_payload, resource_name=resource_name, failover_payload, resource_name=resource_name,
private='/private') private='/private')
@mock.patch.object(rest.VMAXRest, 'delete_resource')
def test_delete_rdf_pair(self, mock_del):
self.rest.delete_rdf_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no)
mock_del.assert_called_once()
def test_get_storage_group_rep(self): def test_get_storage_group_rep(self):
array = self.data.array array = self.data.array
source_group_name = self.data.storagegroup_name_source source_group_name = self.data.storagegroup_name_source
@ -2950,24 +3017,36 @@ class VMAXProvisionTest(test.TestCase):
self.data.array, 'no_workload_sg')) self.data.array, 'no_workload_sg'))
self.assertEqual(ref_settings2, sg_slo_settings2) self.assertEqual(ref_settings2, sg_slo_settings2)
def test_break_rdf_relationship(self): @mock.patch.object(rest.VMAXRest, 'wait_for_rdf_consistent_state')
@mock.patch.object(rest.VMAXRest, 'delete_rdf_pair')
@mock.patch.object(rest.VMAXRest, 'modify_rdf_device_pair')
def test_break_rdf_relationship(self, mock_mod, mock_del, mock_wait):
array = self.data.array array = self.data.array
device_id = self.data.device_id device_id = self.data.device_id
target_device = self.data.device_id2 target_device = self.data.device_id2
rdf_group_name = self.data.rdf_group_name rdf_group_name = self.data.rdf_group_name
rep_extra_specs = self.data.rep_extra_specs rep_extra_specs = self.data.rep_extra_specs
with mock.patch.object( self.provision.break_rdf_relationship(
self.provision.rest, 'modify_rdf_device_pair') as mod_rdf: array, device_id, target_device,
with mock.patch.object( rdf_group_name, rep_extra_specs, "Synchronized")
self.provision.rest, 'delete_rdf_pair') as del_rdf: mock_mod.assert_called_once_with(
self.provision.break_rdf_relationship( array, device_id, rdf_group_name, rep_extra_specs,
array, device_id, target_device, suspend=True)
rdf_group_name, rep_extra_specs, "Synchronized") mock_del.assert_called_once_with(
mod_rdf.assert_called_once_with( array, device_id, rdf_group_name)
array, device_id, rdf_group_name, rep_extra_specs, # sync still in progress
split=True) self.provision.break_rdf_relationship(
del_rdf.assert_called_once_with( array, device_id, target_device,
array, device_id, rdf_group_name) rdf_group_name, rep_extra_specs, "SyncInProg")
mock_wait.assert_called_once()
@mock.patch.object(provision.VMAXProvision, 'disable_group_replication')
@mock.patch.object(provision.VMAXProvision, 'delete_rdf_pair')
def test_break_metro_rdf_pair(self, mock_del, mock_disable):
self.provision.break_metro_rdf_pair(
self.data.array, self.data.device_id, self.data.device_id2,
self.data.rdf_group_no, self.data.rep_extra_specs, 'metro_grp')
mock_del.assert_called_once()
def test_delete_rdf_pair_async(self): def test_delete_rdf_pair_async(self):
with mock.patch.object( with mock.patch.object(
@ -2976,7 +3055,7 @@ class VMAXProvisionTest(test.TestCase):
extra_specs[utils.REP_MODE] = utils.REP_ASYNC extra_specs[utils.REP_MODE] = utils.REP_ASYNC
self.provision.delete_rdf_pair( self.provision.delete_rdf_pair(
self.data.array, self.data.device_id, self.data.array, self.data.device_id,
self.data.rdf_group_no, extra_specs) self.data.rdf_group_no, self.data.device_id2, extra_specs)
mock_del_rdf.assert_called_once() mock_del_rdf.assert_called_once()
def test_failover_volume(self): def test_failover_volume(self):
@ -2990,15 +3069,13 @@ class VMAXProvisionTest(test.TestCase):
array, device_id, rdf_group_name, array, device_id, rdf_group_name,
extra_specs, '', True) extra_specs, '', True)
mod_rdf.assert_called_once_with( mod_rdf.assert_called_once_with(
array, device_id, rdf_group_name, extra_specs, array, device_id, rdf_group_name, extra_specs)
split=False)
mod_rdf.reset_mock() mod_rdf.reset_mock()
self.provision.failover_volume( self.provision.failover_volume(
array, device_id, rdf_group_name, array, device_id, rdf_group_name,
extra_specs, '', False) extra_specs, '', False)
mod_rdf.assert_called_once_with( mod_rdf.assert_called_once_with(
array, device_id, rdf_group_name, extra_specs, array, device_id, rdf_group_name, extra_specs)
split=False)
def test_create_volume_group_success(self): def test_create_volume_group_success(self):
array = self.data.array array = self.data.array
@ -3341,28 +3418,26 @@ class VMAXCommonTest(test.TestCase):
volume, connector, extra_specs, masking_view_dict) volume, connector, extra_specs, masking_view_dict)
self.assertEqual(ref_dict, device_info_dict) self.assertEqual(ref_dict, device_info_dict)
def test_attach_volume_failed(self): @mock.patch.object(masking.VMAXMasking,
'check_if_rollback_action_for_masking_required')
@mock.patch.object(masking.VMAXMasking, 'setup_masking_view',
return_value={})
@mock.patch.object(common.VMAXCommon, 'find_host_lun_id',
return_value=({}, False, []))
def test_attach_volume_failed(self, mock_lun, mock_setup, mock_rollback):
volume = self.data.test_volume volume = self.data.test_volume
connector = self.data.connector connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs) extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
masking_view_dict = self.common._populate_masking_dict( masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs) volume, connector, extra_specs)
with mock.patch.object(self.masking, 'setup_masking_view', self.assertRaises(exception.VolumeBackendAPIException,
return_value={}): self.common._attach_volume, volume,
with mock.patch.object(self.common, 'find_host_lun_id', connector, extra_specs,
return_value=({}, False, [])): masking_view_dict)
with mock.patch.object( device_id = self.data.device_id
self.masking, (mock_rollback.assert_called_once_with(
'check_if_rollback_action_for_masking_required'): self.data.array, volume, device_id, {}))
self.assertRaises(exception.VolumeBackendAPIException,
self.common._attach_volume, volume,
connector, extra_specs,
masking_view_dict)
device_id = self.data.device_id
(self.masking.
check_if_rollback_action_for_masking_required.
assert_called_once_with(self.data.array, device_id, {}))
def test_terminate_connection(self): def test_terminate_connection(self):
volume = self.data.test_volume volume = self.data.test_volume
@ -3490,21 +3565,35 @@ class VMAXCommonTest(test.TestCase):
volume, host, extra_specs) volume, host, extra_specs)
self.assertEqual({}, maskedvols) self.assertEqual({}, maskedvols)
@mock.patch.object(common.VMAXCommon, 'get_remote_target_device',
return_value=VMAXCommonData.device_id2)
def test_find_host_lun_id_rep_extra_specs(self, mock_tgt):
self.common.find_host_lun_id(
self.data.test_volume, 'HostX',
self.data.extra_specs, self.data.rep_extra_specs)
mock_tgt.assert_called_once()
def test_get_masking_views_from_volume(self): def test_get_masking_views_from_volume(self):
array = self.data.array array = self.data.array
device_id = self.data.device_id device_id = self.data.device_id
host = 'HostX' host = 'HostX'
ref_mv_list = [self.data.masking_view_name_f] ref_mv_list = [self.data.masking_view_name_f]
maskingview_list = self.common.get_masking_views_from_volume( maskingview_list, __ = self.common.get_masking_views_from_volume(
array, device_id, host) array, self.data.test_volume, device_id, host)
self.assertEqual(ref_mv_list, maskingview_list) self.assertEqual(ref_mv_list, maskingview_list)
# is metro
with mock.patch.object(self.utils, 'is_metro_device',
return_value=True):
__, is_metro = self.common.get_masking_views_from_volume(
array, self.data.test_volume, device_id, host)
self.assertTrue(is_metro)
def test_get_masking_views_from_volume_wrong_host(self): def test_get_masking_views_from_volume_wrong_host(self):
array = self.data.array array = self.data.array
device_id = self.data.device_id device_id = self.data.device_id
host = 'DifferentHost' host = 'DifferentHost'
maskingview_list = self.common.get_masking_views_from_volume( maskingview_list, __ = self.common.get_masking_views_from_volume(
array, device_id, host) array, self.data.test_volume, device_id, host)
self.assertEqual([], maskingview_list) self.assertEqual([], maskingview_list)
def test_find_host_lun_id_no_host_check(self): def test_find_host_lun_id_no_host_check(self):
@ -3546,7 +3635,9 @@ class VMAXCommonTest(test.TestCase):
self.assertRaises(exception.VolumeBackendAPIException, self.assertRaises(exception.VolumeBackendAPIException,
self.common._initial_setup, volume) self.common._initial_setup, volume)
def test_populate_masking_dict(self): @mock.patch.object(common.VMAXCommon, 'get_remote_target_device',
return_value=VMAXCommonData.device_id2)
def test_populate_masking_dict(self, mock_tgt):
volume = self.data.test_volume volume = self.data.test_volume
connector = self.data.connector connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs) extra_specs = deepcopy(self.data.extra_specs)
@ -3555,6 +3646,18 @@ class VMAXCommonTest(test.TestCase):
masking_view_dict = self.common._populate_masking_dict( masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs) volume, connector, extra_specs)
self.assertEqual(ref_mv_dict, masking_view_dict) self.assertEqual(ref_mv_dict, masking_view_dict)
# Metro volume, pass in rep_extra_specs and retrieve target device
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.common._populate_masking_dict(
volume, connector, extra_specs, rep_extra_specs)
mock_tgt.assert_called_once()
# device_id is None
with mock.patch.object(self.common, '_find_device_on_array',
return_value=None):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._populate_masking_dict,
volume, connector, extra_specs)
def test_populate_masking_dict_no_slo(self): def test_populate_masking_dict_no_slo(self):
volume = self.data.test_volume volume = self.data.test_volume
@ -3838,27 +3941,41 @@ class VMAXCommonTest(test.TestCase):
mock_clean.assert_called_once_with( mock_clean.assert_called_once_with(
volume, volume_name, device_id, extra_specs) volume, volume_name, device_id, extra_specs)
@mock.patch.object(utils.VMAXUtils, 'is_volume_failed_over',
side_effect=[True, False])
@mock.patch.object(common.VMAXCommon, '_get_replication_extra_specs', @mock.patch.object(common.VMAXCommon, '_get_replication_extra_specs',
return_value=VMAXCommonData.rep_extra_specs) return_value=VMAXCommonData.rep_extra_specs)
def test_get_target_wwns_from_masking_view(self, mock_rep_specs): def test_get_target_wwns_from_masking_view(self, mock_rep_specs, mock_fo):
target_wwns = self.common.get_target_wwns_from_masking_view(
self.data.test_volume, self.data.connector)
ref_wwns = [self.data.wwnn1] ref_wwns = [self.data.wwnn1]
self.assertEqual(ref_wwns, target_wwns) for x in range(0, 2):
# Volume is failed over target_wwns = self.common._get_target_wwns_from_masking_view(
with mock.patch.object(self.utils, 'is_volume_failed_over', self.data.device_id, self.data.connector['host'],
return_value=True): self.data.extra_specs)
self.common.get_target_wwns_from_masking_view( self.assertEqual(ref_wwns, target_wwns)
self.data.test_volume, self.data.connector)
mock_rep_specs.assert_called_once()
def test_get_target_wwns_from_masking_view_no_mv(self): def test_get_target_wwns_from_masking_view_no_mv(self):
with mock.patch.object(self.common, 'get_masking_views_from_volume', with mock.patch.object(self.common, '_get_masking_views_from_volume',
return_value=None): return_value=None):
target_wwns = self.common.get_target_wwns_from_masking_view( target_wwns = self.common._get_target_wwns_from_masking_view(
self.data.test_volume, self.data.connector) self.data.device_id, self.data.connector['host'],
self.data.extra_specs)
self.assertEqual([], target_wwns) self.assertEqual([], target_wwns)
@mock.patch.object(common.VMAXCommon, '_get_replication_extra_specs',
return_value=VMAXCommonData.rep_extra_specs)
@mock.patch.object(common.VMAXCommon, 'get_remote_target_device',
return_value=(VMAXCommonData.device_id2,))
@mock.patch.object(utils.VMAXUtils, 'is_metro_device',
side_effect=[False, True])
def test_get_target_wwns(self, mock_metro, mock_tgt, mock_specs):
__, metro_wwns = self.common.get_target_wwns_from_masking_view(
self.data.test_volume, self.data.connector)
self.assertEqual([], metro_wwns)
# Is metro volume
__, metro_wwns = self.common.get_target_wwns_from_masking_view(
self.data.test_volume, self.data.connector)
self.assertEqual([self.data.wwnn1], metro_wwns)
def test_get_port_group_from_masking_view(self): def test_get_port_group_from_masking_view(self):
array = self.data.array array = self.data.array
maskingview_name = self.data.masking_view_name_f maskingview_name = self.data.masking_view_name_f
@ -4724,11 +4841,20 @@ class VMAXFCTest(test.TestCase):
def test_get_zoning_mappings_no_mv(self): def test_get_zoning_mappings_no_mv(self):
with mock.patch.object(self.common, 'get_masking_views_from_volume', with mock.patch.object(self.common, 'get_masking_views_from_volume',
return_value=None): return_value=(None, False)):
zoning_mappings = self.driver._get_zoning_mappings( zoning_mappings = self.driver._get_zoning_mappings(
self.data.test_volume, self.data.connector) self.data.test_volume, self.data.connector)
self.assertEqual({}, zoning_mappings) self.assertEqual({}, zoning_mappings)
@mock.patch.object(
common.VMAXCommon, 'get_masking_views_from_volume',
return_value=([VMAXCommonData.masking_view_name_f], True))
def test_get_zoning_mappings_metro(self, mock_mv):
ref_mappings = self.data.zoning_mappings_metro
zoning_mappings = self.driver._get_zoning_mappings(
self.data.test_volume, self.data.connector)
self.assertEqual(ref_mappings, zoning_mappings)
def test_cleanup_zones_other_vols_mapped(self): def test_cleanup_zones_other_vols_mapped(self):
ref_data = {'driver_volume_type': 'fibre_channel', ref_data = {'driver_volume_type': 'fibre_channel',
'data': {}} 'data': {}}
@ -4754,7 +4880,7 @@ class VMAXFCTest(test.TestCase):
driver = fc.VMAXFCDriver(configuration=self.configuration) driver = fc.VMAXFCDriver(configuration=self.configuration)
with mock.patch.object(driver.common, with mock.patch.object(driver.common,
'get_target_wwns_from_masking_view', 'get_target_wwns_from_masking_view',
return_value=self.data.target_wwns): return_value=(self.data.target_wwns, [])):
targets, target_map = driver._build_initiator_target_map( targets, target_map = driver._build_initiator_target_map(
self.data.test_volume, self.data.connector) self.data.test_volume, self.data.connector)
self.assertEqual(ref_target_map, target_map) self.assertEqual(ref_target_map, target_map)
@ -4946,7 +5072,7 @@ class VMAXISCSITest(test.TestCase):
data = self.driver.get_iscsi_dict(device_info, volume) data = self.driver.get_iscsi_dict(device_info, volume)
self.assertEqual(ref_data, data) self.assertEqual(ref_data, data)
self.driver.vmax_get_iscsi_properties.assert_called_once_with( self.driver.vmax_get_iscsi_properties.assert_called_once_with(
volume, ip_and_iqn, True, host_lun_id) volume, ip_and_iqn, True, host_lun_id, None, None)
def test_get_iscsi_dict_exception(self): def test_get_iscsi_dict_exception(self):
device_info = {'ip_and_iqn': ''} device_info = {'ip_and_iqn': ''}
@ -4954,6 +5080,22 @@ class VMAXISCSITest(test.TestCase):
self.driver.get_iscsi_dict, self.driver.get_iscsi_dict,
device_info, self.data.test_volume) device_info, self.data.test_volume)
def test_get_iscsi_dict_metro(self):
ip_and_iqn = self.common._find_ip_and_iqns(
self.data.array, self.data.port_group_name_i)
host_lun_id = self.data.iscsi_device_info_metro['hostlunid']
volume = self.data.test_volume
device_info = self.data.iscsi_device_info_metro
ref_data = {'driver_volume_type': 'iscsi', 'data': {}}
with mock.patch.object(
self.driver, 'vmax_get_iscsi_properties', return_value={}):
data = self.driver.get_iscsi_dict(device_info, volume)
self.assertEqual(ref_data, data)
self.driver.vmax_get_iscsi_properties.assert_called_once_with(
volume, ip_and_iqn, True, host_lun_id,
self.data.iscsi_device_info_metro['metro_ip_and_iqn'],
self.data.iscsi_device_info_metro['metro_hostlunid'])
def test_vmax_get_iscsi_properties_one_target_no_auth(self): def test_vmax_get_iscsi_properties_one_target_no_auth(self):
vol = deepcopy(self.data.test_volume) vol = deepcopy(self.data.test_volume)
ip_and_iqn = self.common._find_ip_and_iqns( ip_and_iqn = self.common._find_ip_and_iqns(
@ -4966,7 +5108,7 @@ class VMAXISCSITest(test.TestCase):
'target_lun': host_lun_id, 'target_lun': host_lun_id,
'volume_id': self.data.test_volume.id} 'volume_id': self.data.test_volume.id}
iscsi_properties = self.driver.vmax_get_iscsi_properties( iscsi_properties = self.driver.vmax_get_iscsi_properties(
vol, ip_and_iqn, True, host_lun_id) vol, ip_and_iqn, True, host_lun_id, [], None)
self.assertEqual(type(ref_properties), type(iscsi_properties)) self.assertEqual(type(ref_properties), type(iscsi_properties))
self.assertEqual(ref_properties, iscsi_properties) self.assertEqual(ref_properties, iscsi_properties)
@ -4986,7 +5128,7 @@ class VMAXISCSITest(test.TestCase):
'target_lun': host_lun_id, 'target_lun': host_lun_id,
'volume_id': self.data.test_volume.id} 'volume_id': self.data.test_volume.id}
iscsi_properties = self.driver.vmax_get_iscsi_properties( iscsi_properties = self.driver.vmax_get_iscsi_properties(
self.data.test_volume, ip_and_iqn, True, host_lun_id) self.data.test_volume, ip_and_iqn, True, host_lun_id, [], None)
self.assertEqual(ref_properties, iscsi_properties) self.assertEqual(ref_properties, iscsi_properties)
def test_vmax_get_iscsi_properties_auth(self): def test_vmax_get_iscsi_properties_auth(self):
@ -5017,10 +5159,33 @@ class VMAXISCSITest(test.TestCase):
'auth_username': 'auth_username', 'auth_username': 'auth_username',
'auth_password': 'auth_secret'} 'auth_password': 'auth_secret'}
iscsi_properties = self.driver.vmax_get_iscsi_properties( iscsi_properties = self.driver.vmax_get_iscsi_properties(
vol, ip_and_iqn, True, host_lun_id) vol, ip_and_iqn, True, host_lun_id, None, None)
self.assertEqual(ref_properties, iscsi_properties) self.assertEqual(ref_properties, iscsi_properties)
self.driver.configuration = backup_conf self.driver.configuration = backup_conf
def test_vmax_get_iscsi_properties_metro(self):
ip_and_iqn = [{'ip': self.data.ip, 'iqn': self.data.iqn}]
total_ip_list = [{'ip': self.data.ip, 'iqn': self.data.iqn},
{'ip': self.data.ip2, 'iqn': self.data.iqn2}]
host_lun_id = self.data.iscsi_device_info['hostlunid']
host_lun_id2 = self.data.iscsi_device_info_metro['metro_hostlunid']
ref_properties = {
'target_portals': (
[t['ip'] + ":3260" for t in total_ip_list]),
'target_iqns': (
[t['iqn'].split(",")[0] for t in total_ip_list]),
'target_luns': [host_lun_id, host_lun_id2],
'target_discovered': True,
'target_iqn': ip_and_iqn[0]['iqn'].split(",")[0],
'target_portal': ip_and_iqn[0]['ip'] + ":3260",
'target_lun': host_lun_id,
'volume_id': self.data.test_volume.id}
iscsi_properties = self.driver.vmax_get_iscsi_properties(
self.data.test_volume, ip_and_iqn, True, host_lun_id,
self.data.iscsi_device_info_metro['metro_ip_and_iqn'],
self.data.iscsi_device_info_metro['metro_hostlunid'])
self.assertEqual(ref_properties, iscsi_properties)
def test_terminate_connection(self): def test_terminate_connection(self):
with mock.patch.object(self.common, 'terminate_connection'): with mock.patch.object(self.common, 'terminate_connection'):
self.driver.terminate_connection(self.data.test_volume, self.driver.terminate_connection(self.data.test_volume,
@ -5137,6 +5302,8 @@ class VMAXMaskingTest(test.TestCase):
configuration.config_group = 'MaskingTests' configuration.config_group = 'MaskingTests'
self._gather_info = common.VMAXCommon._gather_info self._gather_info = common.VMAXCommon._gather_info
common.VMAXCommon._gather_info = mock.Mock() common.VMAXCommon._gather_info = mock.Mock()
rest.VMAXRest._establish_rest_session = mock.Mock(
return_value=FakeRequestsSession())
driver = common.VMAXCommon( driver = common.VMAXCommon(
'iSCSI', self.data.version, configuration=configuration) 'iSCSI', self.data.version, configuration=configuration)
driver_fc = common.VMAXCommon( driver_fc = common.VMAXCommon(
@ -5180,7 +5347,8 @@ class VMAXMaskingTest(test.TestCase):
@mock.patch.object( @mock.patch.object(
rest.VMAXRest, rest.VMAXRest,
'get_element_from_masking_view', 'get_element_from_masking_view',
side_effect=[VMAXCommonData.port_group_name_i, Exception]) side_effect=[VMAXCommonData.port_group_name_i,
Exception('Exception')])
def test_get_or_create_masking_view_and_map_lun( def test_get_or_create_masking_view_and_map_lun(
self, mock_masking_view_element, mock_masking, mock_move, self, mock_masking_view_element, mock_masking, mock_move,
mock_add_volume): mock_add_volume):
@ -5560,8 +5728,8 @@ class VMAXMaskingTest(test.TestCase):
'get_storage_groups_from_volume', 'get_storage_groups_from_volume',
side_effect=[ side_effect=[
exception.VolumeBackendAPIException, exception.VolumeBackendAPIException,
self.data.defaultstoragegroup_name, self.data.storagegroup_list,
self.data.defaultstoragegroup_name, None, self.data.storagegroup_list, None,
None, ]): None, ]):
self.assertRaises( self.assertRaises(
exception.VolumeBackendAPIException, exception.VolumeBackendAPIException,
@ -6064,6 +6232,13 @@ class VMAXCommonReplicationTest(test.TestCase):
rest.VMAXRest._establish_rest_session = mock.Mock( rest.VMAXRest._establish_rest_session = mock.Mock(
return_value=FakeRequestsSession()) return_value=FakeRequestsSession())
driver = fc.VMAXFCDriver(configuration=configuration) driver = fc.VMAXFCDriver(configuration=configuration)
iscsi_fake_xml = FakeXML().create_fake_config_file(
config_group, self.data.port_group_name_i)
iscsi_config = FakeConfiguration(
iscsi_fake_xml, config_group,
replication_device=self.replication_device)
iscsi_driver = iscsi.VMAXISCSIDriver(configuration=iscsi_config)
self.iscsi_common = iscsi_driver.common
self.driver = driver self.driver = driver
self.common = self.driver.common self.common = self.driver.common
self.masking = self.common.masking self.masking = self.common.masking
@ -6087,6 +6262,16 @@ class VMAXCommonReplicationTest(test.TestCase):
self.fake_xml, config_group, self.fake_xml, config_group,
replication_device=self.async_rep_device) replication_device=self.async_rep_device)
self.async_driver = fc.VMAXFCDriver(configuration=async_configuration) self.async_driver = fc.VMAXFCDriver(configuration=async_configuration)
self.metro_rep_device = {
'target_device_id': self.data.remote_array,
'remote_port_group': self.data.port_group_name_f,
'remote_pool': self.data.srp2,
'rdf_group_label': self.data.rdf_group_name,
'allow_extend': 'True', 'mode': 'metro'}
metro_configuration = FakeConfiguration(
self.fake_xml, config_group,
replication_device=self.metro_rep_device)
self.metro_driver = fc.VMAXFCDriver(configuration=metro_configuration)
def test_get_replication_info(self): def test_get_replication_info(self):
self.common._get_replication_info() self.common._get_replication_info()
@ -6186,6 +6371,17 @@ class VMAXCommonReplicationTest(test.TestCase):
self.common._unmap_lun(self.data.test_volume, self.data.connector) self.common._unmap_lun(self.data.test_volume, self.data.connector)
mock_es.assert_called_once_with(extra_specs, rep_config) mock_es.assert_called_once_with(extra_specs, rep_config)
@mock.patch.object(common.VMAXCommon, '_remove_members')
@mock.patch.object(common.VMAXCommon,
'_get_replication_extra_specs',
return_value=VMAXCommonData.rep_extra_specs)
@mock.patch.object(utils.VMAXUtils, 'is_metro_device', return_value=True)
def test_unmap_lun_metro(self, mock_md, mock_es, mock_rm):
extra_specs = deepcopy(self.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.common._unmap_lun(self.data.test_volume, self.data.connector)
self.assertEqual(2, mock_rm.call_count)
@mock.patch.object(utils.VMAXUtils, 'is_volume_failed_over', @mock.patch.object(utils.VMAXUtils, 'is_volume_failed_over',
return_value=True) return_value=True)
def test_initialize_connection_vol_failed_over(self, mock_fo): def test_initialize_connection_vol_failed_over(self, mock_fo):
@ -6201,6 +6397,57 @@ class VMAXCommonReplicationTest(test.TestCase):
self.data.test_volume, self.data.connector) self.data.test_volume, self.data.connector)
mock_es.assert_called_once_with(extra_specs, rep_config) mock_es.assert_called_once_with(extra_specs, rep_config)
@mock.patch.object(utils.VMAXUtils, 'is_metro_device', return_value=True)
def test_initialize_connection_vol_metro(self, mock_md):
metro_connector = deepcopy(self.data.connector)
metro_connector['multipath'] = True
info_dict = self.common.initialize_connection(
self.data.test_volume, metro_connector)
ref_dict = {'array': self.data.array,
'device_id': self.data.device_id,
'hostlunid': 3,
'maskingview': self.data.masking_view_name_f,
'metro_hostlunid': 3}
self.assertEqual(ref_dict, info_dict)
@mock.patch.object(rest.VMAXRest, 'get_iscsi_ip_address_and_iqn',
return_value=([VMAXCommonData.ip],
VMAXCommonData.initiator))
@mock.patch.object(common.VMAXCommon, '_get_replication_extra_specs',
return_value=VMAXCommonData.rep_extra_specs)
@mock.patch.object(utils.VMAXUtils, 'is_metro_device', return_value=True)
def test_initialize_connection_vol_metro_iscsi(self, mock_md, mock_es,
mock_ip):
metro_connector = deepcopy(self.data.connector)
metro_connector['multipath'] = True
info_dict = self.iscsi_common.initialize_connection(
self.data.test_volume, metro_connector)
ref_dict = {'array': self.data.array,
'device_id': self.data.device_id,
'hostlunid': 3,
'maskingview': self.data.masking_view_name_f,
'ip_and_iqn': [{'ip': self.data.ip,
'iqn': self.data.initiator}],
'metro_hostlunid': 3,
'is_multipath': True,
'metro_ip_and_iqn': [{'ip': self.data.ip,
'iqn': self.data.initiator}]}
self.assertEqual(ref_dict, info_dict)
@mock.patch.object(utils.VMAXUtils, 'is_metro_device', return_value=True)
def test_initialize_connection_no_multipath_iscsi(self, mock_md):
info_dict = self.iscsi_common.initialize_connection(
self.data.test_volume, self.data.connector)
self.assertIsNone(info_dict)
def test_attach_metro_volume(self):
rep_extra_specs = deepcopy(VMAXCommonData.rep_extra_specs)
rep_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
hostlunid, remote_port_group = self.common._attach_metro_volume(
self.data.test_volume, self.data.connector,
self.data.extra_specs, rep_extra_specs)
self.assertEqual(self.data.port_group_name_f, remote_port_group)
@mock.patch.object(rest.VMAXRest, 'is_vol_in_rep_session', @mock.patch.object(rest.VMAXRest, 'is_vol_in_rep_session',
return_value=(False, False, None)) return_value=(False, False, None))
@mock.patch.object(common.VMAXCommon, 'extend_volume_is_replicated') @mock.patch.object(common.VMAXCommon, 'extend_volume_is_replicated')
@ -6267,27 +6514,25 @@ class VMAXCommonReplicationTest(test.TestCase):
'device_id': self.data.device_id2}, rep_data) 'device_id': self.data.device_id2}, rep_data)
mock_create.assert_not_called() mock_create.assert_not_called()
@mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
@mock.patch.object(common.VMAXCommon, '_cleanup_remote_target') @mock.patch.object(common.VMAXCommon, '_cleanup_remote_target')
def test_cleanup_lun_replication_success(self, mock_clean, mock_rm): def test_cleanup_lun_replication_success(self, mock_clean):
rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f rep_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.common.cleanup_lun_replication( self.common.cleanup_lun_replication(
self.data.test_volume, "1", self.data.device_id, self.data.test_volume, "1", self.data.device_id,
self.extra_specs) self.extra_specs)
mock_clean.assert_called_once_with( mock_clean.assert_called_once_with(
self.data.array, self.data.remote_array, self.data.device_id, self.data.array, self.data.test_volume,
self.data.remote_array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_no, "1", self.data.device_id2, self.data.rdf_group_no, "1",
rep_extra_specs) rep_extra_specs)
mock_rm.assert_called_once_with(
self.data.remote_array, self.data.test_volume,
self.data.device_id2, "1", rep_extra_specs, False)
# Cleanup legacy replication # Cleanup legacy replication
self.common.cleanup_lun_replication( self.common.cleanup_lun_replication(
self.data.test_legacy_vol, "1", self.data.device_id, self.data.test_legacy_vol, "1", self.data.device_id,
self.extra_specs) self.extra_specs)
mock_clean.assert_called_once_with( mock_clean.assert_called_once_with(
self.data.array, self.data.remote_array, self.data.device_id, self.data.array, self.data.test_volume,
self.data.remote_array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_no, "1", self.data.device_id2, self.data.rdf_group_no, "1",
rep_extra_specs) rep_extra_specs)
@ -6300,30 +6545,86 @@ class VMAXCommonReplicationTest(test.TestCase):
self.extra_specs) self.extra_specs)
mock_clean.assert_not_called() mock_clean.assert_not_called()
def test_cleanup_lun_replication_exception(self): @mock.patch.object(
common.VMAXCommon, 'get_remote_target_device',
return_value=(VMAXCommonData.device_id2, '', '', '', ''))
@mock.patch.object(common.VMAXCommon,
'_add_volume_to_async_rdf_managed_grp')
def test_cleanup_lun_replication_exception(self, mock_add, mock_tgt):
self.assertRaises(exception.VolumeBackendAPIException, self.assertRaises(exception.VolumeBackendAPIException,
self.common.cleanup_lun_replication, self.common.cleanup_lun_replication,
self.data.test_volume, "1", self.data.device_id, self.data.test_volume, "1", self.data.device_id,
self.extra_specs) self.extra_specs)
# is metro or async volume
extra_specs = deepcopy(self.extra_specs)
extra_specs[utils.REP_MODE] = utils.REP_METRO
self.assertRaises(exception.VolumeBackendAPIException,
self.common.cleanup_lun_replication,
self.data.test_volume, "1", self.data.device_id,
extra_specs)
mock_add.assert_called_once()
@mock.patch.object(common.VMAXCommon, '_cleanup_metro_target')
@mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group')
@mock.patch.object(common.VMAXCommon, '_delete_from_srp') @mock.patch.object(common.VMAXCommon, '_delete_from_srp')
@mock.patch.object(provision.VMAXProvision, 'break_rdf_relationship') @mock.patch.object(provision.VMAXProvision, 'break_rdf_relationship')
def test_cleanup_remote_target(self, mock_break, mock_del): def test_cleanup_remote_target(self, mock_break, mock_del,
mock_rm, mock_clean_metro):
with mock.patch.object(self.rest, 'are_vols_rdf_paired', with mock.patch.object(self.rest, 'are_vols_rdf_paired',
return_value=(False, '', '')): return_value=(False, '', '')):
self.common._cleanup_remote_target( self.common._cleanup_remote_target(
self.data.array, self.data.remote_array, self.data.device_id, self.data.array, self.data.test_volume,
self.data.remote_array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_name, self.data.device_id2, self.data.rdf_group_name,
"vol1", self.data.rep_extra_specs) "vol1", self.data.rep_extra_specs)
mock_break.assert_not_called() mock_break.assert_not_called()
self.common._cleanup_remote_target( self.common._cleanup_remote_target(
self.data.array, self.data.remote_array, self.data.device_id, self.data.array, self.data.test_volume,
self.data.remote_array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_name, self.data.device_id2, self.data.rdf_group_name,
"vol1", self.data.rep_extra_specs) "vol1", self.data.rep_extra_specs)
mock_break.assert_called_once_with( mock_break.assert_called_once_with(
self.data.array, self.data.device_id, self.data.array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_name, self.data.device_id2, self.data.rdf_group_name,
self.data.rep_extra_specs, "Synchronized") self.data.rep_extra_specs, "Synchronized")
# is metro volume
with mock.patch.object(self.utils, 'is_metro_device',
return_value=True):
self.common._cleanup_remote_target(
self.data.array, self.data.test_volume,
self.data.remote_array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_name,
"vol1", self.data.rep_extra_specs)
mock_clean_metro.assert_called_once()
def test_cleanup_remote_target_exception(self):
extra_specs = deepcopy(self.data.rep_extra_specs)
extra_specs['mode'] = utils.REP_METRO
self.assertRaises(exception.VolumeBackendAPIException,
self.metro_driver.common._cleanup_remote_target,
self.data.array, self.data.test_volume,
self.data.remote_array,
self.data.device_id, self.data.device_id2,
self.data.rdf_group_name, "vol1", extra_specs)
@mock.patch.object(provision.VMAXProvision, 'enable_group_replication')
@mock.patch.object(rest.VMAXRest, 'get_num_vols_in_sg',
side_effect=[2, 0])
def test_cleanup_metro_target(self, mock_vols, mock_enable):
# allow delete is True
specs = {'allow_del_metro': True}
for x in range(0, 2):
self.common._cleanup_metro_target(
self.data.array, self.data.device_id, self.data.device_id2,
self.data.rdf_group_no, specs)
mock_enable.assert_called_once()
# allow delete is False
specs['allow_del_metro'] = False
self.assertRaises(exception.VolumeBackendAPIException,
self.common._cleanup_metro_target,
self.data.array, self.data.device_id,
self.data.device_id2,
self.data.rdf_group_no, specs)
@mock.patch.object(common.VMAXCommon, @mock.patch.object(common.VMAXCommon,
'_remove_vol_and_cleanup_replication') '_remove_vol_and_cleanup_replication')
@ -6430,7 +6731,7 @@ class VMAXCommonReplicationTest(test.TestCase):
side_effect=[None, VMAXCommonData.device_id, side_effect=[None, VMAXCommonData.device_id,
VMAXCommonData.device_id, VMAXCommonData.device_id]) VMAXCommonData.device_id, VMAXCommonData.device_id])
@mock.patch.object( @mock.patch.object(
common.VMAXCommon, 'get_masking_views_from_volume', common.VMAXCommon, '_get_masking_views_from_volume',
side_effect=['OS-host-MV', None, exception.VolumeBackendAPIException]) side_effect=['OS-host-MV', None, exception.VolumeBackendAPIException])
def test_recover_volumes_on_failback(self, mock_mv, mock_dev): def test_recover_volumes_on_failback(self, mock_mv, mock_dev):
recovery1 = self.common.recover_volumes_on_failback( recovery1 = self.common.recover_volumes_on_failback(
@ -6498,6 +6799,13 @@ class VMAXCommonReplicationTest(test.TestCase):
self.data.failed_resource, self.data.test_volume, self.data.failed_resource, self.data.test_volume,
self.data.device_id, 'vol1', '1', self.data.device_id, 'vol1', '1',
self.data.extra_specs_rep_enabled) self.data.extra_specs_rep_enabled)
with mock.patch.object(self.utils, 'is_metro_device',
return_value=True):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.extend_volume_is_replicated,
self.data.array, self.data.test_volume,
self.data.device_id, 'vol1', '1',
self.data.extra_specs_rep_enabled)
@mock.patch.object(common.VMAXCommon, 'add_volume_to_replication_group') @mock.patch.object(common.VMAXCommon, 'add_volume_to_replication_group')
@mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members') @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
@ -6545,14 +6853,12 @@ class VMAXCommonReplicationTest(test.TestCase):
# Path one - disable compression # Path one - disable compression
extra_specs1 = deepcopy(self.extra_specs) extra_specs1 = deepcopy(self.extra_specs)
extra_specs1[utils.DISABLECOMPRESSION] = "true" extra_specs1[utils.DISABLECOMPRESSION] = "true"
ref_specs1 = deepcopy(self.data.rep_extra_specs) ref_specs1 = deepcopy(self.data.rep_extra_specs2)
ref_specs1[utils.PORTGROUPNAME] = self.data.port_group_name_f
rep_extra_specs1 = self.common._get_replication_extra_specs( rep_extra_specs1 = self.common._get_replication_extra_specs(
extra_specs1, rep_config) extra_specs1, rep_config)
self.assertEqual(ref_specs1, rep_extra_specs1) self.assertEqual(ref_specs1, rep_extra_specs1)
# Path two - disable compression, not all flash # Path two - disable compression, not all flash
ref_specs2 = deepcopy(self.data.rep_extra_specs) ref_specs2 = deepcopy(self.data.rep_extra_specs2)
ref_specs2[utils.PORTGROUPNAME] = self.data.port_group_name_f
with mock.patch.object(self.rest, 'is_compression_capable', with mock.patch.object(self.rest, 'is_compression_capable',
return_value=False): return_value=False):
rep_extra_specs2 = self.common._get_replication_extra_specs( rep_extra_specs2 = self.common._get_replication_extra_specs(


@ -187,6 +187,9 @@ class VMAXCommon(object):
if self.active_backend_id == self.rep_config['array']: if self.active_backend_id == self.rep_config['array']:
self.failover = True self.failover = True
self.extend_replicated_vol = self.rep_config['allow_extend'] self.extend_replicated_vol = self.rep_config['allow_extend']
self.allow_delete_metro = (
self.rep_config['allow_delete_metro']
if self.rep_config.get('allow_delete_metro') else False)
# use self.replication_enabled for update_volume_stats # use self.replication_enabled for update_volume_stats
self.replication_enabled = True self.replication_enabled = True
LOG.debug("The replication configuration is %(rep_config)s.", LOG.debug("The replication configuration is %(rep_config)s.",
@ -466,9 +469,10 @@ class VMAXCommon(object):
:param connector: the connector Object :param connector: the connector Object
""" """
extra_specs = self._initial_setup(volume) extra_specs = self._initial_setup(volume)
rep_extra_specs = self._get_replication_extra_specs(
extra_specs, self.rep_config)
if self.utils.is_volume_failed_over(volume): if self.utils.is_volume_failed_over(volume):
extra_specs = self._get_replication_extra_specs( extra_specs = rep_extra_specs
extra_specs, self.rep_config)
volume_name = volume.name volume_name = volume.name
async_grp = None async_grp = None
LOG.info("Unmap volume: %(volume)s.", LOG.info("Unmap volume: %(volume)s.",
@ -492,8 +496,7 @@ class VMAXCommon(object):
return return
source_nf_sg = None source_nf_sg = None
array = extra_specs[utils.ARRAY] array = extra_specs[utils.ARRAY]
if (self.utils.is_replication_enabled(extra_specs) and if self.utils.does_vol_need_rdf_management_group(extra_specs):
extra_specs.get(utils.REP_MODE, None) == utils.REP_ASYNC):
async_grp = self.utils.get_async_rdf_managed_grp_name( async_grp = self.utils.get_async_rdf_managed_grp_name(
self.rep_config) self.rep_config)
if len(source_storage_group_list) > 1: if len(source_storage_group_list) > 1:
@ -504,11 +507,25 @@ class VMAXCommon(object):
if source_nf_sg: if source_nf_sg:
# Remove volume from non fast storage group # Remove volume from non fast storage group
self.masking.remove_volume_from_sg( self.masking.remove_volume_from_sg(
array, device_info['device_id'], volume_name, storage_group, array, device_info['device_id'], volume_name, source_nf_sg,
extra_specs) extra_specs)
else: else:
self._remove_members(array, volume, device_info['device_id'], self._remove_members(array, volume, device_info['device_id'],
extra_specs, connector, async_grp=async_grp) extra_specs, connector, async_grp=async_grp)
if self.utils.is_metro_device(self.rep_config, extra_specs):
# Need to remove from remote masking view
device_info, __, __ = (self.find_host_lun_id(
volume, host, extra_specs, rep_extra_specs))
if 'hostlunid' in device_info:
self._remove_members(
rep_extra_specs[utils.ARRAY], volume,
device_info['device_id'],
rep_extra_specs, connector, async_grp=async_grp)
else:
# Make an attempt to clean up initiator group
self.masking.attempt_ig_cleanup(
connector, self.protocol, rep_extra_specs[utils.ARRAY],
True)
def initialize_connection(self, volume, connector): def initialize_connection(self, volume, connector):
"""Initializes the connection and returns device and connection info. """Initializes the connection and returns device and connection info.
@ -540,13 +557,20 @@ class VMAXCommon(object):
""" """
extra_specs = self._initial_setup(volume) extra_specs = self._initial_setup(volume)
is_multipath = connector.get('multipath', False) is_multipath = connector.get('multipath', False)
rep_extra_specs = self._get_replication_extra_specs(
extra_specs, self.rep_config)
remote_port_group = None
volume_name = volume.name volume_name = volume.name
LOG.info("Initialize connection: %(volume)s.", LOG.info("Initialize connection: %(volume)s.",
{'volume': volume_name}) {'volume': volume_name})
if (self.utils.is_metro_device(self.rep_config, extra_specs)
and not is_multipath and self.protocol.lower() == 'iscsi'):
LOG.warning("Multipathing is not correctly enabled "
"on your system.")
return
if self.utils.is_volume_failed_over(volume): if self.utils.is_volume_failed_over(volume):
extra_specs = self._get_replication_extra_specs( extra_specs = rep_extra_specs
extra_specs, self.rep_config)
device_info_dict, is_live_migration, source_storage_group_list = ( device_info_dict, is_live_migration, source_storage_group_list = (
self.find_host_lun_id(volume, connector['host'], extra_specs)) self.find_host_lun_id(volume, connector['host'], extra_specs))
masking_view_dict = self._populate_masking_dict( masking_view_dict = self._populate_masking_dict(
@ -565,6 +589,21 @@ class VMAXCommon(object):
self.get_port_group_from_masking_view( self.get_port_group_from_masking_view(
extra_specs[utils.ARRAY], extra_specs[utils.ARRAY],
device_info_dict['maskingview'])) device_info_dict['maskingview']))
if self.utils.is_metro_device(self.rep_config, extra_specs):
remote_info_dict, __, __ = (
self.find_host_lun_id(volume, connector['host'],
extra_specs, rep_extra_specs))
if remote_info_dict.get('hostlunid') is None:
# Need to attach on remote side
metro_host_lun, remote_port_group = (
self._attach_metro_volume(
volume, connector, extra_specs, rep_extra_specs))
else:
metro_host_lun = remote_info_dict['hostlunid']
remote_port_group = self.get_port_group_from_masking_view(
rep_extra_specs[utils.ARRAY],
remote_info_dict['maskingview'])
device_info_dict['metro_hostlunid'] = metro_host_lun
else: else:
if is_live_migration: if is_live_migration:
@ -597,6 +636,11 @@ class VMAXCommon(object):
self._attach_volume( self._attach_volume(
volume, connector, extra_specs, masking_view_dict, volume, connector, extra_specs, masking_view_dict,
is_live_migration)) is_live_migration))
if self.utils.is_metro_device(self.rep_config, extra_specs):
# Need to attach on remote side
metro_host_lun, remote_port_group = self._attach_metro_volume(
volume, connector, extra_specs, rep_extra_specs)
device_info_dict['metro_hostlunid'] = metro_host_lun
if is_live_migration: if is_live_migration:
self.masking.post_live_migration( self.masking.post_live_migration(
masking_view_dict, extra_specs) masking_view_dict, extra_specs)
@ -604,17 +648,46 @@ class VMAXCommon(object):
device_info_dict['ip_and_iqn'] = ( device_info_dict['ip_and_iqn'] = (
self._find_ip_and_iqns( self._find_ip_and_iqns(
extra_specs[utils.ARRAY], port_group_name)) extra_specs[utils.ARRAY], port_group_name))
if self.utils.is_metro_device(self.rep_config, extra_specs):
device_info_dict['metro_ip_and_iqn'] = (
self._find_ip_and_iqns(
rep_extra_specs[utils.ARRAY], remote_port_group))
device_info_dict['is_multipath'] = is_multipath device_info_dict['is_multipath'] = is_multipath
return device_info_dict return device_info_dict
def _attach_metro_volume(self, volume, connector,
extra_specs, rep_extra_specs):
"""Helper method to attach a metro volume.
Metro protected volumes point to two VMAX devices on different arrays,
which are presented as a single device to the host. This method
masks the remote device to the host.
:param volume: the volume object
:param connector: the connector dict
:param extra_specs: the extra specifications
:param rep_extra_specs: replication extra specifications
:return: hostlunid, remote_port_group
"""
remote_mv_dict = self._populate_masking_dict(
volume, connector, extra_specs, rep_extra_specs)
remote_info_dict, remote_port_group = (
self._attach_volume(
volume, connector, extra_specs, remote_mv_dict,
rep_extra_specs=rep_extra_specs))
remote_port_group = self.get_port_group_from_masking_view(
rep_extra_specs[utils.ARRAY], remote_info_dict['maskingview'])
return remote_info_dict['hostlunid'], remote_port_group
def _attach_volume(self, volume, connector, extra_specs, def _attach_volume(self, volume, connector, extra_specs,
masking_view_dict, is_live_migration=False): masking_view_dict, is_live_migration=False,
rep_extra_specs=None):
"""Attach a volume to a host. """Attach a volume to a host.
:param volume: the volume object :param volume: the volume object
:param connector: the connector object :param connector: the connector object
:param extra_specs: extra specifications :param extra_specs: extra specifications
:param masking_view_dict: masking view information :param masking_view_dict: masking view information
:param is_live_migration: flag to indicate live migration
:param rep_extra_specs: rep extra specs are passed if metro device
:returns: dict -- device_info_dict :returns: dict -- device_info_dict
String -- port group name String -- port group name
:raises: VolumeBackendAPIException :raises: VolumeBackendAPIException
@ -624,14 +697,15 @@ class VMAXCommon(object):
masking_view_dict['isLiveMigration'] = True masking_view_dict['isLiveMigration'] = True
else: else:
masking_view_dict['isLiveMigration'] = False masking_view_dict['isLiveMigration'] = False
m_specs = extra_specs if rep_extra_specs is None else rep_extra_specs
rollback_dict = self.masking.setup_masking_view( rollback_dict = self.masking.setup_masking_view(
masking_view_dict[utils.ARRAY], volume, masking_view_dict[utils.ARRAY], volume,
masking_view_dict, extra_specs) masking_view_dict, m_specs)
# Find host lun id again after the volume is exported to the host. # Find host lun id again after the volume is exported to the host.
device_info_dict, __, __ = self.find_host_lun_id( device_info_dict, __, __ = self.find_host_lun_id(
volume, connector['host'], extra_specs) volume, connector['host'], extra_specs, rep_extra_specs)
if 'hostlunid' not in device_info_dict: if 'hostlunid' not in device_info_dict:
# Did not successfully attach to host, # Did not successfully attach to host,
# so a rollback for FAST is required. # so a rollback for FAST is required.
@ -639,7 +713,7 @@ class VMAXCommon(object):
"Cannot retrieve hostlunid. ", "Cannot retrieve hostlunid. ",
{'vol': volume_name}) {'vol': volume_name})
self.masking.check_if_rollback_action_for_masking_required( self.masking.check_if_rollback_action_for_masking_required(
masking_view_dict[utils.ARRAY], masking_view_dict[utils.ARRAY], volume,
masking_view_dict[utils.DEVICE_ID], masking_view_dict[utils.DEVICE_ID],
rollback_dict) rollback_dict)
exception_message = (_("Error Attaching volume %(vol)s.") exception_message = (_("Error Attaching volume %(vol)s.")
@ -901,6 +975,9 @@ class VMAXCommon(object):
extra_specs[utils.IS_RE] = True extra_specs[utils.IS_RE] = True
if self.rep_config and self.rep_config.get('mode'): if self.rep_config and self.rep_config.get('mode'):
extra_specs[utils.REP_MODE] = self.rep_config['mode'] extra_specs[utils.REP_MODE] = self.rep_config['mode']
if self.rep_config and self.rep_config.get(utils.METROBIAS):
extra_specs[utils.METROBIAS] = self.rep_config[
utils.METROBIAS]
if register_config_file: if register_config_file:
config_file = self._register_config_file_from_config_group( config_file = self._register_config_file_from_config_group(
config_group) config_group)
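As a hedged aside, the METROBIAS handling a few lines above only fires when the parsed replication configuration carries a bias setting. A sketch of such a rep_config dict; the key names 'metro_bias' and 'allow_delete_metro' are assumptions made for illustration, and all values are invented:

example_rep_config = {
    'array': '000197800123',          # remote array serial (invented)
    'srp': 'SRP_1',
    'rdf_group_label': 'os-rdf-group',
    'mode': 'Metro',
    'metro_bias': True,               # assumed key behind utils.METROBIAS
    'allow_delete_metro': False,      # assumed gate for Metro volume deletion
}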
@ -941,25 +1018,31 @@ class VMAXCommon(object):
return founddevice_id return founddevice_id
def find_host_lun_id(self, volume, host, extra_specs): def find_host_lun_id(self, volume, host, extra_specs,
rep_extra_specs=None):
"""Given the volume dict find the host lun id for a volume. """Given the volume dict find the host lun id for a volume.
:param volume: the volume dict :param volume: the volume dict
:param host: host from connector (can be None on a force-detach) :param host: host from connector (can be None on a force-detach)
:param extra_specs: the extra specs :param extra_specs: the extra specs
:param rep_extra_specs: rep extra specs, passed in if metro device
:returns: dict -- the data dict :returns: dict -- the data dict
""" """
maskedvols = {} maskedvols = {}
is_live_migration = False is_live_migration = False
volume_name = volume.name volume_name = volume.name
device_id = self._find_device_on_array(volume, extra_specs) device_id = self._find_device_on_array(volume, extra_specs)
if rep_extra_specs is not None:
device_id = self.get_remote_target_device(
extra_specs[utils.ARRAY], volume, device_id)[0]
extra_specs = rep_extra_specs
host_name = self.utils.get_host_short_name(host) if host else None host_name = self.utils.get_host_short_name(host) if host else None
if device_id: if device_id:
array = extra_specs[utils.ARRAY] array = extra_specs[utils.ARRAY]
source_storage_group_list = ( source_storage_group_list = (
self.rest.get_storage_groups_from_volume(array, device_id)) self.rest.get_storage_groups_from_volume(array, device_id))
# return only masking views for this host # return only masking views for this host
maskingviews = self.get_masking_views_from_volume( maskingviews = self._get_masking_views_from_volume(
array, device_id, host_name, source_storage_group_list) array, device_id, host_name, source_storage_group_list)
for maskingview in maskingviews: for maskingview in maskingviews:
@ -1002,9 +1085,25 @@ class VMAXCommon(object):
return maskedvols, is_live_migration, source_storage_group_list return maskedvols, is_live_migration, source_storage_group_list
def get_masking_views_from_volume(self, array, device_id, host, def get_masking_views_from_volume(self, array, volume, device_id, host):
storage_group_list=None): """Get all masking views from a volume.
"""Retrieve masking view list for a volume.
:param array: array serial number
:param volume: the volume object
:param device_id: the volume device id
:param host: the host
:return: masking view list, is metro
"""
is_metro = False
extra_specs = self._initial_setup(volume)
mv_list = self._get_masking_views_from_volume(array, device_id, host)
if self.utils.is_metro_device(self.rep_config, extra_specs):
is_metro = True
return mv_list, is_metro
def _get_masking_views_from_volume(self, array, device_id, host,
storage_group_list=None):
"""Helper function to retrieve masking view list for a volume.
:param array: array serial number :param array: array serial number
:param device_id: the volume device id :param device_id: the volume device id
@ -1112,15 +1211,29 @@ class VMAXCommon(object):
raise exception.VolumeBackendAPIException(data=exception_message) raise exception.VolumeBackendAPIException(data=exception_message)
return extra_specs return extra_specs
def _populate_masking_dict(self, volume, connector, extra_specs): def _populate_masking_dict(self, volume, connector,
extra_specs, rep_extra_specs=None):
"""Get all the names of the maskingview and sub-components. """Get all the names of the maskingview and sub-components.
:param volume: the volume object :param volume: the volume object
:param connector: the connector object :param connector: the connector object
:param extra_specs: extra specifications :param extra_specs: extra specifications
:param rep_extra_specs: replication extra specs, if metro volume
:returns: dict -- a dictionary with masking view information :returns: dict -- a dictionary with masking view information
""" """
masking_view_dict = {} masking_view_dict = {}
volume_name = volume.name
device_id = self._find_device_on_array(volume, extra_specs)
if rep_extra_specs is not None:
device_id = self.get_remote_target_device(
extra_specs[utils.ARRAY], volume, device_id)[0]
extra_specs = rep_extra_specs
if not device_id:
exception_message = (_("Cannot retrieve volume %(vol)s "
"from the array. ") % {'vol': volume_name})
LOG.exception(exception_message)
raise exception.VolumeBackendAPIException(exception_message)
host_name = connector['host'] host_name = connector['host']
unique_name = self.utils.truncate_string(extra_specs[utils.SRP], 12) unique_name = self.utils.truncate_string(extra_specs[utils.SRP], 12)
protocol = self.utils.get_short_protocol_type(self.protocol) protocol = self.utils.get_short_protocol_type(self.protocol)
@ -1178,13 +1291,6 @@ class VMAXCommon(object):
masking_view_dict[utils.PARENT_SG_NAME] = ("%(prefix)s-SG" masking_view_dict[utils.PARENT_SG_NAME] = ("%(prefix)s-SG"
% {'prefix': mv_prefix}) % {'prefix': mv_prefix})
volume_name = volume.name
device_id = self._find_device_on_array(volume, extra_specs)
if not device_id:
exception_message = (_("Cannot retrieve volume %(vol)s "
"from the array. ") % {'vol': volume_name})
LOG.exception(exception_message)
raise exception.VolumeBackendAPIException(exception_message)
masking_view_dict[utils.IG_NAME] = ( masking_view_dict[utils.IG_NAME] = (
("OS-%(shortHostName)s-%(protocol)s-IG" ("OS-%(shortHostName)s-%(protocol)s-IG"
@ -1550,13 +1656,13 @@ class VMAXCommon(object):
:param extra_specs: the extra specifications :param extra_specs: the extra specifications
:param volume: the volume object :param volume: the volume object
""" """
# Remove from any storage groups
self.masking.remove_and_reset_members(
array, volume, device_id, volume_name, extra_specs, False)
# Cleanup remote replication # Cleanup remote replication
if self.utils.is_replication_enabled(extra_specs): if self.utils.is_replication_enabled(extra_specs):
self.cleanup_lun_replication(volume, volume_name, self.cleanup_lun_replication(volume, volume_name,
device_id, extra_specs) device_id, extra_specs)
# Remove from any storage groups
self.masking.remove_and_reset_members(
array, volume, device_id, volume_name, extra_specs, False)
def get_target_wwns_from_masking_view( def get_target_wwns_from_masking_view(
self, volume, connector): self, volume, connector):
@ -1566,16 +1672,36 @@ class VMAXCommon(object):
:param connector: the connector dict :param connector: the connector dict
:returns: list -- the target WWN list :returns: list -- the target WWN list
""" """
target_wwns = [] metro_wwns = []
host = connector['host'] host = connector['host']
short_host_name = self.utils.get_host_short_name(host) short_host_name = self.utils.get_host_short_name(host)
extra_specs = self._initial_setup(volume) extra_specs = self._initial_setup(volume)
rep_extra_specs = self._get_replication_extra_specs(
extra_specs, self.rep_config)
if self.utils.is_volume_failed_over(volume): if self.utils.is_volume_failed_over(volume):
extra_specs = self._get_replication_extra_specs( extra_specs = rep_extra_specs
extra_specs, self.rep_config)
array = extra_specs[utils.ARRAY]
device_id = self._find_device_on_array(volume, extra_specs) device_id = self._find_device_on_array(volume, extra_specs)
masking_view_list = self.get_masking_views_from_volume( target_wwns = self._get_target_wwns_from_masking_view(
device_id, short_host_name, extra_specs)
if self.utils.is_metro_device(self.rep_config, extra_specs):
remote_device_id = self.get_remote_target_device(
extra_specs[utils.ARRAY], volume, device_id)[0]
metro_wwns = self._get_target_wwns_from_masking_view(
remote_device_id, short_host_name, rep_extra_specs)
return target_wwns, metro_wwns
def _get_target_wwns_from_masking_view(
self, device_id, short_host_name, extra_specs):
"""Helper function to get wwns from a masking view.
:param device_id: the device id
:param short_host_name: the short host name
:param extra_specs: the extra specs
:return: target wwns -- list
"""
target_wwns = []
array = extra_specs[utils.ARRAY]
masking_view_list = self._get_masking_views_from_volume(
array, device_id, short_host_name) array, device_id, short_host_name)
if masking_view_list is not None: if masking_view_list is not None:
portgroup = self.get_port_group_from_masking_view( portgroup = self.get_port_group_from_masking_view(
@ -2195,8 +2321,7 @@ class VMAXCommon(object):
array, volume, device_id, rdf_group_no, self.rep_config, array, volume, device_id, rdf_group_no, self.rep_config,
target_name, remote_array, target_device_id, extra_specs) target_name, remote_array, target_device_id, extra_specs)
rep_mode = extra_specs.get(utils.REP_MODE, None) if self.utils.does_vol_need_rdf_management_group(extra_specs):
if rep_mode == utils.REP_ASYNC:
self._add_volume_to_async_rdf_managed_grp( self._add_volume_to_async_rdf_managed_grp(
array, device_id, source_name, remote_array, array, device_id, source_name, remote_array,
target_device_id, extra_specs) target_device_id, extra_specs)
@ -2245,7 +2370,7 @@ class VMAXCommon(object):
device_id, extra_specs): device_id, extra_specs):
"""Cleanup target volume on delete. """Cleanup target volume on delete.
Extra logic if target is last in group. Extra logic if target is last in group, or is a metro volume.
:param volume: the volume object :param volume: the volume object
:param volume_name: the volume name :param volume_name: the volume name
:param device_id: the device id :param device_id: the device id
@ -2274,11 +2399,8 @@ class VMAXCommon(object):
if target_device is not None: if target_device is not None:
# Clean-up target # Clean-up target
self.masking.remove_and_reset_members(
remote_array, volume, target_device, volume_name,
rep_extra_specs, False)
self._cleanup_remote_target( self._cleanup_remote_target(
array, remote_array, device_id, target_device, array, volume, remote_array, device_id, target_device,
rdf_group_no, volume_name, rep_extra_specs) rdf_group_no, volume_name, rep_extra_specs)
LOG.info('Successfully destroyed replication for ' LOG.info('Successfully destroyed replication for '
'volume: %(volume)s', 'volume: %(volume)s',
@ -2288,7 +2410,8 @@ class VMAXCommon(object):
'replication-enabled volume: %(volume)s', 'replication-enabled volume: %(volume)s',
{'volume': volume_name}) {'volume': volume_name})
except Exception as e: except Exception as e:
if extra_specs.get(utils.REP_MODE, None) == utils.REP_ASYNC: if extra_specs.get(utils.REP_MODE, None) in [
utils.REP_ASYNC, utils.REP_METRO]:
(target_device, remote_array, rdf_group_no, (target_device, remote_array, rdf_group_no,
local_vol_state, pair_state) = ( local_vol_state, pair_state) = (
self.get_remote_target_device( self.get_remote_target_device(
@ -2309,11 +2432,12 @@ class VMAXCommon(object):
raise exception.VolumeBackendAPIException(data=exception_message) raise exception.VolumeBackendAPIException(data=exception_message)
def _cleanup_remote_target( def _cleanup_remote_target(
self, array, remote_array, device_id, target_device, self, array, volume, remote_array, device_id, target_device,
rdf_group, volume_name, rep_extra_specs): rdf_group, volume_name, rep_extra_specs):
"""Clean-up remote replication target after exception or on deletion. """Clean-up remote replication target after exception or on deletion.
:param array: the array serial number :param array: the array serial number
:param volume: the volume object
:param remote_array: the remote array serial number :param remote_array: the remote array serial number
:param device_id: the source device id :param device_id: the source device id
:param target_device: the target device id :param target_device: the target device id
@ -2321,17 +2445,66 @@ class VMAXCommon(object):
:param volume_name: the volume name :param volume_name: the volume name
:param rep_extra_specs: replication extra specifications :param rep_extra_specs: replication extra specifications
""" """
self.masking.remove_and_reset_members(
remote_array, volume, target_device, volume_name,
rep_extra_specs, False)
are_vols_paired, local_vol_state, pair_state = ( are_vols_paired, local_vol_state, pair_state = (
self.rest.are_vols_rdf_paired( self.rest.are_vols_rdf_paired(
array, remote_array, device_id, target_device)) array, remote_array, device_id, target_device))
if are_vols_paired: if are_vols_paired:
# Break the sync relationship. is_metro = self.utils.is_metro_device(
self.provision.break_rdf_relationship( self.rep_config, rep_extra_specs)
array, device_id, target_device, rdf_group, if is_metro:
rep_extra_specs, pair_state) rep_extra_specs['allow_del_metro'] = self.allow_delete_metro
self._cleanup_metro_target(
array, device_id, target_device,
rdf_group, rep_extra_specs)
else:
# Break the sync relationship.
self.provision.break_rdf_relationship(
array, device_id, target_device, rdf_group,
rep_extra_specs, pair_state)
self._delete_from_srp( self._delete_from_srp(
remote_array, target_device, volume_name, rep_extra_specs) remote_array, target_device, volume_name, rep_extra_specs)
@coordination.synchronized('emc-rg-{rdf_group}')
def _cleanup_metro_target(self, array, device_id, target_device,
rdf_group, rep_extra_specs):
"""Helper function to cleanup a metro remote target.
:param array: the array serial number
:param device_id: the device id
:param target_device: the target device id
:param rdf_group: the rdf group number
:param rep_extra_specs: the rep extra specs
"""
if rep_extra_specs['allow_del_metro']:
metro_grp = self.utils.get_async_rdf_managed_grp_name(
self.rep_config)
self.provision.break_metro_rdf_pair(
array, device_id, target_device, rdf_group,
rep_extra_specs, metro_grp)
# Remove the volume from the metro_grp
self.masking.remove_volume_from_sg(array, device_id, 'metro_vol',
metro_grp, rep_extra_specs)
# Resume I/O on the RDF links for any remaining volumes
if self.rest.get_num_vols_in_sg(array, metro_grp) > 0:
LOG.info("Resuming I/O for all volumes in the RDF group: "
"%(rdfg)s", {'rdfg': device_id})
self.provision.enable_group_replication(
array, metro_grp, rdf_group,
rep_extra_specs, establish=True)
else:
exception_message = (
_("Deleting a Metro-protected replicated volume is "
"not permitted on this backend %(backend)s. "
"Please contact your administrator.")
% {'backend': self.configuration.safe_get(
'volume_backend_name')})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
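A standalone sketch (nothing here is called by the driver) of the control flow the helper above implements; the real work is delegated to the provision, masking and rest calls named in the comments:

def cleanup_metro_pair_sketch(allow_delete_metro, vols_left_in_group):
    # Gate on the backend's allow_delete_metro setting.
    if not allow_delete_metro:
        raise RuntimeError("Metro volume deletion disabled on this backend")
    # 1. Suspend the whole RDF group and delete this pair
    #    (provision.break_metro_rdf_pair).
    # 2. Drop the device from the Metro management storage group
    #    (masking.remove_volume_from_sg).
    # 3. If other volumes remain, re-establish replication for them
    #    (provision.enable_group_replication with establish=True).
    return vols_left_in_group > 0   # True -> group replication was resumed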
def _cleanup_replication_source( def _cleanup_replication_source(
self, array, volume, volume_name, volume_dict, extra_specs): self, array, volume, volume_name, volume_dict, extra_specs):
"""Cleanup a remote replication source volume on failure. """Cleanup a remote replication source volume on failure.
@ -2554,8 +2727,8 @@ class VMAXCommon(object):
volume_update['updates'] = {'status': 'error'} volume_update['updates'] = {'status': 'error'}
else: else:
try: try:
maskingview = self.get_masking_views_from_volume( maskingview = self._get_masking_views_from_volume(
extra_specs[utils.ARRAY], device_id, '') extra_specs[utils.ARRAY], device_id, None)
except Exception: except Exception:
maskingview = None maskingview = None
LOG.debug("Unable to determine if volume is in masking view.") LOG.debug("Unable to determine if volume is in masking view.")
@ -2599,8 +2772,10 @@ class VMAXCommon(object):
new_size, extra_specs): new_size, extra_specs):
"""Extend a replication-enabled volume. """Extend a replication-enabled volume.
Cannot extend volumes in a synchronization pair. Must first break the Cannot extend volumes in a synchronization pair where the source
relationship, extend them separately, then recreate the pair and/or target arrays are running HyperMax versions < 5978, or for
Metro-enabled volumes. Must first break the relationship, extend
them separately, then recreate the pair.
:param array: the array serial number :param array: the array serial number
:param volume: the volume object :param volume: the volume object
:param device_id: the volume device id :param device_id: the volume device id
@ -2608,14 +2783,18 @@ class VMAXCommon(object):
:param new_size: the new size the volume should be :param new_size: the new size the volume should be
:param extra_specs: extra specifications :param extra_specs: extra specifications
""" """
ode_replication = False ode_replication, allow_extend = False, self.extend_replicated_vol
if self.utils.is_replication_enabled(extra_specs): if (self.rest.is_next_gen_array(array)
if self.rest.is_next_gen_array(array): and not self.utils.is_metro_device(
# Check if remote array is next gen self.rep_config, extra_specs)):
__, remote_array = self.get_rdf_details(array) # Check if remote array is next gen
if self.rest.is_next_gen_array(remote_array): __, remote_array = self.get_rdf_details(array)
ode_replication = True if self.rest.is_next_gen_array(remote_array):
if self.extend_replicated_vol is True or ode_replication is True: ode_replication = True
if (self.utils.is_metro_device(self.rep_config, extra_specs)
and not self.allow_delete_metro):
allow_extend = False
if allow_extend is True or ode_replication is True:
try: try:
(target_device, remote_array, rdf_group, (target_device, remote_array, rdf_group,
local_vol_state, pair_state) = ( local_vol_state, pair_state) = (
@ -2627,10 +2806,14 @@ class VMAXCommon(object):
# Volume must be removed from replication (storage) group # Volume must be removed from replication (storage) group
# before the replication relationship can be ended (cannot # before the replication relationship can be ended (cannot
# have a mix of replicated and non-replicated volumes as # have a mix of replicated and non-replicated volumes as
# the SRDF groups become unmanageable). # the SRDF groups become unmanageable), but
# leave the vol in metro management group for now
metro_grp = self.utils.get_async_rdf_managed_grp_name(
self.rep_config) if self.utils.is_metro_device(
self.rep_config, rep_extra_specs) else None
self.masking.remove_and_reset_members( self.masking.remove_and_reset_members(
array, volume, device_id, volume_name, array, volume, device_id, volume_name,
extra_specs, False) extra_specs, False, async_grp=metro_grp)
# Repeat on target side # Repeat on target side
self.masking.remove_and_reset_members( self.masking.remove_and_reset_members(
@ -2638,9 +2821,17 @@ class VMAXCommon(object):
rep_extra_specs, False) rep_extra_specs, False)
LOG.info("Breaking replication relationship...") LOG.info("Breaking replication relationship...")
self.provision.break_rdf_relationship( if self.utils.is_metro_device(
array, device_id, target_device, self.rep_config, rep_extra_specs):
rdf_group, rep_extra_specs, pair_state) rep_extra_specs['allow_del_metro'] = (
self.allow_delete_metro)
self._cleanup_metro_target(
array, device_id, target_device,
rdf_group, rep_extra_specs)
else:
self.provision.break_rdf_relationship(
array, device_id, target_device, rdf_group,
rep_extra_specs, pair_state)
# Extend the target volume # Extend the target volume
LOG.info("Extending target volume...") LOG.info("Extending target volume...")
@ -2710,6 +2901,9 @@ class VMAXCommon(object):
remote_array, volume, target_device, target_name, remote_array, volume, target_device, target_name,
rep_extra_specs, False) rep_extra_specs, False)
# Check if volume is a copy session target
self._sync_check(array, device_id, target_name,
extra_specs, tgt_only=True)
# Establish replication relationship # Establish replication relationship
rdf_dict = self.rest.create_rdf_device_pair( rdf_dict = self.rest.create_rdf_device_pair(
array, device_id, rdf_group_no, target_device, remote_array, array, device_id, rdf_group_no, target_device, remote_array,
@ -2729,11 +2923,8 @@ class VMAXCommon(object):
"volume and returning source volume to default storage " "volume and returning source volume to default storage "
"group. Volume name: %(name)s "), "group. Volume name: %(name)s "),
{'name': target_name}) {'name': target_name})
self.masking.remove_and_reset_members(
remote_array, volume, target_device, target_name,
rep_extra_specs, False)
self._cleanup_remote_target( self._cleanup_remote_target(
array, remote_array, device_id, target_device, array, volume, remote_array, device_id, target_device,
rdf_group_no, target_name, rep_extra_specs) rdf_group_no, target_name, rep_extra_specs)
# Re-throw the exception. # Re-throw the exception.
exception_message = (_("Remote replication failed with exception:" exception_message = (_("Remote replication failed with exception:"
@ -2787,6 +2978,9 @@ class VMAXCommon(object):
:param rep_config: the replication configuration :param rep_config: the replication configuration
:returns: repExtraSpecs - dict :returns: repExtraSpecs - dict
""" """
if not self.utils.is_replication_enabled(extra_specs):
# Skip this if the volume is not replicated
return
rep_extra_specs = deepcopy(extra_specs) rep_extra_specs = deepcopy(extra_specs)
rep_extra_specs[utils.ARRAY] = rep_config['array'] rep_extra_specs[utils.ARRAY] = rep_config['array']
rep_extra_specs[utils.SRP] = rep_config['srp'] rep_extra_specs[utils.SRP] = rep_config['srp']
@ -2876,9 +3070,10 @@ class VMAXCommon(object):
raise NotImplementedError() raise NotImplementedError()
if group.is_replicated: if group.is_replicated:
if (self.rep_config and self.rep_config.get('mode') if (self.rep_config and self.rep_config.get('mode')
and self.rep_config['mode'] == utils.REP_ASYNC): and self.rep_config['mode']
in [utils.REP_ASYNC, utils.REP_METRO]):
msg = _('Replication groups are not supported ' msg = _('Replication groups are not supported '
'for use with Asynchronous replication.') 'for use with Asynchronous replication or Metro.')
raise exception.InvalidInput(reason=msg) raise exception.InvalidInput(reason=msg)
model_update = {'status': fields.GroupStatus.AVAILABLE} model_update = {'status': fields.GroupStatus.AVAILABLE}

View File

@ -86,6 +86,7 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
3.1.0 - Support for replication groups (Tiramisu) 3.1.0 - Support for replication groups (Tiramisu)
- Deprecate backend xml configuration - Deprecate backend xml configuration
- Support for async replication (vmax-replication-enhancements) - Support for async replication (vmax-replication-enhancements)
- Support for SRDF/Metro (vmax-replication-enhancements)
""" """
VERSION = "3.1.0" VERSION = "3.1.0"
@ -228,7 +229,10 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
""" """
device_info = self.common.initialize_connection( device_info = self.common.initialize_connection(
volume, connector) volume, connector)
return self.populate_data(device_info, volume, connector) if device_info:
return self.populate_data(device_info, volume, connector)
else:
return {}
def populate_data(self, device_info, volume, connector): def populate_data(self, device_info, volume, connector):
"""Populate data dict. """Populate data dict.
@ -290,6 +294,7 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
loc = volume.provider_location loc = volume.provider_location
name = ast.literal_eval(loc) name = ast.literal_eval(loc)
host = connector['host'] host = connector['host']
zoning_mappings = {}
try: try:
array = name['array'] array = name['array']
device_id = name['device_id'] device_id = name['device_id']
@ -299,8 +304,9 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
LOG.debug("Start FC detach process for volume: %(volume)s.", LOG.debug("Start FC detach process for volume: %(volume)s.",
{'volume': volume.name}) {'volume': volume.name})
masking_views = self.common.get_masking_views_from_volume( masking_views, is_metro = (
array, device_id, host) self.common.get_masking_views_from_volume(
array, volume, device_id, host))
if masking_views: if masking_views:
portgroup = ( portgroup = (
self.common.get_port_group_from_masking_view( self.common.get_port_group_from_masking_view(
@ -321,10 +327,33 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
'target_wwns': target_wwns, 'target_wwns': target_wwns,
'init_targ_map': init_targ_map, 'init_targ_map': init_targ_map,
'array': array} 'array': array}
else: if is_metro:
rep_data = volume.replication_driver_data
name = ast.literal_eval(rep_data)
try:
metro_array = name['array']
metro_device_id = name['device_id']
except KeyError:
LOG.error("Cannot get remote Metro device information "
"for zone cleanup. Attempting terminate "
"connection...")
else:
masking_views, __ = (
self.common.get_masking_views_from_volume(
metro_array, volume, metro_device_id, host))
if masking_views:
metro_portgroup = (
self.common.get_port_group_from_masking_view(
metro_array, masking_views[0]))
metro_ig = (
self.common.get_initiator_group_from_masking_view(
metro_array, masking_views[0]))
zoning_mappings.update(
{'metro_port_group': metro_portgroup,
'metro_ig': metro_ig, 'metro_array': metro_array})
if not masking_views:
LOG.warning("Volume %(volume)s is not in any masking view.", LOG.warning("Volume %(volume)s is not in any masking view.",
{'volume': volume.name}) {'volume': volume.name})
zoning_mappings = {}
return zoning_mappings return zoning_mappings
def _cleanup_zones(self, zoning_mappings): def _cleanup_zones(self, zoning_mappings):
@ -333,25 +362,35 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
:param zoning_mappings: zoning mapping dict :param zoning_mappings: zoning mapping dict
:returns: data - dict :returns: data - dict
""" """
LOG.debug("Looking for masking views still associated with " data = {'driver_volume_type': 'fibre_channel', 'data': {}}
"Port Group %s.", zoning_mappings['port_group']) try:
masking_views = self.common.get_common_masking_views( LOG.debug("Looking for masking views still associated with "
zoning_mappings['array'], zoning_mappings['port_group'], "Port Group %s.", zoning_mappings['port_group'])
zoning_mappings['initiator_group']) masking_views = self.common.get_common_masking_views(
zoning_mappings['array'], zoning_mappings['port_group'],
zoning_mappings['initiator_group'])
except (KeyError, ValueError, TypeError):
masking_views = []
if masking_views: if masking_views:
LOG.debug("Found %(numViews)d MaskingViews.", LOG.debug("Found %(numViews)d MaskingViews.",
{'numViews': len(masking_views)}) {'numViews': len(masking_views)})
data = {'driver_volume_type': 'fibre_channel', 'data': {}}
else: # no masking views found else: # no masking views found
LOG.debug("No MaskingViews were found. Deleting zone.") # Check if there any Metro masking views
data = {'driver_volume_type': 'fibre_channel', if zoning_mappings.get('metro_array'):
'data': {'target_wwn': zoning_mappings['target_wwns'], masking_views = self.common.get_common_masking_views(
'initiator_target_map': zoning_mappings['metro_array'],
zoning_mappings['init_targ_map']}} zoning_mappings['metro_port_group'],
zoning_mappings['metro_ig'])
if not masking_views:
LOG.debug("No MaskingViews were found. Deleting zone.")
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_wwn': zoning_mappings['target_wwns'],
'initiator_target_map':
zoning_mappings['init_targ_map']}}
LOG.debug("Return FC data for zone removal: %(data)s.", LOG.debug("Return FC data for zone removal: %(data)s.",
{'data': data}) {'data': data})
return data return data
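For reference, a hedged example (WWNs invented) of the dict returned when no masking views remain on either the local or the Metro array, i.e. the shape the ZoneManager expects before it removes a zone:

example_zone_removal = {
    'driver_volume_type': 'fibre_channel',
    'data': {'target_wwn': ['5000097398027000', '5000097398028000'],
             'initiator_target_map': {
                 '10000090fa000001': ['5000097398027000',
                                      '5000097398028000']}},
}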
@ -364,10 +403,12 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
""" """
target_wwns, init_targ_map = [], {} target_wwns, init_targ_map = [], {}
initiator_wwns = connector['wwpns'] initiator_wwns = connector['wwpns']
fc_targets = self.common.get_target_wwns_from_masking_view( fc_targets, metro_fc_targets = (
volume, connector) self.common.get_target_wwns_from_masking_view(
volume, connector))
if self.zonemanager_lookup_service: if self.zonemanager_lookup_service:
fc_targets.extend(metro_fc_targets)
mapping = ( mapping = (
self.zonemanager_lookup_service. self.zonemanager_lookup_service.
get_device_mapping_from_network(initiator_wwns, fc_targets)) get_device_mapping_from_network(initiator_wwns, fc_targets))
@ -378,8 +419,9 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
init_targ_map[initiator] = map_d['target_port_wwn_list'] init_targ_map[initiator] = map_d['target_port_wwn_list']
else: # No lookup service, pre-zoned case. else: # No lookup service, pre-zoned case.
target_wwns = fc_targets target_wwns = fc_targets
fc_targets.extend(metro_fc_targets)
for initiator in initiator_wwns: for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwns init_targ_map[initiator] = fc_targets
return list(set(target_wwns)), init_targ_map return list(set(target_wwns)), init_targ_map
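A self-contained illustration of the pre-zoned branch above (all WWNs invented): each initiator is mapped to the combined local and Metro target list.

initiator_wwns = ['10000090fa000001']
fc_targets = ['5000097398027000']          # local array targets
metro_fc_targets = ['5000097398028000']    # Metro remote array targets
fc_targets.extend(metro_fc_targets)
init_targ_map = {init: fc_targets for init in initiator_wwns}
# init_targ_map == {'10000090fa000001': ['5000097398027000', '5000097398028000']}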

View File

@ -91,6 +91,7 @@ class VMAXISCSIDriver(san.SanISCSIDriver):
3.1.0 - Support for replication groups (Tiramisu) 3.1.0 - Support for replication groups (Tiramisu)
- Deprecate backend xml configuration - Deprecate backend xml configuration
- Support for async replication (vmax-replication-enhancements) - Support for async replication (vmax-replication-enhancements)
- Support for SRDF/Metro (vmax-replication-enhancements)
""" """
VERSION = "3.1.0" VERSION = "3.1.0"
@ -238,7 +239,10 @@ class VMAXISCSIDriver(san.SanISCSIDriver):
""" """
device_info = self.common.initialize_connection( device_info = self.common.initialize_connection(
volume, connector) volume, connector)
return self.get_iscsi_dict(device_info, volume) if device_info:
return self.get_iscsi_dict(device_info, volume)
else:
return {}
def get_iscsi_dict(self, device_info, volume): def get_iscsi_dict(self, device_info, volume):
"""Populate iscsi dict to pass to nova. """Populate iscsi dict to pass to nova.
@ -247,6 +251,7 @@ class VMAXISCSIDriver(san.SanISCSIDriver):
:param volume: volume object :param volume: volume object
:returns: iscsi dict :returns: iscsi dict
""" """
metro_ip_iqn, metro_host_lun = None, None
try: try:
ip_and_iqn = device_info['ip_and_iqn'] ip_and_iqn = device_info['ip_and_iqn']
is_multipath = device_info['is_multipath'] is_multipath = device_info['is_multipath']
@ -257,8 +262,14 @@ class VMAXISCSIDriver(san.SanISCSIDriver):
% {'e': six.text_type(e)}) % {'e': six.text_type(e)})
raise exception.VolumeBackendAPIException(data=exception_message) raise exception.VolumeBackendAPIException(data=exception_message)
if device_info.get('metro_ip_and_iqn'):
LOG.debug("Volume is Metro device...")
metro_ip_iqn = device_info['metro_ip_and_iqn']
metro_host_lun = device_info['metro_hostlunid']
iscsi_properties = self.vmax_get_iscsi_properties( iscsi_properties = self.vmax_get_iscsi_properties(
volume, ip_and_iqn, is_multipath, host_lun_id) volume, ip_and_iqn, is_multipath, host_lun_id,
metro_ip_iqn, metro_host_lun)
LOG.info("iSCSI properties are: %(props)s", LOG.info("iSCSI properties are: %(props)s",
{'props': strutils.mask_dict_password(iscsi_properties)}) {'props': strutils.mask_dict_password(iscsi_properties)})
@ -266,7 +277,8 @@ class VMAXISCSIDriver(san.SanISCSIDriver):
'data': iscsi_properties} 'data': iscsi_properties}
def vmax_get_iscsi_properties(self, volume, ip_and_iqn, def vmax_get_iscsi_properties(self, volume, ip_and_iqn,
is_multipath, host_lun_id): is_multipath, host_lun_id,
metro_ip_iqn, metro_host_lun):
"""Gets iscsi configuration. """Gets iscsi configuration.
We ideally get saved information in the volume entity, but fall back We ideally get saved information in the volume entity, but fall back
@ -286,15 +298,32 @@ class VMAXISCSIDriver(san.SanISCSIDriver):
:param ip_and_iqn: list of ip and iqn dicts :param ip_and_iqn: list of ip and iqn dicts
:param is_multipath: flag for multipath :param is_multipath: flag for multipath
:param host_lun_id: the host lun id of the device :param host_lun_id: the host lun id of the device
:param metro_ip_iqn: metro remote device ip and iqn, if applicable
:param metro_host_lun: metro remote host lun, if applicable
:returns: properties :returns: properties
""" """
properties = {} properties = {}
populate_plurals = False
if len(ip_and_iqn) > 1 and is_multipath: if len(ip_and_iqn) > 1 and is_multipath:
populate_plurals = True
elif len(ip_and_iqn) == 1 and is_multipath and metro_ip_iqn:
populate_plurals = True
if populate_plurals:
properties['target_portals'] = ([t['ip'] + ":3260" for t in properties['target_portals'] = ([t['ip'] + ":3260" for t in
ip_and_iqn]) ip_and_iqn])
properties['target_iqns'] = ([t['iqn'].split(",")[0] for t in properties['target_iqns'] = ([t['iqn'].split(",")[0] for t in
ip_and_iqn]) ip_and_iqn])
properties['target_luns'] = [host_lun_id] * len(ip_and_iqn) properties['target_luns'] = [host_lun_id] * len(ip_and_iqn)
if metro_ip_iqn:
LOG.info("Volume %(vol)s is metro-enabled - "
"adding additional attachment information",
{'vol': volume.name})
properties['target_portals'].extend(([t['ip'] + ":3260" for t in
metro_ip_iqn]))
properties['target_iqns'].extend(([t['iqn'].split(",")[0] for t in
metro_ip_iqn]))
properties['target_luns'].extend(
[metro_host_lun] * len(metro_ip_iqn))
properties['target_discovered'] = True properties['target_discovered'] = True
properties['target_iqn'] = ip_and_iqn[0]['iqn'].split(",")[0] properties['target_iqn'] = ip_and_iqn[0]['iqn'].split(",")[0]
properties['target_portal'] = ip_and_iqn[0]['ip'] + ":3260" properties['target_portal'] = ip_and_iqn[0]['ip'] + ":3260"
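A rough sketch (values invented) of the multipath iSCSI properties produced for a Metro device once the remote portals and LUN are appended above:

example_iscsi_properties = {
    'target_portals': ['10.10.0.1:3260', '10.20.0.1:3260'],
    'target_iqns': ['iqn.1992-04.com.emc:tgt01', 'iqn.1992-04.com.emc:tgt02'],
    'target_luns': [3, 5],          # local hostlunid, then metro_hostlunid
    'target_discovered': True,
    'target_iqn': 'iqn.1992-04.com.emc:tgt01',
    'target_portal': '10.10.0.1:3260',
}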
@ -318,15 +347,8 @@ class VMAXISCSIDriver(san.SanISCSIDriver):
def terminate_connection(self, volume, connector, **kwargs): def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector. """Disallow connection from connector.
Return empty data if other volumes are in the same zone.
The FibreChannel ZoneManager doesn't remove zones
if there isn't an initiator_target_map in the
return of terminate_connection.
:param volume: the volume object :param volume: the volume object
:param connector: the connector object :param connector: the connector object
:returns: dict -- the target_wwns and initiator_target_map if the
zone is to be removed, otherwise empty
""" """
self.common.terminate_connection(volume, connector) self.common.terminate_connection(volume, connector)

View File

@ -76,6 +76,7 @@ class VMAXMasking(object):
masking_view_dict[utils.WORKLOAD], masking_view_dict[utils.WORKLOAD],
masking_view_dict[utils.DISABLECOMPRESSION], masking_view_dict[utils.DISABLECOMPRESSION],
masking_view_dict[utils.IS_RE], rep_mode) masking_view_dict[utils.IS_RE], rep_mode)
rollback_dict = masking_view_dict
try: try:
error_message = self._get_or_create_masking_view( error_message = self._get_or_create_masking_view(
@ -87,6 +88,10 @@ class VMAXMasking(object):
"in the masking view is %(storage_name)s.", "in the masking view is %(storage_name)s.",
{'masking_name': maskingview_name, {'masking_name': maskingview_name,
'storage_name': storagegroup_name}) 'storage_name': storagegroup_name})
rollback_dict['portgroup_name'] = (
self.rest.get_element_from_masking_view(
serial_number, maskingview_name, portgroup=True))
except Exception as e: except Exception as e:
LOG.exception( LOG.exception(
"Masking View creation or retrieval was not successful " "Masking View creation or retrieval was not successful "
@ -95,14 +100,6 @@ class VMAXMasking(object):
{'maskingview_name': masking_view_dict[utils.MV_NAME]}) {'maskingview_name': masking_view_dict[utils.MV_NAME]})
error_message = six.text_type(e) error_message = six.text_type(e)
rollback_dict = masking_view_dict
try:
rollback_dict['portgroup_name'] = (
self.rest.get_element_from_masking_view(
serial_number, maskingview_name, portgroup=True))
except Exception as e:
error_message = ("Error retrieving port group. Exception "
"received: %(e)s" % {'e': six.text_type(e)})
if 'source_nf_sg' in masking_view_dict: if 'source_nf_sg' in masking_view_dict:
default_sg_name = masking_view_dict['source_nf_sg'] default_sg_name = masking_view_dict['source_nf_sg']
rollback_dict['default_sg_name'] = default_sg_name rollback_dict['default_sg_name'] = default_sg_name
@ -856,12 +853,12 @@ class VMAXMasking(object):
serial_number, rollback_dict['init_group_name'], serial_number, rollback_dict['init_group_name'],
rollback_dict['connector']) rollback_dict['connector'])
try: try:
found_sg_name = ( found_sg_name_list = (
self.rest.get_storage_groups_from_volume( self.rest.get_storage_groups_from_volume(
serial_number, rollback_dict['device_id'])) serial_number, rollback_dict['device_id']))
# Volume is not associated with any storage group so add # Volume is not associated with any storage group so add
# it back to the default. # it back to the default.
if not found_sg_name: if not found_sg_name_list:
error_message = self._check_adding_volume_to_storage_group( error_message = self._check_adding_volume_to_storage_group(
serial_number, device_id, serial_number, device_id,
rollback_dict['default_sg_name'], rollback_dict['default_sg_name'],
@ -874,17 +871,24 @@ class VMAXMasking(object):
rollback_dict['isLiveMigration'] is True): rollback_dict['isLiveMigration'] is True):
# Live migration case. # Live migration case.
# Remove from nonfast storage group to fast sg # Remove from nonfast storage group to fast sg
self.failed_live_migration(rollback_dict, found_sg_name, self.failed_live_migration(rollback_dict, found_sg_name_list,
rollback_dict[utils.EXTRA_SPECS]) rollback_dict[utils.EXTRA_SPECS])
else: else:
LOG.info("The storage group found is %(found_sg_name)s.", LOG.info("Volume %(vol_id)s is in %(list_size)d storage"
{'found_sg_name': found_sg_name}) "groups. The storage groups are %(found_sg_list)s.",
{'vol_id': volume.id,
'list_size': len(found_sg_name_list),
'found_sg_list': found_sg_name_list})
# Check the name, see if it is the default storage group # Check the name, see if it is the default storage group
# or another. # or another.
if found_sg_name != rollback_dict['default_sg_name']: sg_found = False
for found_sg_name in found_sg_name_list:
if found_sg_name == rollback_dict['default_sg_name']:
sg_found = True
if not sg_found:
# Remove it from its current storage group and return it # Remove it from its current storage group and return it
# to its default masking view if slo is defined. # to its default storage group if slo is defined.
self.remove_and_reset_members( self.remove_and_reset_members(
serial_number, volume, device_id, serial_number, volume, device_id,
rollback_dict['volume_name'], rollback_dict['volume_name'],
@ -997,7 +1001,7 @@ class VMAXMasking(object):
return init_group_name return init_group_name
def _check_ig_rollback( def _check_ig_rollback(
self, serial_number, init_group_name, connector): self, serial_number, init_group_name, connector, force=False):
"""Check if rollback action is required on an initiator group. """Check if rollback action is required on an initiator group.
If anything goes wrong on a masking view creation, we need to check if If anything goes wrong on a masking view creation, we need to check if
@ -1008,18 +1012,22 @@ class VMAXMasking(object):
:param serial_number: the array serial number :param serial_number: the array serial number
:param init_group_name: the initiator group name :param init_group_name: the initiator group name
:param connector: the connector object :param connector: the connector object
:param force: force a delete even if no entry in login table
""" """
initiator_names = self.find_initiator_names(connector) initiator_names = self.find_initiator_names(connector)
found_ig_name = self._find_initiator_group( found_ig_name = self._find_initiator_group(
serial_number, initiator_names) serial_number, initiator_names)
if found_ig_name: if found_ig_name:
if found_ig_name == init_group_name: if found_ig_name == init_group_name:
host = init_group_name.split("-")[1] force = True
LOG.debug("Searching for masking views associated with " if force:
"%(init_group_name)s", found_ig_name = init_group_name
{'init_group_name': init_group_name}) host = init_group_name.split("-")[1]
self._last_volume_delete_initiator_group( LOG.debug("Searching for masking views associated with "
serial_number, found_ig_name, host) "%(init_group_name)s",
{'init_group_name': init_group_name})
self._last_volume_delete_initiator_group(
serial_number, found_ig_name, host)
@coordination.synchronized("emc-vol-{device_id}") @coordination.synchronized("emc-vol-{device_id}")
def remove_and_reset_members( def remove_and_reset_members(
@ -1058,6 +1066,10 @@ class VMAXMasking(object):
storagegroup_names = (self.rest.get_storage_groups_from_volume( storagegroup_names = (self.rest.get_storage_groups_from_volume(
serial_number, device_id)) serial_number, device_id))
if storagegroup_names: if storagegroup_names:
if async_grp is not None:
for index, sg in enumerate(storagegroup_names):
if sg == async_grp:
storagegroup_names.pop(index)
if len(storagegroup_names) == 1 and reset is True: if len(storagegroup_names) == 1 and reset is True:
move = True move = True
elif connector is not None and reset is True: elif connector is not None and reset is True:
@ -1072,10 +1084,6 @@ class VMAXMasking(object):
extra_specs, connector, move) extra_specs, connector, move)
break break
else: else:
if reset is True and async_grp is not None:
for index, sg in enumerate(storagegroup_names):
if sg == async_grp:
storagegroup_names.pop(index)
for sg_name in storagegroup_names: for sg_name in storagegroup_names:
self.remove_volume_from_sg( self.remove_volume_from_sg(
serial_number, device_id, volume_name, sg_name, serial_number, device_id, volume_name, sg_name,
@ -1553,7 +1561,8 @@ class VMAXMasking(object):
@coordination.synchronized("emc-ig-{ig_name}") @coordination.synchronized("emc-ig-{ig_name}")
def _delete_ig(ig_name): def _delete_ig(ig_name):
# Check initiator group hasn't been recently deleted # Check initiator group hasn't been recently deleted
ig_details = self.rest.get_initiator_group(ig_name) ig_details = self.rest.get_initiator_group(
serial_number, ig_name)
if ig_details: if ig_details:
LOG.debug( LOG.debug(
"Last volume associated with the initiator " "Last volume associated with the initiator "
@ -1652,3 +1661,20 @@ class VMAXMasking(object):
array, source_nf_sg, source_parent_sg, extra_specs) array, source_nf_sg, source_parent_sg, extra_specs)
# Delete non fast storage group # Delete non fast storage group
self.rest.delete_storage_group(array, source_nf_sg) self.rest.delete_storage_group(array, source_nf_sg)
def attempt_ig_cleanup(self, connector, protocol, serial_number, force):
"""Attempt to cleanup an orphan initiator group
:param connector: connector object
:param protocol: iscsi or fc
:param serial_number: extra the array serial number
"""
protocol = self.utils.get_short_protocol_type(protocol)
host_name = connector['host']
short_host_name = self.utils.get_host_short_name(host_name)
init_group = (
("OS-%(shortHostName)s-%(protocol)s-IG"
% {'shortHostName': short_host_name,
'protocol': protocol}))
self._check_ig_rollback(
serial_number, init_group, connector, force)
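Shown standalone for clarity (host name invented, 'I' assumed as the short protocol type for iSCSI), this is the initiator group name the cleanup above will look for:

short_host_name = 'compute-1'
protocol = 'I'
init_group = ("OS-%(shortHostName)s-%(protocol)s-IG"
              % {'shortHostName': short_host_name, 'protocol': protocol})
# init_group == 'OS-compute-1-I-IG'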

View File

@ -402,6 +402,7 @@ class VMAXProvision(object):
raise exception.VolumeBackendAPIException(data=exception_message) raise exception.VolumeBackendAPIException(data=exception_message)
return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload} return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload}
@coordination.synchronized('emc-rg-{rdf_group}')
def break_rdf_relationship(self, array, device_id, target_device, def break_rdf_relationship(self, array, device_id, target_device,
rdf_group, rep_extra_specs, state): rdf_group, rep_extra_specs, state):
"""Break the rdf relationship between a pair of devices. """Break the rdf relationship between a pair of devices.
@ -413,28 +414,40 @@ class VMAXProvision(object):
:param rep_extra_specs: replication extra specs :param rep_extra_specs: replication extra specs
:param state: the state of the rdf pair :param state: the state of the rdf pair
""" """
LOG.info("Splitting rdf pair: source device: %(src)s " LOG.info("Suspending rdf pair: source device: %(src)s "
"target device: %(tgt)s.", "target device: %(tgt)s.",
{'src': device_id, 'tgt': target_device}) {'src': device_id, 'tgt': target_device})
state_check = state.lower() if state.lower() == utils.RDF_SYNCINPROG_STATE:
if state_check == utils.RDF_SYNC_STATE: self.rest.wait_for_rdf_consistent_state(
self.rest.modify_rdf_device_pair( array, device_id, target_device,
array, device_id, rdf_group, rep_extra_specs, split=True) rep_extra_specs, state)
elif state_check in [utils.RDF_CONSISTENT_STATE, self.rest.modify_rdf_device_pair(
utils.RDF_SYNCINPROG_STATE]: array, device_id, rdf_group, rep_extra_specs, suspend=True)
if state_check == utils.RDF_SYNCINPROG_STATE: self.delete_rdf_pair(array, device_id, rdf_group,
self.rest.wait_for_rdf_consistent_state( target_device, rep_extra_specs)
array, device_id, target_device,
rep_extra_specs, state) def break_metro_rdf_pair(self, array, device_id, target_device,
self.rest.modify_rdf_device_pair( rdf_group, rep_extra_specs, metro_grp):
array, device_id, rdf_group, rep_extra_specs, suspend=True) """Delete replication for a Metro device pair.
LOG.info("Deleting rdf pair: source device: %(src)s "
"target device: %(tgt)s.", Need to suspend the entire group before we can delete a single pair.
{'src': device_id, 'tgt': target_device}) :param array: the array serial number
self.delete_rdf_pair(array, device_id, rdf_group, rep_extra_specs) :param device_id: the device id
:param target_device: the target device id
:param rdf_group: the rdf group number
:param rep_extra_specs: the replication extra specifications
:param metro_grp: the metro storage group name
"""
# Suspend I/O on the RDF links...
LOG.info("Suspending I/O for all volumes in the RDF group: %(rdfg)s",
{'rdfg': rdf_group})
self.disable_group_replication(
array, metro_grp, rdf_group, rep_extra_specs)
self.delete_rdf_pair(array, device_id, rdf_group,
target_device, rep_extra_specs)
def delete_rdf_pair( def delete_rdf_pair(
self, array, device_id, rdf_group, extra_specs): self, array, device_id, rdf_group, target_device, extra_specs):
"""Delete an rdf pairing. """Delete an rdf pairing.
If the replication mode is synchronous, only one attempt is required If the replication mode is synchronous, only one attempt is required
@ -446,8 +459,12 @@ class VMAXProvision(object):
:param array: the array serial number :param array: the array serial number
:param device_id: source volume device id :param device_id: source volume device id
:param rdf_group: the rdf group number :param rdf_group: the rdf group number
:param target_device: the target device
:param extra_specs: extra specifications :param extra_specs: extra specifications
""" """
LOG.info("Deleting rdf pair: source device: %(src)s "
"target device: %(tgt)s.",
{'src': device_id, 'tgt': target_device})
if (extra_specs.get(utils.REP_MODE) and if (extra_specs.get(utils.REP_MODE) and
extra_specs.get(utils.REP_MODE) == utils.REP_SYNC): extra_specs.get(utils.REP_MODE) == utils.REP_SYNC):
return self.rest.delete_rdf_pair(array, device_id, rdf_group) return self.rest.delete_rdf_pair(array, device_id, rdf_group)
@ -504,8 +521,13 @@ class VMAXProvision(object):
action = "Failing back" action = "Failing back"
LOG.info("%(action)s rdf pair: source device: %(src)s ", LOG.info("%(action)s rdf pair: source device: %(src)s ",
{'action': action, 'src': device_id}) {'action': action, 'src': device_id})
self.rest.modify_rdf_device_pair(
array, device_id, rdf_group, extra_specs, split=False) @coordination.synchronized('emc-rg-{rdfg_no}')
def _failover_volume(rdfg_no):
self.rest.modify_rdf_device_pair(
array, device_id, rdfg_no, extra_specs)
_failover_volume(rdf_group)
def get_or_create_volume_group(self, array, group, extra_specs): def get_or_create_volume_group(self, array, group, extra_specs):
"""Get or create a volume group. """Get or create a volume group.
@ -657,7 +679,7 @@ class VMAXProvision(object):
return rc return rc
def enable_group_replication(self, array, storagegroup_name, def enable_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs): rdf_group_num, extra_specs, establish=False):
"""Resume rdf replication on a storage group. """Resume rdf replication on a storage group.
Replication is enabled by default. This allows resuming Replication is enabled by default. This allows resuming
@ -666,8 +688,9 @@ class VMAXProvision(object):
:param storagegroup_name: the storagegroup name :param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number :param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications :param extra_specs: the extra specifications
:param establish: flag to indicate 'establish' instead of 'resume'
""" """
action = "Resume" action = "Establish" if establish is True else "Resume"
self.rest.modify_storagegroup_rdf( self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs) array, storagegroup_name, rdf_group_num, action, extra_specs)

View File

@ -324,7 +324,8 @@ class VMAXRest(object):
return resource_object return resource_object
def get_resource(self, array, category, resource_type, def get_resource(self, array, category, resource_type,
resource_name=None, params=None, private=''): resource_name=None, params=None, private='',
version=U4V_VERSION):
"""Get resource details from array. """Get resource details from array.
:param array: the array serial number :param array: the array serial number
@ -333,10 +334,11 @@ class VMAXRest(object):
:param resource_name: the name of a specific resource :param resource_name: the name of a specific resource
:param params: query parameters :param params: query parameters
:param private: empty string or '/private' if private url :param private: empty string or '/private' if private url
:param version: None or specific version number if required
:returns: resource object -- dict or None :returns: resource object -- dict or None
""" """
target_uri = self._build_uri(array, category, resource_type, target_uri = self._build_uri(array, category, resource_type,
resource_name, private) resource_name, private, version=version)
return self._get_request(target_uri, resource_type, params) return self._get_request(target_uri, resource_type, params)
def create_resource(self, array, category, resource_type, payload, def create_resource(self, array, category, resource_type, payload,
@ -495,20 +497,6 @@ class VMAXRest(object):
array, SLOPROVISIONING, 'storagegroup', array, SLOPROVISIONING, 'storagegroup',
resource_name=storage_group_name) resource_name=storage_group_name)
def get_storage_group_list(self, array, params=None):
""""Return a list of storage groups.
:param array: the array serial number
:param params: optional filter parameters
:returns: storage group list
"""
sg_list = []
sg_details = self.get_resource(array, SLOPROVISIONING,
'storagegroup', params=params)
if sg_details:
sg_list = sg_details['storageGroupId']
return sg_list
def get_num_vols_in_sg(self, array, storage_group_name): def get_num_vols_in_sg(self, array, storage_group_name):
"""Get the number of volumes in a storage group. """Get the number of volumes in a storage group.
@ -1271,7 +1259,7 @@ class VMAXRest(object):
resource_name=initiator_group, params=params) resource_name=initiator_group, params=params)
def get_initiator(self, array, initiator_id): def get_initiator(self, array, initiator_id):
"""Retrieve initaitor details from the array. """Retrieve initiator details from the array.
:param array: the array serial number :param array: the array serial number
:param initiator_id: the initiator id :param initiator_id: the initiator id
@ -1282,14 +1270,15 @@ class VMAXRest(object):
resource_name=initiator_id) resource_name=initiator_id)
def get_initiator_list(self, array, params=None): def get_initiator_list(self, array, params=None):
"""Retrieve initaitor list from the array. """Retrieve initiator list from the array.
:param array: the array serial number :param array: the array serial number
:param params: dict of optional params :param params: dict of optional params
:returns: list of initiators :returns: list of initiators
""" """
init_dict = self.get_resource( version = '90' if self.is_next_gen_array(array) else U4V_VERSION
array, SLOPROVISIONING, 'initiator', params=params) init_dict = self.get_resource(array, SLOPROVISIONING, 'initiator',
params=params, version=version)
try: try:
init_list = init_dict['initiatorId'] init_list = init_dict['initiatorId']
except KeyError: except KeyError:
@ -1953,8 +1942,9 @@ class VMAXRest(object):
:param extra_specs: the extra specs :param extra_specs: the extra specs
:returns: rdf_dict :returns: rdf_dict
""" """
rep_mode = (extra_specs[utils.REP_MODE] rep_mode = extra_specs[utils.REP_MODE]
if extra_specs.get(utils.REP_MODE) else utils.REP_SYNC) if rep_mode == utils.REP_METRO:
rep_mode = 'Active'
payload = ({"deviceNameListSource": [{"name": device_id}], payload = ({"deviceNameListSource": [{"name": device_id}],
"deviceNameListTarget": [{"name": target_device}], "deviceNameListTarget": [{"name": target_device}],
"replicationMode": rep_mode, "replicationMode": rep_mode,
@ -1963,6 +1953,9 @@ class VMAXRest(object):
if rep_mode == utils.REP_ASYNC: if rep_mode == utils.REP_ASYNC:
payload_update = self._get_async_payload_info(array, rdf_group_no) payload_update = self._get_async_payload_info(array, rdf_group_no)
payload.update(payload_update) payload.update(payload_update)
elif rep_mode == 'Active':
payload = self.get_metro_payload_info(
array, payload, rdf_group_no, extra_specs)
resource_type = ("rdf_group/%(rdf_num)s/volume" resource_type = ("rdf_group/%(rdf_num)s/volume"
% {'rdf_num': rdf_group_no}) % {'rdf_num': rdf_group_no})
status_code, job = self.create_resource(array, REPLICATION, status_code, job = self.create_resource(array, REPLICATION,
@ -1988,17 +1981,40 @@ class VMAXRest(object):
payload_update = {'consExempt': 'true'} payload_update = {'consExempt': 'true'}
return payload_update return payload_update
@coordination.synchronized('emc-rg-{rdf_group}') def get_metro_payload_info(self, array, payload,
rdf_group_no, extra_specs):
"""Get the payload details for a metro active create pair.
:param array: the array serial number
:param payload: the payload
:param rdf_group_no: the rdf group number
:param extra_specs: the replication configuration
:return: updated payload
"""
num_vols = 0
rdfg_details = self.get_rdf_group(array, rdf_group_no)
if rdfg_details is not None and rdfg_details.get('numDevices'):
num_vols = int(rdfg_details['numDevices'])
if num_vols == 0:
# First volume - set bias if required
if (extra_specs.get(utils.METROBIAS)
and extra_specs[utils.METROBIAS] is True):
payload.update({'metroBias': 'true'})
else:
# Need to format subsequent volumes
payload['format'] = 'true'
payload.pop('establish')
payload['rdfType'] = 'NA'
return payload
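# Illustrative sketch (not part of this patch) of the two payload shapes the
# method above produces. The device ids are invented, and the 'establish'
# and 'rdfType' defaults are assumed from the surrounding
# create_rdf_device_pair code rather than shown in this hunk.
base_payload = {"deviceNameListSource": [{"name": "000AA"}],
                "deviceNameListTarget": [{"name": "000BB"}],
                "replicationMode": "Active",
                "establish": "true",   # assumed default, popped below
                "rdfType": "RDF1"}     # assumed default, overridden below

# First volume in an empty RDF group, with metro_use_bias enabled:
# the pair is established and the bias flag is set.
first_vol_payload = dict(base_payload, metroBias='true')

# Any subsequent volume: format the target instead of establishing it and
# let the array resolve the RDF type.
subsequent_payload = dict(base_payload, format='true', rdfType='NA')
subsequent_payload.pop('establish')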
def modify_rdf_device_pair(
self, array, device_id, rdf_group, extra_specs, suspend=False):
"""Modify an rdf device pair.
:param array: the array serial number
:param device_id: the device id
:param rdf_group: the rdf group
:param extra_specs: the extra specs
:param suspend: flag to indicate "suspend" action
"""
common_opts = {"force": 'false',
@ -2006,14 +2022,11 @@ class VMAXRest(object):
"star": 'false', "star": 'false',
"hop2": 'false', "hop2": 'false',
"bypass": 'false'} "bypass": 'false'}
if split: if suspend:
common_opts.update({"immediate": 'false'}) if (extra_specs.get(utils.REP_MODE)
payload = {"action": "Split", and extra_specs[utils.REP_MODE] == utils.REP_ASYNC):
"executionOption": "ASYNCHRONOUS", common_opts.update({"immediate": 'false',
"split": common_opts} "consExempt": 'true'})
elif suspend:
common_opts.update({"immediate": 'false', "consExempt": 'true'})
payload = {"action": "Suspend", payload = {"action": "Suspend",
"executionOption": "ASYNCHRONOUS", "executionOption": "ASYNCHRONOUS",
"suspend": common_opts} "suspend": common_opts}
@ -2034,7 +2047,6 @@ class VMAXRest(object):
self.wait_for_job('Modify device pair', sc,
job, extra_specs)
def delete_rdf_pair(self, array, device_id, rdf_group):
"""Delete an rdf pair.
@ -2199,7 +2211,10 @@ class VMAXRest(object):
elif (action.lower() in ["split", "failover", "suspend"] and
state.lower() in [utils.RDF_SYNC_STATE,
utils.RDF_SYNCINPROG_STATE,
utils.RDF_CONSISTENT_STATE,
utils.RDF_ACTIVE,
utils.RDF_ACTIVEACTIVE,
utils.RDF_ACTIVEBIAS]):
mod_rqd = True
break
return mod_rqd
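# Standalone sketch (not part of this patch) of the extended check: an
# SRDF/Metro pair reporting any of the active states now counts as needing
# modification before, for example, a suspend. State strings are inlined.
def needs_suspend(pair_states):
    """Return True if any pair state permits/requires a Suspend first."""
    allowed = ('synchronized', 'syncinprog', 'consistent',
               'active', 'activeactive', 'activebias')
    return any(state.lower() in allowed for state in pair_states)

assert needs_suspend(['ActiveBias']) is True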
@ -2219,6 +2234,14 @@ class VMAXRest(object):
rdf_group_num, action)
if mod_reqd:
payload = {"executionOption": "ASYNCHRONOUS", "action": action}
if action.lower() == 'suspend':
payload['suspend'] = {"force": "true"}
elif action.lower() == 'establish':
metro_bias = (
True if extra_specs.get(utils.METROBIAS)
and extra_specs[utils.METROBIAS] is True else False)
payload['establish'] = {"metroBias": metro_bias,
"full": 'false'}
resource_name = ('%(sg_name)s/rdf_group/%(rdf_num)s'
% {'sg_name': storagegroup_name,
'rdf_num': rdf_group_num})
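# For reference, a sketch (not part of this patch) of the two REST payloads
# built above; the storage group name and RDF group number are hypothetical.
sg_name, rdf_group_num = 'OS-hypothetical-RM-SG', 10
resource_name = '%s/rdf_group/%s' % (sg_name, rdf_group_num)

suspend_payload = {"executionOption": "ASYNCHRONOUS",
                   "action": "Suspend",
                   "suspend": {"force": "true"}}

# Establish on a Metro-protected group honours the metro_use_bias setting.
establish_payload = {"executionOption": "ASYNCHRONOUS",
                     "action": "Establish",
                     "establish": {"metroBias": True, "full": 'false'}}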

View File

@ -60,12 +60,17 @@ IS_RE = 'replication_enabled'
DISABLECOMPRESSION = 'storagetype:disablecompression'
REP_SYNC = 'Synchronous'
REP_ASYNC = 'Asynchronous'
REP_METRO = 'Metro'
REP_MODE = 'rep_mode'
RDF_SYNC_STATE = 'synchronized'
RDF_SYNCINPROG_STATE = 'syncinprog'
RDF_CONSISTENT_STATE = 'consistent'
RDF_SUSPENDED_STATE = 'suspended'
RDF_FAILEDOVER_STATE = 'failed over'
RDF_ACTIVE = 'active'
RDF_ACTIVEACTIVE = 'activeactive'
RDF_ACTIVEBIAS = 'activebias'
METROBIAS = 'metro_bias'
# Cinder.conf vmax configuration
VMAX_SERVER_IP = 'san_ip'
@ -511,6 +516,18 @@ class VMAXUtils(object):
rep_mode = target.get('mode', '')
if rep_mode.lower() in ['async', 'asynchronous']:
rep_config['mode'] = REP_ASYNC
elif rep_mode.lower() == 'metro':
rep_config['mode'] = REP_METRO
metro_bias = target.get('metro_use_bias', 'false')
if strutils.bool_from_string(metro_bias):
rep_config[METROBIAS] = True
else:
rep_config[METROBIAS] = False
allow_delete_metro = target.get('allow_delete_metro', 'false')
if strutils.bool_from_string(allow_delete_metro):
rep_config['allow_delete_metro'] = True
else:
rep_config['allow_delete_metro'] = False
else:
rep_config['mode'] = REP_SYNC
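# Hedged configuration example (not part of this patch): only 'mode',
# 'metro_use_bias' and 'allow_delete_metro' are taken from the code above;
# the remaining keys follow the existing VMAX replication_device format and
# all values are invented.
#
# In cinder.conf:
#   replication_device = target_device_id:000197800124,
#                        remote_port_group:os-remote-pg,
#                        remote_pool:SRP_1,
#                        rdf_group_label:os-metro-rdfg,
#                        mode:metro,
#                        metro_use_bias:true,
#                        allow_delete_metro:false
#
# The parsed target dict this code receives would then look like:
target = {'target_device_id': '000197800124',
          'remote_port_group': 'os-remote-pg',
          'remote_pool': 'SRP_1',
          'rdf_group_label': 'os-metro-rdfg',
          'mode': 'metro',
          'metro_use_bias': 'true',
          'allow_delete_metro': 'false'}
# giving rep_config['mode'] == REP_METRO, rep_config[METROBIAS] is True and
# rep_config['allow_delete_metro'] is False.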
@ -749,12 +766,17 @@ class VMAXUtils(object):
"""Get the replication prefix. """Get the replication prefix.
Replication prefix for storage group naming is based on whether it is Replication prefix for storage group naming is based on whether it is
synchronous or asynchronous replication mode. synchronous, asynchronous, or metro replication mode.
:param rep_mode: flag to indicate if replication is async :param rep_mode: flag to indicate if replication is async
:return: prefix :return: prefix
""" """
prefix = "-RE" if rep_mode == REP_SYNC else "-RA" if rep_mode == REP_ASYNC:
prefix = "-RA"
elif rep_mode == REP_METRO:
prefix = "-RM"
else:
prefix = "-RE"
return prefix
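# Sketch, not part of this patch: the full mode-to-prefix mapping in one
# place. The prefix ends up embedded in the replication-enabled storage
# group names the driver builds.
prefix_by_mode = {REP_SYNC: '-RE', REP_ASYNC: '-RA', REP_METRO: '-RM'}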
@staticmethod
@ -764,7 +786,33 @@ class VMAXUtils(object):
:param rep_config: the replication configuration
:return: group name
"""
async_grp_name = ("OS-%(rdf)s-%(mode)s-rdf-sg"
% {'rdf': rep_config['rdf_group_label'],
'mode': rep_config['mode']})
LOG.debug("The async/metro rdf managed group name is %(name)s",
{'name': async_grp_name})
return async_grp_name
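# Worked example (sketch, not part of this patch) of the name this produces
# for a Metro-configured backend; the rdf_group_label value is hypothetical.
rep_config = {'rdf_group_label': 'os-metro-rdfg', 'mode': 'Metro'}
grp_name = ("OS-%(rdf)s-%(mode)s-rdf-sg"
            % {'rdf': rep_config['rdf_group_label'],
               'mode': rep_config['mode']})
assert grp_name == 'OS-os-metro-rdfg-Metro-rdf-sg'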
def is_metro_device(self, rep_config, extra_specs):
"""Determine if a volume is a Metro enabled device.
:param rep_config: the replication configuration
:param extra_specs: the extra specifications
:return: bool
"""
is_metro = (True if self.is_replication_enabled(extra_specs)
and rep_config is not None
and rep_config['mode'] == REP_METRO else False)
return is_metro
def does_vol_need_rdf_management_group(self, extra_specs):
"""Determine if a volume is a Metro or Async.
:param extra_specs: the extra specifications
:return: bool
"""
if (self.is_replication_enabled(extra_specs) and
extra_specs.get(REP_MODE, None) in
[REP_ASYNC, REP_METRO]):
return True
return False
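# Illustrative behaviour (sketch, not part of this patch) for a Metro-enabled
# volume type; the extra specs and rep_config values are made up.
rep_config = {'rdf_group_label': 'os-metro-rdfg', 'mode': 'Metro'}
extra_specs = {'replication_enabled': '<is> True', 'rep_mode': 'Metro'}
# With the helpers above, a VMAXUtils instance reports:
#   utils.is_metro_device(rep_config, extra_specs)         -> True
#   utils.does_vol_need_rdf_management_group(extra_specs)  -> True
# i.e. the volume also belongs in the RDF management storage group named by
# get_async_rdf_managed_grp_name().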

View File

@ -0,0 +1,4 @@
---
features:
- Support for SRDF/Metro replication in the VMAX cinder driver.