3PAR: Add Peer Persistence support
Background: With a 3PAR backend configured for replication, only Active/Passive replication is currently supported by the 3PAR drivers in OpenStack. When a failover happens, Nova cannot force-detach the volume from the dead primary backend and re-attach it to the secondary backend; manual intervention by a storage engineer is required.

Proposed solution: On a system with Peer Persistence configured, when a replicated volume is attached to an instance, a VLUN is now created on the secondary backend in addition to the primary backend, so that a subsequent failover is seamless.

This behaviour applies only when all of the following conditions are met:
1] multipath is enabled
2] replication mode is set to "sync"
3] a quorum witness is configured

Closes bug: #1839140
Change-Id: I2d0342adace69314bd159d3d6415a87ff29db562
commit 46a16f1d61
parent 7bb05f327a
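For context, Peer Persistence is driven entirely by backend configuration; no new volume-type extra specs are introduced. The snippet below is an illustrative cinder.conf replication_device entry, not taken from this change: apart from replication_mode and quorum_witness_ip (the key exercised by the new tests), the other keys (backend_id, cpg_map, the hpe3par_*/san_* credentials) are assumed to be the pre-existing 3PAR replication options, and all addresses and names are placeholders.

    [3parfc-1]
    # existing hpe3par_* options for the primary array ...
    replication_device = backend_id:target_3par,
                         replication_mode:sync,
                         quorum_witness_ip:10.50.3.192,
                         cpg_map:source_cpg:dest_cpg,
                         hpe3par_api_url:https://10.50.3.7:8080/api/v1,
                         hpe3par_username:user,
                         hpe3par_password:pass,
                         san_ip:10.50.3.7,
                         san_login:user,
                         san_password:pass

With a setup like this, attaching a replicated volume over multipath creates the VLUN on both arrays; without multipath, with periodic replication, or without a quorum witness, the driver keeps the existing Active/Passive behaviour.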
@@ -7278,6 +7278,133 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver):
             self.standard_logout)
         self.assertDictEqual(expected_properties, result)
 
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_initialize_connection_peer_persistence(self, _mock_volume_types):
+        # setup_mock_client drive with default configuration
+        # and return the mock HTTP 3PAR client
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'sync'
+        self.replication_targets[0]['quorum_witness_ip'] = '10.50.3.192'
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf)
+
+        mock_client.getStorageSystemInfo.return_value = (
+            {'id': self.CLIENT_ID})
+
+        mock_replicated_client = self.setup_driver(config=conf)
+        mock_replicated_client.getStorageSystemInfo.return_value = (
+            {'id': self.REPLICATION_CLIENT_ID})
+
+        _mock_volume_types.return_value = {
+            'name': 'replicated',
+            'extra_specs': {
+                'replication_enabled': '<is> True',
+                'replication:mode': 'sync',
+                'volume_type': self.volume_type_replicated}}
+
+        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
+        mock_client.getCPG.return_value = {}
+        mock_client.getHost.side_effect = [
+            hpeexceptions.HTTPNotFound('fake'),
+            {'name': self.FAKE_HOST,
+             'FCPaths': [{'driverVersion': None,
+                          'firmwareVersion': None,
+                          'hostSpeed': 0,
+                          'model': None,
+                          'portPos': {'cardPort': 1, 'node': 7,
+                                      'slot': 1},
+                          'vendor': None,
+                          'wwn': self.wwn[0]},
+                         {'driverVersion': None,
+                          'firmwareVersion': None,
+                          'hostSpeed': 0,
+                          'model': None,
+                          'portPos': {'cardPort': 1, 'node': 6,
+                                      'slot': 1},
+                          'vendor': None,
+                          'wwn': self.wwn[1]}]}]
+        mock_client.queryHost.return_value = {
+            'members': [{
+                'name': self.FAKE_HOST
+            }]
+        }
+
+        mock_client.getHostVLUNs.side_effect = [
+            hpeexceptions.HTTPNotFound('fake'),
+            [{'active': True,
+              'volumeName': self.VOLUME_3PAR_NAME,
+              'remoteName': self.wwn[1],
+              'lun': 90, 'type': 0}],
+            [{'active': True,
+              'volumeName': self.VOLUME_3PAR_NAME,
+              'remoteName': self.wwn[0],
+              'lun': 90, 'type': 0}]]
+        mock_replicated_client.getHostVLUNs.side_effect = (
+            mock_client.getHostVLUNs.side_effect)
+
+        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
+                    {'volume_name': self.VOLUME_3PAR_NAME,
+                     'lun_id': 90,
+                     'host': self.FAKE_HOST,
+                     'nsp': 'something'})
+        mock_client.createVLUN.return_value = location
+        mock_replicated_client.createVLUN.return_value = location
+
+        expected_properties = {
+            'driver_volume_type': 'fibre_channel',
+            'data': {
+                'encrypted': False,
+                'target_lun': 90,
+                'target_wwn': ['0987654321234', '123456789000987',
+                               '0987654321234', '123456789000987'],
+                'target_discovered': True,
+                'initiator_target_map':
+                    {'123456789012345': ['0987654321234', '123456789000987',
+                                         '0987654321234', '123456789000987'],
+                     '123456789054321': ['0987654321234', '123456789000987',
+                                         '0987654321234', '123456789000987']}}}
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client, \
+            mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_replication_client') as mock_replication_client:
+
+            mock_create_client.return_value = mock_client
+            mock_replication_client.return_value = mock_replicated_client
+
+            volume = self.volume
+            volume['replication_status'] = 'enabled'
+
+            result = self.driver.initialize_connection(
+                volume,
+                self.connector_multipath_enabled)
+
+            expected = [
+                mock.call.getVolume(self.VOLUME_3PAR_NAME),
+                mock.call.getCPG(HPE3PAR_CPG),
+                mock.call.getHost(self.FAKE_HOST),
+                mock.call.queryHost(wwns=['123456789012345',
+                                          '123456789054321']),
+                mock.call.getHost(self.FAKE_HOST),
+                mock.call.getPorts(),
+                mock.call.getHostVLUNs(self.FAKE_HOST),
+                mock.call.createVLUN(
+                    self.VOLUME_3PAR_NAME,
+                    auto=True,
+                    hostname=self.FAKE_HOST,
+                    lun=None),
+                mock.call.getHostVLUNs(self.FAKE_HOST)]
+
+            mock_client.assert_has_calls(
+                self.get_id_login +
+                self.standard_logout +
+                self.standard_login +
+                expected +
+                self.standard_logout)
+            self.assertDictEqual(expected_properties, result)
+
     def test_terminate_connection(self):
         # setup_mock_client drive with default configuration
         # and return the mock HTTP 3PAR client
@@ -7470,6 +7597,8 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver):
 
     def test_terminate_connection_more_vols(self):
         mock_client = self.setup_driver()
+        mock_client.getStorageSystemInfo.return_value = (
+            {'id': self.CLIENT_ID})
         # mock more than one vlun on the host (don't even try to remove host)
         mock_client.getHostVLUNs.return_value = \
             [
@@ -7519,11 +7648,141 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver):
             conn_info = self.driver.terminate_connection(self.volume,
                                                          self.connector)
             mock_client.assert_has_calls(
+                self.get_id_login +
+                self.standard_logout +
                 self.standard_login +
                 expect_less +
                 self.standard_logout)
             self.assertEqual(expect_conn, conn_info)
 
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_terminate_connection_peer_persistence(self, _mock_volume_types):
+        # setup_mock_client drive with default configuration
+        # and return the mock HTTP 3PAR client
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'sync'
+        self.replication_targets[0]['quorum_witness_ip'] = '10.50.3.192'
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf)
+
+        mock_client.getStorageSystemInfo.return_value = (
+            {'id': self.CLIENT_ID})
+
+        mock_replicated_client = self.setup_driver(config=conf)
+        mock_replicated_client.getStorageSystemInfo.return_value = (
+            {'id': self.REPLICATION_CLIENT_ID})
+
+        _mock_volume_types.return_value = {
+            'name': 'replicated',
+            'extra_specs': {
+                'replication_enabled': '<is> True',
+                'replication:mode': 'sync',
+                'volume_type': self.volume_type_replicated}}
+
+        effects = [
+            [{'active': False, 'volumeName': self.VOLUME_3PAR_NAME,
+              'lun': None, 'type': 0}],
+            hpeexceptions.HTTPNotFound,
+            hpeexceptions.HTTPNotFound]
+
+        mock_client.getHostVLUNs.side_effect = effects
+        mock_replicated_client.getHostVLUNs.side_effect = effects
+
+        getHost_side_effect = [
+            hpeexceptions.HTTPNotFound('fake'),
+            {'name': self.FAKE_HOST,
+             'FCPaths': [{'driverVersion': None,
+                          'firmwareVersion': None,
+                          'hostSpeed': 0,
+                          'model': None,
+                          'portPos': {'cardPort': 1, 'node': 7,
+                                      'slot': 1},
+                          'vendor': None,
+                          'wwn': self.wwn[0]},
+                         {'driverVersion': None,
+                          'firmwareVersion': None,
+                          'hostSpeed': 0,
+                          'model': None,
+                          'portPos': {'cardPort': 1, 'node': 6,
+                                      'slot': 1},
+                          'vendor': None,
+                          'wwn': self.wwn[1]}]}]
+        queryHost_return_value = {
+            'members': [{
+                'name': self.FAKE_HOST
+            }]
+        }
+
+        mock_client.getHost.side_effect = getHost_side_effect
+        mock_client.queryHost.return_value = queryHost_return_value
+
+        mock_replicated_client.getHost.side_effect = getHost_side_effect
+        mock_replicated_client.queryHost.return_value = queryHost_return_value
+
+        expected = [
+            mock.call.queryHost(wwns=['123456789012345', '123456789054321']),
+            mock.call.getHostVLUNs(self.FAKE_HOST),
+            mock.call.deleteVLUN(
+                self.VOLUME_3PAR_NAME,
+                None,
+                hostname=self.FAKE_HOST),
+            mock.call.getHostVLUNs(self.FAKE_HOST),
+            mock.call.deleteHost(self.FAKE_HOST),
+            mock.call.getHostVLUNs(self.FAKE_HOST),
+            mock.call.getPorts()]
+
+        volume = self.volume
+        volume['replication_status'] = 'enabled'
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client, \
+            mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_replication_client') as mock_replication_client:
+
+            mock_create_client.return_value = mock_client
+            mock_replication_client.return_value = mock_replicated_client
+
+            conn_info = self.driver.terminate_connection(
+                volume, self.connector_multipath_enabled)
+            mock_client.assert_has_calls(
+                self.standard_login +
+                expected +
+                self.standard_logout)
+            self.assertIn('data', conn_info)
+            self.assertIn('initiator_target_map', conn_info['data'])
+            mock_client.reset_mock()
+            mock_replicated_client.reset_mock()
+            mock_client.getHostVLUNs.side_effect = effects
+            mock_replicated_client.getHostVLUNs.side_effect = effects
+
+            # mock some deleteHost exceptions that are handled
+            delete_with_vlun = hpeexceptions.HTTPConflict(
+                error={'message': "has exported VLUN"})
+            delete_with_hostset = hpeexceptions.HTTPConflict(
+                error={'message': "host is a member of a set"})
+            mock_client.deleteHost = mock.Mock(
+                side_effect=[delete_with_vlun, delete_with_hostset])
+
+            conn_info = self.driver.terminate_connection(
+                volume, self.connector_multipath_enabled)
+            mock_client.assert_has_calls(
+                self.standard_login +
+                expected +
+                self.standard_logout)
+            mock_client.reset_mock()
+            mock_replicated_client.reset_mock()
+            mock_client.getHostVLUNs.side_effect = effects
+            mock_replicated_client.getHostVLUNs.side_effect = effects
+
+            conn_info = self.driver.terminate_connection(
+                volume, self.connector_multipath_enabled)
+            mock_client.assert_has_calls(
+                self.standard_login +
+                expected +
+                self.standard_logout)
+
     def test_get_3par_host_from_wwn_iqn(self):
         mock_client = self.setup_driver()
         mock_client.getHosts.return_value = {
@@ -7954,7 +8213,7 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host = self.driver._create_host(
+            host, cpg = self.driver._create_host(
                 common,
                 self.volume,
                 self.connector_multipath_enabled)
@@ -7969,6 +8228,7 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver):
             mock_client.assert_has_calls(expected)
 
             self.assertEqual('fake', host['name'])
+            self.assertEqual(HPE3PAR_CPG, cpg)
 
     def test_create_host(self):
         # setup_mock_client drive with default configuration
@@ -8001,7 +8261,7 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host = self.driver._create_host(
+            host, cpg = self.driver._create_host(
                 common,
                 self.volume,
                 self.connector_multipath_enabled)
@@ -8043,7 +8303,7 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host = self.driver._create_host(
+            host, cpg = self.driver._create_host(
                 common,
                 self.volume,
                 self.connector_multipath_enabled)
@@ -8083,7 +8343,7 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host = self.driver._create_host(
+            host, cpg = self.driver._create_host(
                 common,
                 self.volume,
                 self.connector_multipath_enabled)
@@ -8122,7 +8382,7 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host = self.driver._create_host(
+            host, cpg = self.driver._create_host(
                 common,
                 self.volume,
                 self.connector_multipath_enabled)
@@ -8175,7 +8435,7 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host = self.driver._create_host(
+            host, cpg = self.driver._create_host(
                 common,
                 self.volume,
                 self.connector_multipath_enabled)
@@ -8218,7 +8478,7 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host = self.driver._create_host(
+            host, cpg = self.driver._create_host(
                 common,
                 self.volume,
                 self.connector_multipath_enabled)
@@ -8417,8 +8677,10 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
         with mock.patch.object(hpecommon.HPE3PARCommon,
                                '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
+            volume = self.volume
+            volume['replication_status'] = 'disabled'
             result = self.driver.initialize_connection(
-                self.volume,
+                volume,
                 self.connector_multipath_enabled)
 
             expected = [
@@ -8477,8 +8739,10 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
         with mock.patch.object(hpecommon.HPE3PARCommon,
                                '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
+            volume = self.volume
+            volume['replication_status'] = 'disabled'
             result = self.driver.initialize_connection(
-                self.volume,
+                volume,
                 self.connector_multipath_enabled)
 
             expected = [
@@ -8553,6 +8817,111 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
             expected_properties['data']['encrypted'] = True
             self.assertDictEqual(self.properties, result)
 
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_initialize_connection_peer_persistence(self, _mock_volume_types):
+        # setup_mock_client drive with default configuration
+        # and return the mock HTTP 3PAR client
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'sync'
+        self.replication_targets[0]['quorum_witness_ip'] = '10.50.3.192'
+        self.replication_targets[0]['hpe3par_iscsi_ips'] = '1.1.1.2'
+        conf.replication_device = self.replication_targets
+
+        mock_client = self.setup_driver(config=conf)
+        mock_client.getStorageSystemInfo.return_value = (
+            {'id': self.CLIENT_ID})
+
+        mock_replicated_client = self.setup_driver(config=conf)
+        mock_replicated_client.getStorageSystemInfo.return_value = (
+            {'id': self.REPLICATION_CLIENT_ID})
+
+        _mock_volume_types.return_value = {
+            'name': 'replicated',
+            'extra_specs': {
+                'replication_enabled': '<is> True',
+                'replication:mode': 'sync',
+                'volume_type': self.volume_type_replicated}}
+
+        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
+        mock_client.getCPG.return_value = {}
+
+        mock_client.getHost.side_effect = [
+            hpeexceptions.HTTPNotFound('fake'),
+            {'name': self.FAKE_HOST}]
+        mock_client.queryHost.return_value = {
+            'members': [{
+                'name': self.FAKE_HOST
+            }]
+        }
+
+        mock_client.getHostVLUNs.side_effect = [
+            hpeexceptions.HTTPNotFound('fake'),
+            [{'active': True,
+              'volumeName': self.VOLUME_3PAR_NAME,
+              'lun': self.TARGET_LUN, 'type': 0,
+              'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}]]
+        mock_replicated_client.getHostVLUNs.side_effect = [
+            hpeexceptions.HTTPNotFound('fake'),
+            [{'active': True,
+              'volumeName': self.VOLUME_3PAR_NAME,
+              'lun': self.TARGET_LUN, 'type': 0,
+              'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}]]
+
+        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
+                    {'volume_name': self.VOLUME_3PAR_NAME,
+                     'lun_id': self.TARGET_LUN,
+                     'host': self.FAKE_HOST,
+                     'nsp': 'something'})
+        mock_client.createVLUN.return_value = location
+        mock_replicated_client.createVLUN.return_value = location
+
+        mock_client.getiSCSIPorts.return_value = [{
+            'IPAddr': '1.1.1.2',
+            'iSCSIName': self.TARGET_IQN,
+        }]
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client, \
+            mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_replication_client') as mock_replication_client:
+
+            mock_create_client.return_value = mock_client
+            mock_replication_client.return_value = mock_replicated_client
+
+            volume = self.volume
+            volume['replication_status'] = 'enabled'
+
+            result = self.driver.initialize_connection(
+                volume,
+                self.connector_multipath_enabled)
+
+            expected = [
+                mock.call.getVolume(self.VOLUME_3PAR_NAME),
+                mock.call.getCPG(HPE3PAR_CPG),
+                mock.call.getHost(self.FAKE_HOST),
+                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
+                mock.call.getHost(self.FAKE_HOST),
+                mock.call.getiSCSIPorts(state=4),
+                mock.call.getHostVLUNs(self.FAKE_HOST),
+                mock.call.createVLUN(
+                    self.VOLUME_3PAR_NAME,
+                    auto=True,
+                    hostname=self.FAKE_HOST,
+                    portPos=self.FAKE_ISCSI_PORT['portPos'],
+                    lun=None),
+                mock.call.getHostVLUNs(self.FAKE_HOST)]
+
+            mock_client.assert_has_calls(
+                self.get_id_login +
+                self.standard_logout +
+                self.standard_login +
+                expected +
+                self.standard_logout)
+
+            self.assertDictEqual(self.multipath_properties, result)
+
     def test_terminate_connection_for_clear_chap_creds_not_found(self):
         # setup_mock_client drive with default configuration
         # and return the mock HTTP 3PAR client
@@ -9050,7 +9419,7 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host, auth_username, auth_password = self.driver._create_host(
+            host, auth_username, auth_password, cpg = self.driver._create_host(
                 common, self.volume, self.connector)
             expected = [
                 mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
@@ -9080,7 +9449,7 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host, auth_username, auth_password = self.driver._create_host(
+            host, auth_username, auth_password, cpg = self.driver._create_host(
                 common, self.volume, self.connector)
             expected = [
                 mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
@@ -9098,6 +9467,7 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
             self.assertEqual(self.FAKE_HOST, host['name'])
             self.assertIsNone(auth_username)
             self.assertIsNone(auth_password)
+            self.assertEqual(HPE3PAR_CPG, cpg)
 
     def test_create_host_chap_enabled(self):
         # setup_mock_client drive with CHAP enabled configuration
@@ -9135,7 +9505,7 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host, auth_username, auth_password = self.driver._create_host(
+            host, auth_username, auth_password, cpg = self.driver._create_host(
                 common, self.volume, self.connector)
             expected = [
                 mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
@@ -9202,7 +9572,7 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host, auth_username, auth_password = self.driver._create_host(
+            host, auth_username, auth_password, cpg = self.driver._create_host(
                 common, self.volume, self.connector)
 
             expected = [
@@ -9264,7 +9634,7 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host, auth_username, auth_password = self.driver._create_host(
+            host, auth_username, auth_password, cpg = self.driver._create_host(
                 common, self.volume, self.connector)
 
             expected = [
@@ -9299,7 +9669,7 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host, auth_username, auth_password = self.driver._create_host(
+            host, auth_username, auth_password, cpg = self.driver._create_host(
                 common, self.volume, self.connector)
 
             expected = [
@@ -9354,7 +9724,7 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host, auth_username, auth_password = self.driver._create_host(
+            host, auth_username, auth_password, cpg = self.driver._create_host(
                 common, self.volume, self.connector)
 
             expected = [
@@ -9399,7 +9769,7 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host, user, pwd = self.driver._create_host(
+            host, user, pwd, cpg = self.driver._create_host(
                 common, self.volume, self.connector)
             expected = [
                 mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
@@ -9433,7 +9803,7 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host, auth_username, auth_password = self.driver._create_host(
+            host, auth_username, auth_password, cpg = self.driver._create_host(
                 common, self.volume, self.connector)
 
             expected = [
@@ -9491,7 +9861,7 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
                 '_create_client') as mock_create_client:
             mock_create_client.return_value = mock_client
             common = self.driver._login()
-            host, auth_username, auth_password = self.driver._create_host(
+            host, auth_username, auth_password, cpg = self.driver._create_host(
                 common, self.volume, self.connector)
 
             expected = [
@@ -10295,6 +10665,80 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
                 expected +
                 self.standard_logout)
 
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_terminate_connection_peer_persistence(self, _mock_volume_types):
+        # setup_mock_client drive with default configuration
+        # and return the mock HTTP 3PAR client
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'sync'
+        self.replication_targets[0]['quorum_witness_ip'] = '10.50.3.192'
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf)
+
+        mock_client.getStorageSystemInfo.return_value = (
+            {'id': self.CLIENT_ID})
+
+        mock_replicated_client = self.setup_driver(config=conf)
+        mock_replicated_client.getStorageSystemInfo.return_value = (
+            {'id': self.REPLICATION_CLIENT_ID})
+
+        _mock_volume_types.return_value = {
+            'name': 'replicated',
+            'extra_specs': {
+                'replication_enabled': '<is> True',
+                'replication:mode': 'sync',
+                'volume_type': self.volume_type_replicated}}
+
+        mock_client.getHostVLUNs.return_value = [
+            {'active': False,
+             'volumeName': self.VOLUME_3PAR_NAME,
+             'lun': None, 'type': 0}]
+
+        mock_client.queryHost.return_value = {
+            'members': [{
+                'name': self.FAKE_HOST
+            }]
+        }
+
+        volume = self.volume
+        volume['replication_status'] = 'enabled'
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client, \
+            mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_replication_client') as mock_replication_client:
+
+            mock_create_client.return_value = mock_client
+            mock_replication_client.return_value = mock_replicated_client
+
+            self.driver.terminate_connection(
+                volume,
+                self.connector_multipath_enabled)
+
+            expected = [
+                mock.call.queryHost(iqns=[self.connector['initiator']]),
+                mock.call.getHostVLUNs(self.FAKE_HOST),
+                mock.call.deleteVLUN(
+                    self.VOLUME_3PAR_NAME,
+                    None,
+                    hostname=self.FAKE_HOST),
+                mock.call.getHostVLUNs(self.FAKE_HOST),
+                mock.call.modifyHost(
+                    'fakehost',
+                    {'pathOperation': 2,
+                     'iSCSINames': ['iqn.1993-08.org.debian:01:222']}),
+                mock.call.removeVolumeMetaData(
+                    self.VOLUME_3PAR_NAME, CHAP_USER_KEY),
+                mock.call.removeVolumeMetaData(
+                    self.VOLUME_3PAR_NAME, CHAP_PASS_KEY)]
+
+            mock_client.assert_has_calls(
+                self.standard_login +
+                expected +
+                self.standard_logout)
+
+
 VLUNS5_RET = ({'members':
                [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2},
@@ -289,11 +289,12 @@ class HPE3PARCommon(object):
         4.0.12 - Added multiattach support
         4.0.13 - Fixed detaching issue for volume with type multiattach
                  enabled. bug #1834660
+        4.0.14 - Added Peer Persistence feature
 
 
     """
 
-    VERSION = "4.0.13"
+    VERSION = "4.0.14"
 
     stats = {}
 
@@ -1374,8 +1375,8 @@ class HPE3PARCommon(object):
         capacity = int(math.ceil(capacity / units.Mi))
         return capacity
 
-    def _delete_3par_host(self, hostname):
-        self.client.deleteHost(hostname)
+    def _delete_3par_host(self, hostname, client_obj):
+        client_obj.deleteHost(hostname)
 
     def _get_prioritized_host_on_3par(self, host, hosts, hostname):
         # Check whether host with wwn/iqn of initiator present on 3par
@@ -1396,7 +1397,8 @@ class HPE3PARCommon(object):
 
         return host, hostname
 
-    def _create_3par_vlun(self, volume, hostname, nsp, lun_id=None):
+    def _create_3par_vlun(self, volume, hostname, nsp, lun_id=None,
+                          remote_client=None):
         try:
             location = None
             auto = True
@@ -1404,14 +1406,19 @@ class HPE3PARCommon(object):
             if lun_id is not None:
                 auto = False
 
+            if remote_client:
+                client_obj = remote_client
+            else:
+                client_obj = self.client
+
             if nsp is None:
-                location = self.client.createVLUN(volume, hostname=hostname,
+                location = client_obj.createVLUN(volume, hostname=hostname,
                                                   auto=auto, lun=lun_id)
             else:
                 port = self.build_portPos(nsp)
-                location = self.client.createVLUN(volume, hostname=hostname,
+                location = client_obj.createVLUN(volume, hostname=hostname,
                                                   auto=auto, portPos=port,
                                                   lun=lun_id)
 
             vlun_info = None
             if location:
@@ -1454,33 +1461,49 @@ class HPE3PARCommon(object):
     def get_ports(self):
         return self.client.getPorts()
 
-    def get_active_target_ports(self):
-        ports = self.get_ports()
+    def get_active_target_ports(self, remote_client=None):
+        if remote_client:
+            client_obj = remote_client
+            ports = remote_client.getPorts()
+        else:
+            client_obj = self.client
+            ports = self.get_ports()
+
         target_ports = []
         for port in ports['members']:
             if (
-                port['mode'] == self.client.PORT_MODE_TARGET and
-                port['linkState'] == self.client.PORT_STATE_READY
+                port['mode'] == client_obj.PORT_MODE_TARGET and
+                port['linkState'] == client_obj.PORT_STATE_READY
             ):
                 port['nsp'] = self.build_nsp(port['portPos'])
                 target_ports.append(port)
 
         return target_ports
 
-    def get_active_fc_target_ports(self):
-        ports = self.get_active_target_ports()
+    def get_active_fc_target_ports(self, remote_client=None):
+        ports = self.get_active_target_ports(remote_client)
+        if remote_client:
+            client_obj = remote_client
+        else:
+            client_obj = self.client
+
         fc_ports = []
         for port in ports:
-            if port['protocol'] == self.client.PORT_PROTO_FC:
+            if port['protocol'] == client_obj.PORT_PROTO_FC:
                 fc_ports.append(port)
 
         return fc_ports
 
-    def get_active_iscsi_target_ports(self):
-        ports = self.get_active_target_ports()
+    def get_active_iscsi_target_ports(self, remote_client=None):
+        ports = self.get_active_target_ports(remote_client)
+        if remote_client:
+            client_obj = remote_client
+        else:
+            client_obj = self.client
+
         iscsi_ports = []
         for port in ports:
-            if port['protocol'] == self.client.PORT_PROTO_ISCSI:
+            if port['protocol'] == client_obj.PORT_PROTO_ISCSI:
                 iscsi_ports.append(port)
 
         return iscsi_ports
@@ -1663,9 +1686,14 @@ class HPE3PARCommon(object):
                       'license': license_to_check})
             return False
 
-    def _get_vlun(self, volume_name, hostname, lun_id=None, nsp=None):
+    def _get_vlun(self, volume_name, hostname, lun_id=None, nsp=None,
+                  remote_client=None):
         """find a VLUN on a 3PAR host."""
-        vluns = self.client.getHostVLUNs(hostname)
+        if remote_client:
+            vluns = remote_client.getHostVLUNs(hostname)
+        else:
+            vluns = self.client.getHostVLUNs(hostname)
+
         found_vlun = None
         for vlun in vluns:
             if volume_name in vlun['volumeName']:
@@ -1688,26 +1716,29 @@ class HPE3PARCommon(object):
                       {'name': volume_name, 'host': hostname})
         return found_vlun
 
-    def create_vlun(self, volume, host, nsp=None, lun_id=None):
+    def create_vlun(self, volume, host, nsp=None, lun_id=None,
+                    remote_client=None):
        """Create a VLUN.
 
         In order to export a volume on a 3PAR box, we have to create a VLUN.
         """
         volume_name = self._get_3par_vol_name(volume['id'])
         vlun_info = self._create_3par_vlun(volume_name, host['name'], nsp,
-                                           lun_id=lun_id)
+                                           lun_id=lun_id,
+                                           remote_client=remote_client)
         return self._get_vlun(volume_name,
                               host['name'],
                               vlun_info['lun_id'],
-                              nsp)
+                              nsp,
+                              remote_client)
 
-    def delete_vlun(self, volume, hostname, wwn=None, iqn=None):
+    def _delete_vlun(self, client_obj, volume, hostname, wwn=None, iqn=None):
         volume_name = self._get_3par_vol_name(volume['id'])
         if hostname:
-            vluns = self.client.getHostVLUNs(hostname)
+            vluns = client_obj.getHostVLUNs(hostname)
         else:
             # In case of 'force detach', hostname is None
-            vluns = self.client.getVLUNs()['members']
+            vluns = client_obj.getVLUNs()['members']
 
         # When deleteing VLUNs, you simply need to remove the template VLUN
         # and any active VLUNs will be automatically removed. The template
@@ -1732,19 +1763,19 @@ class HPE3PARCommon(object):
             if hostname is None:
                 hostname = vlun.get('hostname')
             if 'portPos' in vlun:
-                self.client.deleteVLUN(volume_name, vlun['lun'],
+                client_obj.deleteVLUN(volume_name, vlun['lun'],
                                        hostname=hostname,
                                        port=vlun['portPos'])
             else:
-                self.client.deleteVLUN(volume_name, vlun['lun'],
+                client_obj.deleteVLUN(volume_name, vlun['lun'],
                                        hostname=hostname)
 
         # Determine if there are other volumes attached to the host.
        # This will determine whether we should try removing host from host set
        # and deleting the host.
         vluns = []
         try:
-            vluns = self.client.getHostVLUNs(hostname)
+            vluns = client_obj.getHostVLUNs(hostname)
         except hpeexceptions.HTTPNotFound:
             LOG.debug("All VLUNs removed from host %s", hostname)
 
@@ -1772,7 +1803,7 @@ class HPE3PARCommon(object):
             try:
                 # TODO(sonivi): since multiattach is not supported for now,
                 # delete only single host, if its not exported to volume.
-                self._delete_3par_host(hostname)
+                self._delete_3par_host(hostname, client_obj)
             except Exception as ex:
                 # Any exception down here is only logged. The vlun is deleted.
 
@@ -1790,13 +1821,13 @@ class HPE3PARCommon(object):
                              'reason': ex.get_description()})
         elif modify_host:
             if wwn is not None:
-                mod_request = {'pathOperation': self.client.HOST_EDIT_REMOVE,
+                mod_request = {'pathOperation': client_obj.HOST_EDIT_REMOVE,
                                'FCWWNs': wwn}
             else:
-                mod_request = {'pathOperation': self.client.HOST_EDIT_REMOVE,
+                mod_request = {'pathOperation': client_obj.HOST_EDIT_REMOVE,
                                'iSCSINames': iqn}
             try:
-                self.client.modifyHost(hostname, mod_request)
+                client_obj.modifyHost(hostname, mod_request)
             except Exception as ex:
                 LOG.info("3PAR vlun for volume '%(name)s' was deleted, "
                          "but the host '%(host)s' was not Modified "
@@ -1804,6 +1835,12 @@ class HPE3PARCommon(object):
                          {'name': volume_name, 'host': hostname,
                           'reason': ex.get_description()})
 
+    def delete_vlun(self, volume, hostname, wwn=None, iqn=None,
+                    remote_client=None):
+        self._delete_vlun(self.client, volume, hostname, wwn, iqn)
+        if remote_client:
+            self._delete_vlun(remote_client, volume, hostname, wwn, iqn)
+
     def _get_volume_type(self, type_id):
         ctxt = context.get_admin_context()
         return volume_types.get_volume_type(ctxt, type_id)
@@ -3031,7 +3068,8 @@ class HPE3PARCommon(object):
                 if wwn.upper() == fc['wwn'].upper():
                     return host['name']
 
-    def terminate_connection(self, volume, hostname, wwn=None, iqn=None):
+    def terminate_connection(self, volume, hostname, wwn=None, iqn=None,
+                             remote_client=None):
         """Driver entry point to detach a volume from an instance."""
         if volume.multiattach:
             attachment_list = volume.volume_attachment
@@ -3062,7 +3100,8 @@ class HPE3PARCommon(object):
                 hostname = hosts['members'][0]['name']
 
         try:
-            self.delete_vlun(volume, hostname, wwn=wwn, iqn=iqn)
+            self.delete_vlun(volume, hostname, wwn=wwn, iqn=iqn,
+                             remote_client=remote_client)
             return
         except hpeexceptions.HTTPNotFound as e:
             if 'host does not exist' in e.get_description():
@@ -3099,7 +3138,8 @@ class HPE3PARCommon(object):
                     raise
 
                 # try again with name retrieved from 3par
-                self.delete_vlun(volume, hostname, wwn=wwn, iqn=iqn)
+                self.delete_vlun(volume, hostname, wwn=wwn, iqn=iqn,
+                                 remote_client=remote_client)
 
     def build_nsp(self, portPos):
         return '%s:%s:%s' % (portPos['node'],
@@ -3483,7 +3523,7 @@ class HPE3PARCommon(object):
         LOG.info("Volume %(volume)s succesfully reverted to %(snap)s.",
                  {'volume': volume_name, 'snap': snapshot_name})
 
-    def find_existing_vlun(self, volume, host):
+    def find_existing_vlun(self, volume, host, remote_client=None):
         """Finds an existing VLUN for a volume on a host.
 
         Returns an existing VLUN's information. If no existing VLUN is found,
@@ -3495,7 +3535,10 @@ class HPE3PARCommon(object):
         existing_vlun = None
         try:
             vol_name = self._get_3par_vol_name(volume['id'])
-            host_vluns = self.client.getHostVLUNs(host['name'])
+            if remote_client:
+                host_vluns = remote_client.getHostVLUNs(host['name'])
+            else:
+                host_vluns = self.client.getHostVLUNs(host['name'])
 
             # The first existing VLUN found will be returned.
             for vlun in host_vluns:
@@ -3510,11 +3553,14 @@ class HPE3PARCommon(object):
                       'vol': vol_name})
         return existing_vlun
 
-    def find_existing_vluns(self, volume, host):
+    def find_existing_vluns(self, volume, host, remote_client=None):
         existing_vluns = []
         try:
             vol_name = self._get_3par_vol_name(volume['id'])
-            host_vluns = self.client.getHostVLUNs(host['name'])
+            if remote_client:
+                host_vluns = remote_client.getHostVLUNs(host['name'])
+            else:
+                host_vluns = self.client.getHostVLUNs(host['name'])
 
             for vlun in host_vluns:
                 if vlun['volumeName'] == vol_name:
@@ -113,10 +113,11 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
         4.0.5 - Set proper backend on subsequent operation, after group
                 failover. bug #1773069
         4.0.6 - Set NSP for single path attachments. Bug #1809249
+        4.0.7 - Added Peer Persistence feature
 
     """
 
-    VERSION = "4.0.5"
+    VERSION = "4.0.7"
 
     # The name of the CI wiki page.
     CI_WIKI_NAME = "HPE_Storage_CI"
@@ -126,6 +127,43 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
         self.lookup_service = fczm_utils.create_lookup_service()
         self.protocol = 'FC'
 
+    def _initialize_connection_common(self, volume, connector, common, host,
+                                      target_wwns, init_targ_map, numPaths,
+                                      remote_client=None):
+        # check if a VLUN already exists for this host
+        existing_vlun = common.find_existing_vlun(volume, host, remote_client)
+
+        vlun = None
+        if existing_vlun is None:
+            # now that we have a host, create the VLUN
+            if self.lookup_service and numPaths == 1:
+                nsp = None
+                active_fc_port_list = (
+                    common.get_active_fc_target_ports(remote_client))
+                for port in active_fc_port_list:
+                    if port['portWWN'].lower() == target_wwns[0].lower():
+                        nsp = port['nsp']
+                        break
+                vlun = common.create_vlun(volume, host, nsp, None,
+                                          remote_client)
+            else:
+                vlun = common.create_vlun(volume, host, None, None,
+                                          remote_client)
+        else:
+            vlun = existing_vlun
+
+        info_backend = {'driver_volume_type': 'fibre_channel',
+                        'data': {'target_lun': vlun['lun'],
+                                 'target_discovered': True,
+                                 'target_wwn': target_wwns,
+                                 'initiator_target_map': init_targ_map}}
+
+        encryption_key_id = volume.get('encryption_key_id')
+        info_backend['data']['encrypted'] = encryption_key_id is not None
+        fczm_utils.add_fc_zone(info_backend)
+
+        return info_backend
+
     @utils.trace
     @coordination.synchronized('3par-{volume.id}')
     def initialize_connection(self, volume, connector):
@@ -167,16 +205,20 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
         * Create a VLUN for that HOST with the volume we want to export.
 
         """
+        LOG.debug("volume id: %(volume_id)s",
+                  {'volume_id': volume['id']})
        array_id = self.get_volume_replication_driver_data(volume)
        common = self._login(array_id=array_id)
         try:
             # we have to make sure we have a host
-            host = self._create_host(common, volume, connector)
+            host, cpg = self._create_host(common, volume, connector)
             target_wwns, init_targ_map, numPaths = (
                 self._build_initiator_target_map(common, connector))
 
             multipath = connector.get('multipath')
-            LOG.debug("multipath: %s", multipath)
+            LOG.debug("multipath: %(multipath)s",
+                      {'multipath': multipath})
 
             user_target = None
             if not multipath:
                 user_target = self._get_user_target(common)
@@ -188,34 +230,64 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
                 target_wwns = [user_target]
                 init_targ_map[initiator] = [user_target]
 
-            # check if a VLUN already exists for this host
-            existing_vlun = common.find_existing_vlun(volume, host)
-
-            vlun = None
-            if existing_vlun is None:
-                # now that we have a host, create the VLUN
-                if self.lookup_service is not None and numPaths == 1:
-                    nsp = None
-                    active_fc_port_list = common.get_active_fc_target_ports()
-                    for port in active_fc_port_list:
-                        if port['portWWN'].lower() == target_wwns[0].lower():
-                            nsp = port['nsp']
-                            break
-                    vlun = common.create_vlun(volume, host, nsp)
-                else:
-                    vlun = common.create_vlun(volume, host)
-            else:
-                vlun = existing_vlun
-
-            info = {'driver_volume_type': 'fibre_channel',
-                    'data': {'target_lun': vlun['lun'],
-                             'target_discovered': True,
-                             'target_wwn': target_wwns,
-                             'initiator_target_map': init_targ_map}}
-
-            encryption_key_id = volume.get('encryption_key_id', None)
-            info['data']['encrypted'] = encryption_key_id is not None
-            fczm_utils.add_fc_zone(info)
+            info = self._initialize_connection_common(
+                volume, connector, common, host,
+                target_wwns, init_targ_map, numPaths)
+
+            if not multipath:
+                return info
+
+            if volume.get('replication_status') != 'enabled':
+                return info
+
+            LOG.debug('This is a replication setup')
+
+            remote_target = common._replication_targets[0]
+            replication_mode = remote_target['replication_mode']
+            quorum_witness_ip = remote_target.get('quorum_witness_ip')
+
+            if replication_mode == 1:
+                LOG.debug('replication_mode is sync')
+                if quorum_witness_ip:
+                    LOG.debug('quorum_witness_ip is present')
+                    LOG.debug('Peer Persistence has been configured')
+                else:
+                    LOG.debug('Since quorum_witness_ip is absent, '
+                              'considering this as Active/Passive '
+                              'replication')
+                    return info
+            else:
+                LOG.debug('Active/Passive replication has been '
+                          'configured')
+                return info
+
+            # Peer Persistence has been configured
+            remote_client = common._create_replication_client(remote_target)
+
+            host, cpg = self._create_host(
+                common, volume, connector,
+                remote_target, cpg, remote_client)
+            target_wwns, init_targ_map, numPaths = (
+                self._build_initiator_target_map(
+                    common, connector, remote_client))
+
+            info_peer = self._initialize_connection_common(
+                volume, connector, common, host,
+                target_wwns, init_targ_map, numPaths,
+                remote_client)
+
+            common._destroy_replication_client(remote_client)
+
+            info = {'driver_volume_type': 'fibre_channel',
+                    'data': {'encrypted': info['data']['encrypted'],
+                             'target_lun': info['data']['target_lun'],
+                             'target_discovered': True,
+                             'target_wwn': info['data']['target_wwn'] +
+                             info_peer['data']['target_wwn'],
+                             'initiator_target_map': self.merge_dicts(
+                                 info['data']['initiator_target_map'],
+                                 info_peer['data']['initiator_target_map'])}}
+
             return info
         finally:
             self._logout(common)
@@ -228,6 +300,39 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
         common = self._login(array_id=array_id)
         try:
             is_force_detach = connector is None
+
+            remote_client = None
+            multipath = False
+            if connector:
+                multipath = connector.get('multipath')
+            LOG.debug("multipath: %(multipath)s",
+                      {'multipath': multipath})
+            if multipath:
+                if volume.get('replication_status') == 'enabled':
+                    LOG.debug('This is a replication setup')
+
+                    remote_target = common._replication_targets[0]
+                    replication_mode = remote_target['replication_mode']
+                    quorum_witness_ip = (
+                        remote_target.get('quorum_witness_ip'))
+
+                    if replication_mode == 1:
+                        LOG.debug('replication_mode is sync')
+                        if quorum_witness_ip:
+                            LOG.debug('quorum_witness_ip is present')
+                            LOG.debug('Peer Persistence has been configured')
+                        else:
+                            LOG.debug('Since quorum_witness_ip is absent, '
+                                      'considering this as Active/Passive '
+                                      'replication')
+                    else:
+                        LOG.debug('Active/Passive replication has been '
+                                  'configured')
+
+                    if replication_mode == 1 and quorum_witness_ip:
+                        remote_client = (
+                            common._create_replication_client(remote_target))
+
             if is_force_detach:
                 common.terminate_connection(volume, None, None)
                 # TODO(sonivi): remove zones, if not required
@@ -236,7 +341,8 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
             else:
                 hostname = common._safe_hostname(connector['host'])
                 common.terminate_connection(volume, hostname,
-                                            wwn=connector['wwpns'])
+                                            wwn=connector['wwpns'],
+                                            remote_client=remote_client)
 
                 zone_remove = True
try:
|
try:
|
||||||
@ -259,21 +365,61 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
|
|||||||
if zone_remove:
|
if zone_remove:
|
||||||
LOG.info("Need to remove FC Zone, building initiator "
|
LOG.info("Need to remove FC Zone, building initiator "
|
||||||
"target map")
|
"target map")
|
||||||
target_wwns, init_targ_map, _numPaths = \
|
target_wwns, init_targ_map, _numPaths = (
|
||||||
self._build_initiator_target_map(common, connector)
|
self._build_initiator_target_map(common, connector))
|
||||||
|
|
||||||
info['data'] = {'target_wwn': target_wwns,
|
info['data'] = {'target_wwn': target_wwns,
|
||||||
'initiator_target_map': init_targ_map}
|
'initiator_target_map': init_targ_map}
|
||||||
fczm_utils.remove_fc_zone(info)
|
fczm_utils.remove_fc_zone(info)
|
||||||
|
|
||||||
|
if remote_client:
|
||||||
|
if zone_remove:
|
||||||
|
try:
|
||||||
|
vluns = remote_client.getHostVLUNs(hostname)
|
||||||
|
except hpeexceptions.HTTPNotFound:
|
||||||
|
# No more exports for this host.
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
# Vlun exists, so check for wwpn entry.
|
||||||
|
for wwpn in connector.get('wwpns'):
|
||||||
|
for vlun in vluns:
|
||||||
|
if (vlun.get('active') and
|
||||||
|
vlun.get('remoteName') == wwpn.upper()):
|
||||||
|
zone_remove = False
|
||||||
|
break
|
||||||
|
|
||||||
|
info_peer = {'driver_volume_type': 'fibre_channel',
|
||||||
|
'data': {}}
|
||||||
|
|
||||||
|
if zone_remove:
|
||||||
|
LOG.info("Need to remove FC Zone, building initiator "
|
||||||
|
"target map")
|
||||||
|
target_wwns, init_targ_map, _numPaths = (
|
||||||
|
self._build_initiator_target_map(common, connector,
|
||||||
|
remote_client))
|
||||||
|
|
||||||
|
info_peer['data'] = {'target_wwn': target_wwns,
|
||||||
|
'initiator_target_map': init_targ_map}
|
||||||
|
fczm_utils.remove_fc_zone(info_peer)
|
||||||
|
|
||||||
|
info = (
|
||||||
|
{'driver_volume_type': 'fibre_channel',
|
||||||
|
'data': {'target_wwn': info['data']['target_wwn'] +
|
||||||
|
info_peer['data']['target_wwn'],
|
||||||
|
'initiator_target_map': self.merge_dicts(
|
||||||
|
info['data']['initiator_target_map'],
|
||||||
|
info_peer['data']['initiator_target_map'])}})
|
||||||
|
|
||||||
return info
|
return info
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
self._logout(common)
|
self._logout(common)
|
||||||
|
|
||||||
def _build_initiator_target_map(self, common, connector):
|
def _build_initiator_target_map(self, common, connector,
|
||||||
|
remote_client=None):
|
||||||
"""Build the target_wwns and the initiator target map."""
|
"""Build the target_wwns and the initiator target map."""
|
||||||
|
|
||||||
fc_ports = common.get_active_fc_target_ports()
|
fc_ports = common.get_active_fc_target_ports(remote_client)
|
||||||
all_target_wwns = []
|
all_target_wwns = []
|
||||||
target_wwns = []
|
target_wwns = []
|
||||||
init_targ_map = {}
|
init_targ_map = {}
|
||||||
@ -311,7 +457,7 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
|
|||||||
return target_wwns, init_targ_map, numPaths
|
return target_wwns, init_targ_map, numPaths
|
||||||
|
|
||||||
def _create_3par_fibrechan_host(self, common, hostname, wwns,
|
def _create_3par_fibrechan_host(self, common, hostname, wwns,
|
||||||
domain, persona_id):
|
domain, persona_id, remote_client=None):
|
||||||
"""Create a 3PAR host.
|
"""Create a 3PAR host.
|
||||||
|
|
||||||
Create a 3PAR host, if there is already a host on the 3par using
|
Create a 3PAR host, if there is already a host on the 3par using
|
||||||
@ -320,7 +466,13 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
|
|||||||
"""
|
"""
|
||||||
# first search for an existing host
|
# first search for an existing host
|
||||||
host_found = None
|
host_found = None
|
||||||
hosts = common.client.queryHost(wwns=wwns)
|
|
||||||
|
if remote_client:
|
||||||
|
client_obj = remote_client
|
||||||
|
else:
|
||||||
|
client_obj = common.client
|
||||||
|
|
||||||
|
hosts = client_obj.queryHost(wwns=wwns)
|
||||||
|
|
||||||
if hosts and hosts['members'] and 'name' in hosts['members'][0]:
|
if hosts and hosts['members'] and 'name' in hosts['members'][0]:
|
||||||
host_found = hosts['members'][0]['name']
|
host_found = hosts['members'][0]['name']
|
||||||
@ -330,9 +482,9 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
|
|||||||
else:
|
else:
|
||||||
persona_id = int(persona_id)
|
persona_id = int(persona_id)
|
||||||
try:
|
try:
|
||||||
common.client.createHost(hostname, FCWwns=wwns,
|
client_obj.createHost(hostname, FCWwns=wwns,
|
||||||
optional={'domain': domain,
|
optional={'domain': domain,
|
||||||
'persona': persona_id})
|
'persona': persona_id})
|
||||||
except hpeexceptions.HTTPConflict as path_conflict:
|
except hpeexceptions.HTTPConflict as path_conflict:
|
||||||
msg = "Create FC host caught HTTP conflict code: %s"
|
msg = "Create FC host caught HTTP conflict code: %s"
|
||||||
LOG.exception(msg, path_conflict.get_code())
|
LOG.exception(msg, path_conflict.get_code())
|
||||||
@ -340,7 +492,7 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
|
|||||||
if path_conflict.get_code() is EXISTENT_PATH:
|
if path_conflict.get_code() is EXISTENT_PATH:
|
||||||
# Handle exception : EXISTENT_PATH - host WWN/iSCSI
|
# Handle exception : EXISTENT_PATH - host WWN/iSCSI
|
||||||
# name already used by another host
|
# name already used by another host
|
||||||
hosts = common.client.queryHost(wwns=wwns)
|
hosts = client_obj.queryHost(wwns=wwns)
|
||||||
if hosts and hosts['members'] and (
|
if hosts and hosts['members'] and (
|
||||||
'name' in hosts['members'][0]):
|
'name' in hosts['members'][0]):
|
||||||
hostname = hosts['members'][0]['name']
|
hostname = hosts['members'][0]['name']
|
||||||
@ -353,11 +505,17 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
|
|||||||
ctxt.reraise = True
|
ctxt.reraise = True
|
||||||
return hostname
|
return hostname
|
||||||
|
|
||||||
def _modify_3par_fibrechan_host(self, common, hostname, wwn):
|
def _modify_3par_fibrechan_host(self, common, hostname, wwn,
|
||||||
mod_request = {'pathOperation': common.client.HOST_EDIT_ADD,
|
remote_client):
|
||||||
|
if remote_client:
|
||||||
|
client_obj = remote_client
|
||||||
|
else:
|
||||||
|
client_obj = common.client
|
||||||
|
|
||||||
|
mod_request = {'pathOperation': client_obj.HOST_EDIT_ADD,
|
||||||
'FCWWNs': wwn}
|
'FCWWNs': wwn}
|
||||||
try:
|
try:
|
||||||
common.client.modifyHost(hostname, mod_request)
|
client_obj.modifyHost(hostname, mod_request)
|
||||||
except hpeexceptions.HTTPConflict as path_conflict:
|
except hpeexceptions.HTTPConflict as path_conflict:
|
||||||
msg = ("Modify FC Host %(hostname)s caught "
|
msg = ("Modify FC Host %(hostname)s caught "
|
||||||
"HTTP conflict code: %(code)s")
|
"HTTP conflict code: %(code)s")
|
||||||
@ -365,21 +523,34 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
|
|||||||
{'hostname': hostname,
|
{'hostname': hostname,
|
||||||
'code': path_conflict.get_code()})
|
'code': path_conflict.get_code()})
|
||||||
|
|
||||||
def _create_host(self, common, volume, connector):
|
def _create_host(self, common, volume, connector,
|
||||||
|
remote_target=None, src_cpg=None, remote_client=None):
|
||||||
"""Creates or modifies existing 3PAR host."""
|
"""Creates or modifies existing 3PAR host."""
|
||||||
host = None
|
host = None
|
||||||
|
domain = None
|
||||||
hostname = common._safe_hostname(connector['host'])
|
hostname = common._safe_hostname(connector['host'])
|
||||||
cpg = common.get_cpg(volume, allowSnap=True)
|
if remote_target:
|
||||||
domain = common.get_domain(cpg)
|
cpg = common._get_cpg_from_cpg_map(
|
||||||
|
remote_target['cpg_map'], src_cpg)
|
||||||
|
cpg_obj = remote_client.getCPG(cpg)
|
||||||
|
if 'domain' in cpg_obj:
|
||||||
|
domain = cpg_obj['domain']
|
||||||
|
else:
|
||||||
|
cpg = common.get_cpg(volume, allowSnap=True)
|
||||||
|
domain = common.get_domain(cpg)
|
||||||
|
|
||||||
if not connector.get('multipath'):
|
if not connector.get('multipath'):
|
||||||
connector['wwpns'] = connector['wwpns'][:1]
|
connector['wwpns'] = connector['wwpns'][:1]
|
||||||
try:
|
try:
|
||||||
host = common._get_3par_host(hostname)
|
if remote_target:
|
||||||
# Check whether host with wwn of initiator present on 3par
|
host = remote_client.getHost(hostname)
|
||||||
hosts = common.client.queryHost(wwns=connector['wwpns'])
|
else:
|
||||||
host, hostname = common._get_prioritized_host_on_3par(host,
|
host = common._get_3par_host(hostname)
|
||||||
hosts,
|
# Check whether host with wwn of initiator present on 3par
|
||||||
hostname)
|
hosts = common.client.queryHost(wwns=connector['wwpns'])
|
||||||
|
host, hostname = (
|
||||||
|
common._get_prioritized_host_on_3par(
|
||||||
|
host, hosts, hostname))
|
||||||
except hpeexceptions.HTTPNotFound:
|
except hpeexceptions.HTTPNotFound:
|
||||||
# get persona from the volume type extra specs
|
# get persona from the volume type extra specs
|
||||||
persona_id = common.get_persona_type(volume)
|
persona_id = common.get_persona_type(volume)
|
||||||
@ -388,13 +559,19 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
|
|||||||
hostname,
|
hostname,
|
||||||
connector['wwpns'],
|
connector['wwpns'],
|
||||||
domain,
|
domain,
|
||||||
persona_id)
|
persona_id,
|
||||||
host = common._get_3par_host(hostname)
|
remote_client)
|
||||||
return host
|
if remote_target:
|
||||||
|
host = remote_client.getHost(hostname)
|
||||||
|
else:
|
||||||
|
host = common._get_3par_host(hostname)
|
||||||
|
return host, cpg
|
||||||
else:
|
else:
|
||||||
return self._add_new_wwn_to_host(common, host, connector['wwpns'])
|
host = self._add_new_wwn_to_host(
|
||||||
|
common, host, connector['wwpns'], remote_client)
|
||||||
|
return host, cpg
|
||||||
|
|
||||||
def _add_new_wwn_to_host(self, common, host, wwns):
|
def _add_new_wwn_to_host(self, common, host, wwns, remote_client=None):
|
||||||
"""Add wwns to a host if one or more don't exist.
|
"""Add wwns to a host if one or more don't exist.
|
||||||
|
|
||||||
Identify if argument wwns contains any world wide names
|
Identify if argument wwns contains any world wide names
|
||||||
@ -419,8 +596,12 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
|
|||||||
# if any wwns found that were not in host list,
|
# if any wwns found that were not in host list,
|
||||||
# add them to the host
|
# add them to the host
|
||||||
if (len(new_wwns) > 0):
|
if (len(new_wwns) > 0):
|
||||||
self._modify_3par_fibrechan_host(common, host['name'], new_wwns)
|
self._modify_3par_fibrechan_host(
|
||||||
host = common._get_3par_host(host['name'])
|
common, host['name'], new_wwns, remote_client)
|
||||||
|
if remote_client:
|
||||||
|
host = remote_client.getHost(host['name'])
|
||||||
|
else:
|
||||||
|
host = common._get_3par_host(host['name'])
|
||||||
return host
|
return host
|
||||||
|
|
||||||
def _get_user_target(self, common):
|
def _get_user_target(self, common):
|
||||||
@ -444,3 +625,8 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
|
|||||||
"%(nsp)s", {'nsp': target_nsp})
|
"%(nsp)s", {'nsp': target_nsp})
|
||||||
|
|
||||||
return target_wwn
|
return target_wwn
|
||||||
|
|
||||||
|
def merge_dicts(self, dict_1, dict_2):
|
||||||
|
keys = set(dict_1).union(dict_2)
|
||||||
|
no = []
|
||||||
|
return {k: (dict_1.get(k, no) + dict_2.get(k, no)) for k in keys}
|
||||||
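The new merge_dicts helper is what lets the FC driver hand a single zoning map to the zone manager when both arrays export the volume: for each initiator WWN it concatenates the target list built against the primary array with the one built against the peer array. A minimal standalone sketch of that behaviour (the WWN values below are made up purely for illustration):

def merge_dicts(dict_1, dict_2):
    # Union of initiator WWNs; a missing key contributes an empty target list.
    keys = set(dict_1).union(dict_2)
    no = []
    return {k: (dict_1.get(k, no) + dict_2.get(k, no)) for k in keys}

# Hypothetical maps: same initiator, one target port on each array.
primary_map = {'10006b4fd0aabc01': ['20010002ac01aaaa']}
peer_map = {'10006b4fd0aabc01': ['20010002ac01bbbb']}

print(merge_dicts(primary_map, peer_map))
# {'10006b4fd0aabc01': ['20010002ac01aaaa', '20010002ac01bbbb']}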
@@ -126,10 +126,11 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
         4.0.2 - Handle force detach case. bug #1686745
         4.0.3 - Set proper backend on subsequent operation, after group
                 failover. bug #1773069
+        4.0.4 - Added Peer Persistence feature

     """

-    VERSION = "4.0.2"
+    VERSION = "4.0.4"

     # The name of the CI wiki page.
     CI_WIKI_NAME = "HPE_Storage_CI"
@@ -146,17 +147,23 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
         finally:
             self._logout(common)

-    def initialize_iscsi_ports(self, common):
+    def initialize_iscsi_ports(self, common,
+                               remote_target=None, remote_client=None):
         # map iscsi_ip-> ip_port
         #             -> iqn
         #             -> nsp
         iscsi_ip_list = {}
         temp_iscsi_ip = {}

+        if remote_target:
+            backend_conf = remote_target
+        else:
+            backend_conf = common._client_conf
+
         # use the 3PAR ip_addr list for iSCSI configuration
-        if len(common._client_conf['hpe3par_iscsi_ips']) > 0:
+        if len(backend_conf['hpe3par_iscsi_ips']) > 0:
             # add port values to ip_addr, if necessary
-            for ip_addr in common._client_conf['hpe3par_iscsi_ips']:
+            for ip_addr in backend_conf['hpe3par_iscsi_ips']:
                 ip = ip_addr.split(':')
                 if len(ip) == 1:
                     temp_iscsi_ip[ip_addr] = {'ip_port': DEFAULT_ISCSI_PORT}
@@ -168,15 +175,16 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
         # add the single value iscsi_ip_address option to the IP dictionary.
         # This way we can see if it's a valid iSCSI IP. If it's not valid,
         # we won't use it and won't bother to report it, see below
-        if (common._client_conf['iscsi_ip_address'] not in temp_iscsi_ip):
-            ip = common._client_conf['iscsi_ip_address']
-            ip_port = common._client_conf['iscsi_port']
-            temp_iscsi_ip[ip] = {'ip_port': ip_port}
+        if 'iscsi_ip_address' in backend_conf:
+            if (backend_conf['iscsi_ip_address'] not in temp_iscsi_ip):
+                ip = backend_conf['iscsi_ip_address']
+                ip_port = backend_conf['iscsi_port']
+                temp_iscsi_ip[ip] = {'ip_port': ip_port}

         # get all the valid iSCSI ports from 3PAR
         # when found, add the valid iSCSI ip, ip port, iqn and nsp
         # to the iSCSI IP dictionary
-        iscsi_ports = common.get_active_iscsi_target_ports()
+        iscsi_ports = common.get_active_iscsi_target_ports(remote_client)

         for port in iscsi_ports:
             ip = port['IPAddr']
@@ -190,8 +198,9 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
         # if the single value iscsi_ip_address option is still in the
         # temp dictionary it's because it defaults to $my_ip which doesn't
         # make sense in this context. So, if present, remove it and move on.
-        if common._client_conf['iscsi_ip_address'] in temp_iscsi_ip:
-            del temp_iscsi_ip[common._client_conf['iscsi_ip_address']]
+        if 'iscsi_ip_address' in backend_conf:
+            if backend_conf['iscsi_ip_address'] in temp_iscsi_ip:
+                del temp_iscsi_ip[backend_conf['iscsi_ip_address']]

         # lets see if there are invalid iSCSI IPs left in the temp dict
         if len(temp_iscsi_ip) > 0:
@@ -204,7 +213,59 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
             msg = _('At least one valid iSCSI IP address must be set.')
             LOG.error(msg)
             raise exception.InvalidInput(reason=msg)
-        self.iscsi_ips[common._client_conf['hpe3par_api_url']] = iscsi_ip_list
+
+        if remote_target:
+            self.iscsi_ips[remote_target['hpe3par_api_url']] = iscsi_ip_list
+        else:
+            self.iscsi_ips[common._client_conf['hpe3par_api_url']] = (
+                iscsi_ip_list)
+
+    def _initialize_connection_common(self, volume, connector, common,
+                                      host, iscsi_ips, ready_ports,
+                                      target_portals, target_iqns, target_luns,
+                                      remote_client=None):
+
+        # Target portal ips are defined in cinder.conf.
+        target_portal_ips = iscsi_ips.keys()
+
+        # Collect all existing VLUNs for this volume/host combination.
+        existing_vluns = common.find_existing_vluns(volume, host,
+                                                    remote_client)
+
+        # Cycle through each ready iSCSI port and determine if a new
+        # VLUN should be created or an existing one used.
+        lun_id = None
+        for port in ready_ports:
+            iscsi_ip = port['IPAddr']
+            if iscsi_ip in target_portal_ips:
+                vlun = None
+                # check for an already existing VLUN matching the
+                # nsp for this iSCSI IP. If one is found, use it
+                # instead of creating a new VLUN.
+                for v in existing_vluns:
+                    portPos = common.build_portPos(
+                        iscsi_ips[iscsi_ip]['nsp'])
+                    if v['portPos'] == portPos:
+                        vlun = v
+                        break
+                else:
+                    vlun = common.create_vlun(
+                        volume, host, iscsi_ips[iscsi_ip]['nsp'],
+                        lun_id=lun_id, remote_client=remote_client)

+                # We want to use the same LUN ID for every port
+                if lun_id is None:
+                    lun_id = vlun['lun']
+
+                iscsi_ip_port = "%s:%s" % (
+                    iscsi_ip, iscsi_ips[iscsi_ip]['ip_port'])
+                target_portals.append(iscsi_ip_port)
+                target_iqns.append(port['iSCSIName'])
+                target_luns.append(vlun['lun'])
+            else:
+                LOG.warning("iSCSI IP: '%s' was not found in "
+                            "hpe3par_iscsi_ips list defined in "
+                            "cinder.conf.", iscsi_ip)

     @utils.trace
     @coordination.synchronized('3par-{volume.id}')
@@ -236,6 +297,8 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
         * Create a host on the 3par
         * create vlun on the 3par
         """
+        LOG.debug("volume id: %(volume_id)s",
+                  {'volume_id': volume['id']})
         array_id = self.get_volume_replication_driver_data(volume)
         common = self._login(array_id=array_id)
         try:
@@ -249,12 +312,15 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
             iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']]

             # we have to make sure we have a host
-            host, username, password = self._create_host(
+            host, username, password, cpg = self._create_host(
                 common,
                 volume,
                 connector)

-            if connector.get('multipath'):
+            multipath = connector.get('multipath')
+            LOG.debug("multipath: %(multipath)s",
+                      {'multipath': multipath})
+            if multipath:
                 ready_ports = common.client.getiSCSIPorts(
                     state=common.client.PORT_STATE_READY)

@@ -262,45 +328,57 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
                 target_iqns = []
                 target_luns = []

-                # Target portal ips are defined in cinder.conf.
-                target_portal_ips = iscsi_ips.keys()
+                self._initialize_connection_common(
+                    volume, connector, common,
+                    host, iscsi_ips, ready_ports,
+                    target_portals, target_iqns, target_luns)

-                # Collect all existing VLUNs for this volume/host combination.
-                existing_vluns = common.find_existing_vluns(volume, host)
+                if volume.get('replication_status') == 'enabled':
+                    LOG.debug('This is a replication setup')

-                # Cycle through each ready iSCSI port and determine if a new
-                # VLUN should be created or an existing one used.
-                lun_id = None
-                for port in ready_ports:
-                    iscsi_ip = port['IPAddr']
-                    if iscsi_ip in target_portal_ips:
-                        vlun = None
-                        # check for an already existing VLUN matching the
-                        # nsp for this iSCSI IP. If one is found, use it
-                        # instead of creating a new VLUN.
-                        for v in existing_vluns:
-                            portPos = common.build_portPos(
-                                iscsi_ips[iscsi_ip]['nsp'])
-                            if v['portPos'] == portPos:
-                                vlun = v
-                                break
+                    remote_target = common._replication_targets[0]
+                    replication_mode = remote_target['replication_mode']
+                    quorum_witness_ip = (
+                        remote_target.get('quorum_witness_ip'))
+
+                    if replication_mode == 1:
+                        LOG.debug('replication_mode is sync')
+                        if quorum_witness_ip:
+                            LOG.debug('quorum_witness_ip is present')
+                            LOG.debug('Peer Persistence has been configured')
                         else:
-                            vlun = common.create_vlun(
-                                volume, host, iscsi_ips[iscsi_ip]['nsp'],
-                                lun_id=lun_id)
+                            LOG.debug('Since quorum_witness_ip is absent, '
+                                      'considering this as Active/Passive '
+                                      'replication')

-                        # We want to use the same LUN ID for every port
-                        if lun_id is None:
-                            lun_id = vlun['lun']
-                        iscsi_ip_port = "%s:%s" % (
-                            iscsi_ip, iscsi_ips[iscsi_ip]['ip_port'])
-                        target_portals.append(iscsi_ip_port)
-                        target_iqns.append(port['iSCSIName'])
-                        target_luns.append(vlun['lun'])
                     else:
-                        LOG.warning("iSCSI IP: '%s' was not found in "
-                                    "hpe3par_iscsi_ips list defined in "
-                                    "cinder.conf.", iscsi_ip)
+                        LOG.debug('Active/Passive replication has been '
+                                  'configured')
+
+                    if replication_mode == 1 and quorum_witness_ip:
+                        remote_client = (
+                            common._create_replication_client(remote_target))
+
+                        self.initialize_iscsi_ports(
+                            common, remote_target, remote_client)
+                        remote_iscsi_ips = (
+                            self.iscsi_ips[remote_target['hpe3par_api_url']])
+
+                        # we have to make sure we have a host
+                        host, username, password, cpg = (
+                            self._create_host(
+                                common, volume, connector,
+                                remote_target, cpg, remote_client))
+
+                        ready_ports = remote_client.getiSCSIPorts(
+                            state=remote_client.PORT_STATE_READY)
+
+                        self._initialize_connection_common(
+                            volume, connector, common,
+                            host, remote_iscsi_ips, ready_ports,
+                            target_portals, target_iqns, target_luns,
+                            remote_client)
+
+                        common._destroy_replication_client(remote_client)

                 info = {'driver_volume_type': 'iscsi',
                         'data': {'target_portals': target_portals,
@@ -373,6 +451,39 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
         common = self._login(array_id=array_id)
         try:
             is_force_detach = connector is None
+
+            remote_client = None
+            multipath = False
+            if connector:
+                multipath = connector.get('multipath')
+            LOG.debug("multipath: %(multipath)s",
+                      {'multipath': multipath})
+            if multipath:
+                if volume.get('replication_status') == 'enabled':
+                    LOG.debug('This is a replication setup')
+
+                    remote_target = common._replication_targets[0]
+                    replication_mode = remote_target['replication_mode']
+                    quorum_witness_ip = (
+                        remote_target.get('quorum_witness_ip'))
+
+                    if replication_mode == 1:
+                        LOG.debug('replication_mode is sync')
+                        if quorum_witness_ip:
+                            LOG.debug('quorum_witness_ip is present')
+                            LOG.debug('Peer Persistence has been configured')
+                        else:
+                            LOG.debug('Since quorum_witness_ip is absent, '
+                                      'considering this as Active/Passive '
+                                      'replication')
+                    else:
+                        LOG.debug('Active/Passive replication has been '
+                                  'configured')
+
+                    if replication_mode == 1 and quorum_witness_ip:
+                        remote_client = (
+                            common._create_replication_client(remote_target))
+
             if is_force_detach:
                 common.terminate_connection(volume, None, None)
             else:
@@ -380,7 +491,8 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
                 common.terminate_connection(
                     volume,
                     hostname,
-                    iqn=connector['initiator'])
+                    iqn=connector['initiator'],
+                    remote_client=remote_client)
                 self._clear_chap_3par(common, volume)
         finally:
             self._logout(common)
@@ -407,7 +519,7 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
             raise

     def _create_3par_iscsi_host(self, common, hostname, iscsi_iqn, domain,
-                                persona_id):
+                                persona_id, remote_client=None):
         """Create a 3PAR host.

         Create a 3PAR host, if there is already a host on the 3par using
@@ -417,7 +529,12 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
         # first search for an existing host
         host_found = None

-        hosts = common.client.queryHost(iqns=iscsi_iqn)
+        if remote_client:
+            client_obj = remote_client
+        else:
+            client_obj = common.client
+
+        hosts = client_obj.queryHost(iqns=iscsi_iqn)

         if hosts and hosts['members'] and 'name' in hosts['members'][0]:
             host_found = hosts['members'][0]['name']
@@ -427,16 +544,16 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
         else:
             persona_id = int(persona_id)
             try:
-                common.client.createHost(hostname, iscsiNames=iscsi_iqn,
+                client_obj.createHost(hostname, iscsiNames=iscsi_iqn,
                                          optional={'domain': domain,
                                                    'persona': persona_id})
             except hpeexceptions.HTTPConflict as path_conflict:
                 msg = "Create iSCSI host caught HTTP conflict code: %s"
                 with save_and_reraise_exception(reraise=False) as ctxt:
                     if path_conflict.get_code() is EXISTENT_PATH:
                         # Handle exception : EXISTENT_PATH - host WWN/iSCSI
                         # name already used by another host
-                        hosts = common.client.queryHost(iqns=iscsi_iqn)
+                        hosts = client_obj.queryHost(iqns=iscsi_iqn)
                         if hosts and hosts['members'] and (
                                 'name' in hosts['members'][0]):
                             hostname = hosts['members'][0]['name']
@@ -468,31 +585,45 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
                        'chapSecret': password}
         common.client.modifyHost(hostname, mod_request)

-    def _create_host(self, common, volume, connector):
+    def _create_host(self, common, volume, connector,
+                     remote_target=None, src_cpg=None, remote_client=None):
         """Creates or modifies existing 3PAR host."""
         # make sure we don't have the host already
         host = None
+        domain = None
         username = None
         password = None
         hostname = common._safe_hostname(connector['host'])
-        cpg = common.get_cpg(volume, allowSnap=True)
-        domain = common.get_domain(cpg)

-        # Get the CHAP secret if CHAP is enabled
-        if common._client_conf['hpe3par_iscsi_chap_enabled']:
-            vol_name = common._get_3par_vol_name(volume['id'])
-            username = common.client.getVolumeMetaData(
-                vol_name, CHAP_USER_KEY)['value']
-            password = common.client.getVolumeMetaData(
-                vol_name, CHAP_PASS_KEY)['value']
+        if remote_target:
+            cpg = common._get_cpg_from_cpg_map(
+                remote_target['cpg_map'], src_cpg)
+            cpg_obj = remote_client.getCPG(cpg)
+            if 'domain' in cpg_obj:
+                domain = cpg_obj['domain']
+        else:
+            cpg = common.get_cpg(volume, allowSnap=True)
+            domain = common.get_domain(cpg)
+
+        if not remote_target:
+            # Get the CHAP secret if CHAP is enabled
+            if common._client_conf['hpe3par_iscsi_chap_enabled']:
+                vol_name = common._get_3par_vol_name(volume['id'])
+                username = common.client.getVolumeMetaData(
+                    vol_name, CHAP_USER_KEY)['value']
+                password = common.client.getVolumeMetaData(
+                    vol_name, CHAP_PASS_KEY)['value']

         try:
-            host = common._get_3par_host(hostname)
-            # Check whether host with iqn of initiator present on 3par
-            hosts = common.client.queryHost(iqns=[connector['initiator']])
-            host, hostname = common._get_prioritized_host_on_3par(host,
-                                                                  hosts,
-                                                                  hostname)
+            if remote_target:
+                host = remote_client.getHost(hostname)
+            else:
+                host = common._get_3par_host(hostname)
+                # Check whether host with iqn of initiator present on 3par
+                hosts = common.client.queryHost(iqns=[connector['initiator']])
+                host, hostname = (
+                    common._get_prioritized_host_on_3par(
+                        host, hosts, hostname))
         except hpeexceptions.HTTPNotFound:
             # get persona from the volume type extra specs
             persona_id = common.get_persona_type(volume)
@@ -501,22 +632,27 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
                                                  hostname,
                                                  [connector['initiator']],
                                                  domain,
-                                                 persona_id)
+                                                 persona_id,
+                                                 remote_client)
         else:
-            if 'iSCSIPaths' not in host or len(host['iSCSIPaths']) < 1:
-                self._modify_3par_iscsi_host(
-                    common, hostname,
-                    connector['initiator'])
-            elif (not host['initiatorChapEnabled'] and
-                    common._client_conf['hpe3par_iscsi_chap_enabled']):
-                LOG.warning("Host exists without CHAP credentials set and "
-                            "has iSCSI attachments but CHAP is enabled. "
-                            "Updating host with new CHAP credentials.")
+            if not remote_target:
+                if 'iSCSIPaths' not in host or len(host['iSCSIPaths']) < 1:
+                    self._modify_3par_iscsi_host(
+                        common, hostname,
+                        connector['initiator'])
+                elif (not host['initiatorChapEnabled'] and
+                        common._client_conf['hpe3par_iscsi_chap_enabled']):
+                    LOG.warning("Host exists without CHAP credentials set and "
+                                "has iSCSI attachments but CHAP is enabled. "
+                                "Updating host with new CHAP credentials.")

-        # set/update the chap details for the host
-        self._set_3par_chaps(common, hostname, volume, username, password)
-        host = common._get_3par_host(hostname)
-        return host, username, password
+        if remote_target:
+            host = remote_client.getHost(hostname)
+        else:
+            # set/update the chap details for the host
+            self._set_3par_chaps(common, hostname, volume, username, password)
+            host = common._get_3par_host(hostname)
+        return host, username, password, cpg

     def _do_export(self, common, volume, connector):
         """Gets the associated account, generates CHAP info and updates."""
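With Peer Persistence active, _initialize_connection_common runs once against the primary array and once against the secondary, appending to the same target_portals, target_iqns and target_luns lists, so the connection info handed back to nova carries portals from both arrays. The dictionary below only illustrates that shape under those assumptions; the addresses, IQNs and LUN numbers are invented, not captured from a real system:

# Hypothetical multipath iSCSI connection info when both backends export
# the volume; the first entry of each list comes from the primary array,
# the second from the peer array.
connection_info = {
    'driver_volume_type': 'iscsi',
    'data': {
        'target_portals': ['10.50.3.10:3260', '10.50.3.60:3260'],
        'target_iqns': ['iqn.2000-05.com.3pardata:21210002ac000001',
                        'iqn.2000-05.com.3pardata:21220002ac000002'],
        # LUN id reported by each array for its own vlun.
        'target_luns': [90, 90],
    },
}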
@@ -109,6 +109,8 @@ Supported operations

 * Report Backend State in Service List.

+* Peer Persistence.
+
 Volume type support for both HPE 3PAR drivers includes the ability to set the
 following capabilities in the OpenStack Block Storage API
 ``cinder.api.contrib.types_extra_specs`` volume type extra specs extension
@@ -461,3 +463,43 @@ Note: If above mentioned option (nsp) is not specified in cinder.conf,
 then the original flow is executed i.e first target is picked and
 bootable volume creation may fail.
+
+Peer Persistence support
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+With a 3PAR backend configured for replication, only Active/Passive
+replication has so far been supported by 3PAR in OpenStack. When a
+failover happens, nova does not support force-detaching the volume from
+the dead primary backend and re-attaching it to the secondary backend,
+so a storage engineer's manual intervention is required.
+
+To overcome this, support for Peer Persistence is added. On a system
+with Peer Persistence configured, when a replicated volume is attached
+to an instance, a vlun is created automatically on the secondary backend
+in addition to the primary backend, so a failover is seamless.
+
+For Peer Persistence support, perform the following steps:
+1] enable multipath
+2] set the replication mode to "sync"
+3] configure a quorum witness server
+
+Specify the IP address of the quorum witness server in
+``/etc/cinder/cinder.conf`` [within the backend section] as given below:
+
+.. code-block:: console
+
+   [3pariscsirep]
+   hpe3par_api_url = http://10.50.3.7:8008/api/v1
+   hpe3par_username = <user_name>
+   hpe3par_password = <password>
+   ...
+   <other parameters>
+   ...
+   replication_device = backend_id:CSIM-EOS12_1611702,
+                        replication_mode:sync,
+                        quorum_witness_ip:10.50.3.192,
+                        hpe3par_api_url:http://10.50.3.22:8008/api/v1,
+                        ...
+                        <other parameters>
+                        ...
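The driver only takes the Peer Persistence path at attach/detach time when all of the conditions above hold; otherwise it falls back to plain Active/Passive behaviour. A rough sketch of that decision, assuming the replication_device entry has already been parsed into a dict (is_peer_persistence is an illustrative helper, not a function in the driver; internally the driver stores the 'sync' replication mode as the integer 1):

SYNC = 1  # 'sync' replication_mode as stored on the parsed target


def is_peer_persistence(connector, volume, remote_target):
    # Mirrors the checks performed in initialize_connection /
    # terminate_connection before the secondary vlun is handled.
    multipath = bool(connector.get('multipath'))
    replicated = volume.get('replication_status') == 'enabled'
    sync_mode = remote_target.get('replication_mode') == SYNC
    has_witness = bool(remote_target.get('quorum_witness_ip'))
    return multipath and replicated and sync_mode and has_witness


# Example: a target matching the cinder.conf sample above.
remote_target = {'replication_mode': SYNC,
                 'quorum_witness_ip': '10.50.3.192'}
print(is_peer_persistence({'multipath': True},
                          {'replication_status': 'enabled'},
                          remote_target))  # True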
@@ -896,7 +896,7 @@ driver.dell_emc_vnx=missing
 driver.dell_emc_vxflexos=missing
 driver.dell_emc_xtremio=missing
 driver.fujitsu_eternus=missing
-driver.hpe_3par=missing
+driver.hpe_3par=complete
 driver.hpe_lefthand=missing
 driver.hpe_msa=missing
 driver.huawei_t_v1=missing

@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Added Peer Persistence support in HPE 3PAR cinder driver.