Nimble Storage: retype support

Add retype support between two Nimble backends, with and without
migration:

- Update the provider location so the correct discovery IP is fetched
  for volume discovery.
- Prevent failures on snapshot deletion / volume offline, since the
  objects could already be in those states.

Change-Id: If21bd6c6aa9cef45f57f32034e26502cd02743b2
Closes-Bug: #1790907
Signed-off-by: Raunak Kumar <rkumar@nimblestorage.com>
parent 4580c56058
commit fb17168c26
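In outline, the retype path follows an offline-edit-online sequence
against the array. The sketch below condenses that flow for review; it
is illustrative only (api stands in for the driver's APIExecutor) and
is not the committed code, which appears in full in the diff that
follows.

    # Condensed sketch of the retype flow this commit adds; illustrative
    # only. "api" stands in for the driver's APIExecutor.
    def retype_sketch(api, volume, new_type):
        if new_type is None or not new_type.get('extra_specs'):
            return True, None                  # nothing to honor: no-op
        vol_info = api.get_vol_info(volume['name'])
        data = api.get_valid_nimble_extraspecs(
            new_type['extra_specs'], vol_info)
        if data is None:
            return True, None                  # nothing to update
        api.online_vol(volume['name'], False)  # offline before editing
        api.edit_vol(volume['name'], data)     # apply the new spec values
        api.online_vol(volume['name'], True)   # bring the volume back online
        return True, None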
@@ -174,6 +174,7 @@ FAKE_GENERIC_POSITIVE_RESPONSE = ""
 FAKE_VOLUME_DELETE_HAS_CLONE_RESPONSE = "Object has a clone"
 
 FAKE_TYPE_ID = fake.VOLUME_TYPE_ID
+FAKE_TYPE_ID_NEW = fake.VOLUME_TYPE2_ID
 FAKE_POOL_ID = fake.GROUP_ID
 FAKE_PERFORMANCE_POLICY_ID = fake.OBJECT_ID
 NIMBLE_MANAGEMENT_IP = "10.18.108.55"
@@ -668,7 +669,7 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase):
     @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
         'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
     @mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock(
-        return_value = ['', '']))
+        return_value=['', '']))
     def test_delete_volume(self):
         self.mock_client_service.online_vol.return_value = (
             FAKE_GENERIC_POSITIVE_RESPONSE)
@@ -688,20 +689,24 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase):
     @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
         'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
     @mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock(
-        return_value = ['', '']))
+        return_value=['', '']))
     def test_delete_volume_with_clone(self):
         self.mock_client_service.delete_vol.side_effect = \
             nimble.NimbleAPIException(FAKE_VOLUME_DELETE_HAS_CLONE_RESPONSE)
 
         self.assertRaises(
             exception.VolumeIsBusy,
             self.driver.delete_volume,
             {'name': 'testvolume'})
-        expected_calls = [mock.call.online_vol(
-            'testvolume', False),
+        expected_calls = [
+            mock.call.login(),
+            mock.call.online_vol('testvolume', False),
             mock.call.delete_vol('testvolume'),
             mock.call.delete_vol('testvolume'),
             mock.call.delete_vol('testvolume'),
             mock.call.online_vol('testvolume', True)]
 
         self.mock_client_service.assert_has_calls(expected_calls)
 
     @mock.patch(NIMBLE_URLLIB2)
@@ -752,11 +757,11 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase):
     @mock.patch(NIMBLE_CLIENT)
     @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                        mock.Mock(type_id=FAKE_TYPE_ID,
-                                 return_value=
-                                 {'nimble:perfpol-name': 'default',
+                                 return_value={
+                                     'nimble:perfpol-name': 'default',
                                   'nimble:encryption': 'yes',
                                   'nimble:multi-initiator': 'false',
                                   'nimble:iops-limit': '1024'}))
     @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
         'nimble', 'nimble_pass', '10.18.108.55', 'default', '*', False))
     @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host')
@@ -962,6 +967,32 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase):
             {'name': 'volume-abcdef'}
         )
 
+    @mock.patch(NIMBLE_URLLIB2)
+    @mock.patch(NIMBLE_CLIENT)
+    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
+                       mock.Mock(return_value=[]))
+    @mock.patch.object(volume_types, 'get_volume_type',
+                       mock.Mock(type_id=FAKE_TYPE_ID_NEW,
+                                 return_value={
+                                     'id': FAKE_TYPE_ID_NEW,
+                                     'extra_specs':
+                                         {'nimble:perfpol-name': 'default',
+                                          'nimble:encryption': 'yes',
+                                          'nimble:multi-initiator': 'false',
+                                          'nimble:iops-limit': '1024'}}))
+    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
+        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
+    def test_retype(self):
+        self.mock_client_service.get_vol_info.return_value = (
+            FAKE_GET_VOL_INFO_ONLINE)
+        retype, update = self.driver.retype(None, FAKE_GET_VOL_INFO_ONLINE,
+                                            volume_types.get_volume_type(
+                                                None,
+                                                FAKE_TYPE_ID_NEW),
+                                            None, None)
+        self.assertTrue(retype)
+        self.assertIsNone(update)
+
     @mock.patch(NIMBLE_URLLIB2)
     @mock.patch(NIMBLE_CLIENT)
     @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
@@ -1120,6 +1151,7 @@ class NimbleDriverConnectionTestCase(NimbleDriverBaseTestCase):
         expected_res = {
             'driver_volume_type': 'iscsi',
             'data': {
+                'target_discovered': False,
                 'volume_id': 12,
                 'target_iqn': '13',
                 'target_lun': 0,
@@ -1132,6 +1164,39 @@ class NimbleDriverConnectionTestCase(NimbleDriverBaseTestCase):
                 'id': 12},
             {'initiator': 'test-initiator1'}))
 
+    @mock.patch(NIMBLE_URLLIB2)
+    @mock.patch(NIMBLE_CLIENT)
+    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
+                       mock.Mock(return_value=[]))
+    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
+        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
+    @mock.patch(NIMBLE_ISCSI_DRIVER + '._get_data_ips')
+    @mock.patch(NIMBLE_ISCSI_DRIVER + ".get_lun_number")
+    @mock.patch(NIMBLE_ISCSI_DRIVER + '._get_gst_for_group')
+    def test_initialize_connection_group_scoped_target(self, mock_gst_name,
+                                                       mock_lun_number,
+                                                       mock_data_ips):
+        mock_data_ips.return_value = ['12', '13']
+        mock_lun_number.return_value = 0
+        mock_gst_name.return_value = "group_target_name"
+        self.mock_client_service.get_initiator_grp_list.return_value = (
+            FAKE_IGROUP_LIST_RESPONSE)
+        expected_res = {
+            'driver_volume_type': 'iscsi',
+            'data': {
+                'target_discovered': False,
+                'volume_id': 12,
+                'target_iqns': ['group_target_name', 'group_target_name'],
+                'target_luns': [0, 0],
+                'target_portals': ['12', '13']}}
+        self.assertEqual(
+            expected_res,
+            self.driver.initialize_connection(
+                {'name': 'test-volume',
+                 'provider_location': '12 group_target_name',
+                 'id': 12},
+                {'initiator': 'test-initiator1'}))
+
     @mock.patch(NIMBLE_URLLIB2)
     @mock.patch(NIMBLE_CLIENT)
     @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
@@ -1144,6 +1209,7 @@ class NimbleDriverConnectionTestCase(NimbleDriverBaseTestCase):
         expected_res = {
             'driver_volume_type': 'iscsi',
             'data': {
+                'target_discovered': False,
                 'volume_id': 12,
                 'target_iqn': '13',
                 'target_lun': 0,
@@ -1223,6 +1289,7 @@ class NimbleDriverConnectionTestCase(NimbleDriverBaseTestCase):
         expected_res = {
             'driver_volume_type': 'iscsi',
             'data': {
+                'target_discovered': False,
                 'target_lun': 0,
                 'volume_id': 12,
                 'target_iqn': '13',
@@ -220,7 +220,6 @@ class NimbleBaseVolumeDriver(san.SanDriver):
         @utils.retry(NimbleAPIException, retries=3)
         def _retry_remove_vol(volume):
             self.APIExecutor.delete_vol(volume['name'])
 
         try:
             _retry_remove_vol(volume)
         except NimbleAPIException as ex:
@@ -423,6 +422,59 @@ class NimbleBaseVolumeDriver(san.SanDriver):
 
         return vol_name
 
+    def _get_volumetype_extraspecs_with_type(self, type_id):
+        specs = {}
+        if type_id is not None:
+            specs = volume_types.get_volume_type_extra_specs(type_id)
+        return specs
+
+    def retype(self, context, volume, new_type, diff, host):
+        """Retype from one volume type to another.
+
+        At this point HPE Nimble Storage does not differentiate between
+        volume types on the same array. This is a no-op for us if there
+        are no extra specs; otherwise honor the extra specs.
+        """
+        if new_type is None:
+            return True, None
+        LOG.debug("retype called with volume_type %s", new_type)
+
+        volume_type_id = new_type['id']
+        if volume_type_id is None:
+            raise NimbleAPIException(_("No volume_type_id present in"
+                                       " %(type)s") % {'type': new_type})
+
+        LOG.debug("volume_type id is %s", volume_type_id)
+        specs_map = self._get_volumetype_extraspecs_with_type(
+            volume_type_id)
+        if specs_map is None:
+            # no extra specs to retype
+            LOG.debug("volume_type %s has no extra specs", volume_type_id)
+            return True, None
+        vol_info = self.APIExecutor.get_vol_info(volume['name'])
+        LOG.debug("new extra specs %s", specs_map)
+        data = self.APIExecutor.get_valid_nimble_extraspecs(specs_map,
+                                                            vol_info)
+        if data is None:
+            # return if there is no update
+            LOG.debug("no data to update for %s", new_type)
+            return True, None
+        try:
+            # offline the volume before edit
+            self.APIExecutor.online_vol(volume['name'], False)
+            # modify the volume
+            LOG.debug("updated volume %s", data)
+            self.APIExecutor.edit_vol(volume['name'], data)
+            # make the volume online after changing the specs
+            self.APIExecutor.online_vol(volume['name'], True)
+        except NimbleAPIException as ex:
+            raise NimbleAPIException(_("Unable to retype %(vol)s to "
+                                       "%(type)s: %(err)s") %
                                      {'vol': volume['name'],
+                                      'type': new_type,
+                                      'err': ex.message})
+        return True, None
+
     def manage_existing(self, volume, external_ref):
         """Manage an existing nimble volume (import to cinder)"""
 
@@ -582,9 +634,9 @@ class NimbleBaseVolumeDriver(san.SanDriver):
                             AGENT_TYPE_OPENSTACK}}
                 self.APIExecutor.edit_vol(vol.name, data)
         except NimbleAPIException:
+            # just log the error but don't fail driver initialization
             LOG.warning('Error updating agent-type for '
                         'volume %s.', vol.name)
-            raise
 
     def _get_model_info(self, volume_name):
         """Get model info for the volume."""
@@ -683,6 +735,9 @@ class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver):
                 group_info['group_target_name'] is not None):
             self._group_target_name = group_info['group_target_name']
 
+    def _get_gst_for_group(self):
+        return self._group_target_name
+
     def initialize_connection(self, volume, connector):
         """Driver entry point to attach a volume to an instance."""
         LOG.info('Entering initialize_connection volume=%(vol)s'
@@ -700,22 +755,32 @@ class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver):
                  '%(iname)s',
                  {'grp': initiator_group_name, 'iname': initiator_name})
         self.APIExecutor.add_acl(volume, initiator_group_name)
+        properties = {"driver_volume_type": "iscsi",
+                      "data": {"target_discovered": False},
+                      }
+        properties['data']['volume_id'] = volume['id']  # used by xen currently
         (iscsi_portal, iqn) = volume['provider_location'].split()
-        if self._group_target_name is not None:
+        if self._get_gst_for_group() is not None:
             lun_num = self.get_lun_number(volume, initiator_group_name)
+            netconfig = self.APIExecutor.get_netconfig('active')
+            target_portals = self._get_data_ips(netconfig)
+            LOG.info("target portals %(portals)s", {'portals': target_portals})
+            target_luns = [int(lun_num)] * len(target_portals)
+            target_iqns = [iqn] * len(target_portals)
+            LOG.debug("target iqns %(iqns)s target luns %(luns)s",
+                      {'iqns': target_iqns, 'luns': target_luns})
+            if target_luns and target_iqns and target_portals:
+                properties["data"]["target_luns"] = target_luns
+                properties["data"]["target_iqns"] = target_iqns
+                properties["data"]["target_portals"] = target_portals
         else:
+            # handling volume scoped target
             lun_num = LUN_ID
+            properties['data']['target_portal'] = iscsi_portal
+            properties['data']['target_iqn'] = iqn
+            properties['data']['target_lun'] = int(lun_num)
 
-        properties = {}
-        properties['target_portal'] = iscsi_portal
-        properties['target_iqn'] = iqn
-        properties['target_lun'] = int(lun_num)
-        properties['volume_id'] = volume['id']  # used by xen currently
-
-        return {
-            'driver_volume_type': 'iscsi',
-            'data': properties,
-        }
+        return properties
 
     def terminate_connection(self, volume, connector, **kwargs):
         """Driver entry point to unattach a volume from an instance."""
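For group scoped targets the connection properties now fan a single IQN
and LUN out across every data portal, which is what lets os-brick set up
a multipath attachment. A minimal sketch of that fan-out, with
illustrative names (not the driver code above):

    # Sketch: duplicate one iqn/lun pair per discovered portal so the
    # connection properties describe every path. Illustrative names only.
    def multipath_properties(iqn, lun_num, target_portals):
        return {
            'driver_volume_type': 'iscsi',
            'data': {
                'target_discovered': False,
                'target_iqns': [iqn] * len(target_portals),
                'target_luns': [int(lun_num)] * len(target_portals),
                'target_portals': target_portals,
            },
        }

    # multipath_properties('iqn.group-target', 0,
    #                      ['10.0.0.1:3260', '10.0.0.2:3260'])
    # yields two identical iqn/lun entries, one per portal, mirroring
    # expected_res in test_initialize_connection_group_scoped_target.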
@@ -737,48 +802,48 @@ class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver):
         """Get volume iqn for initiator access."""
         vol_info = self.APIExecutor.get_vol_info(volume_name)
         netconfig = self.APIExecutor.get_netconfig('active')
 
         self._set_gst_for_group()
-        if self._group_target_name:
-            iqn = self._group_target_name
-            target_ipaddr = self._get_data_ip(netconfig)
-            iscsi_portal = target_ipaddr + ':3260'
+        if self._get_gst_for_group() is not None:
+            iqn = self._get_gst_for_group()
         else:
             iqn = vol_info['target_name']
-            target_ipaddr = self._get_discovery_ip(netconfig)
-            iscsi_portal = target_ipaddr + ':3260'
+        target_ipaddr = self._get_discovery_ip(netconfig)
+        iscsi_portal = target_ipaddr + ':3260'
         provider_location = '%s %s' % (iscsi_portal, iqn)
         LOG.info('vol_name=%(name)s provider_location=%(loc)s',
                  {'name': volume_name, 'loc': provider_location})
         return provider_location
 
-    def _get_data_ip(self, netconfig):
-        """Get data ip."""
+    def _get_data_ips(self, netconfig):
+        """Get data ips."""
         subnet_label = self.configuration.nimble_subnet_label
         LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s',
                   {'netlabel': subnet_label, 'netconf': netconfig})
-        ret_data_ip = ''
+        ret_data_ips = []
         for subnet in netconfig['array_list'][0]['nic_list']:
             LOG.info('Exploring array subnet label %s', subnet[
                 'subnet_label'])
             if subnet['data_ip']:
                 if subnet_label == '*':
-                    # Use the first data subnet, save mgmt+data for later
+                    # if all subnets are mentioned then return all portals
+                    # else just return specific subnet
                     LOG.info('Data ip %(data_ip)s is used '
                              'on data subnet %(net_label)s',
                              {'data_ip': subnet['data_ip'],
                              'net_label': subnet['subnet_label']})
-                    return subnet['data_ip']
+                    ret_data_ips.append(str(subnet['data_ip']) + ':3260')
                 elif subnet_label == subnet['subnet_label']:
                     LOG.info('Data ip %(data_ip)s is used'
                              ' on subnet %(net_label)s',
                              {'data_ip': subnet['data_ip'],
                               'net_label': subnet['subnet_label']})
-                    return subnet['data_ip']
-        if ret_data_ip:
-            LOG.info('Data ip %s is used on mgmt+data subnet',
-                     ret_data_ip)
-            return ret_data_ip
+                    data_ips_single_subnet = []
+                    data_ips_single_subnet.append(str(subnet['data_ip']) +
+                                                  ':3260')
+                    return data_ips_single_subnet
+        if ret_data_ips:
+            LOG.info('Data ips %s', ret_data_ips)
+            return ret_data_ips
         else:
             raise NimbleDriverException(_('No suitable data ip found'))
@@ -913,12 +978,10 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver):
                  {'vol': volume,
                   'conn': connector,
                   'loc': volume['provider_location']})
 
         wwpns = []
         initiator_name = connector['initiator']
         for wwpn in connector['wwpns']:
             wwpns.append(wwpn)
-        init_targ_map = {}
         (array_name) = volume['provider_location'].split()
         target_wwns = self.get_wwpns_from_array(array_name)
         init_targ_map = self._build_initiator_target_map(target_wwns,
@@ -1098,6 +1161,70 @@ class NimbleRestAPIExecutor(object):
 
         return extra_specs_map
 
+    def get_valid_nimble_extraspecs(self, extra_specs_map, vol_info):
+
+        extra_specs_map_updated = self._get_extra_spec_values(extra_specs_map)
+        data = {"data": {}}
+        perf_policy_name = extra_specs_map_updated[EXTRA_SPEC_PERF_POLICY]
+        perf_policy_id = self.get_performance_policy_id(perf_policy_name)
+        data['perfpolicy_id'] = perf_policy_id
+
+        encrypt = extra_specs_map_updated[EXTRA_SPEC_ENCRYPTION]
+        cipher = DEFAULT_CIPHER
+        if encrypt.lower() == 'yes':
+            cipher = AES_256_XTS_CIPHER
+        data['cipher'] = cipher
+
+        multi_initiator = extra_specs_map_updated[EXTRA_SPEC_MULTI_INITIATOR]
+        data['multi_initiator'] = multi_initiator
+
+        folder_name = extra_specs_map_updated[EXTRA_SPEC_FOLDER]
+        folder_id = None
+        pool_id = vol_info['pool_id']
+        pool_name = vol_info['pool_name']
+        if folder_name is not None:
+            # validate if folder exists in pool_name
+            pool_info = self.get_pool_info(pool_id)
+            if 'folder_list' in pool_info and (pool_info['folder_list'] is
+                                               not None):
+                for folder_list in pool_info['folder_list']:
+                    LOG.debug("folder_list : %s", folder_list)
+                    if folder_list['fqn'] == "/" + folder_name:
+                        LOG.debug("Folder %(folder)s present in pool "
+                                  "%(pool)s",
+                                  {'folder': folder_name,
+                                   'pool': pool_name})
+                        folder_id = self.get_folder_id(folder_name)
+                        if folder_id is not None:
+                            data['data']["folder_id"] = folder_id
+                if folder_id is None:
+                    raise NimbleAPIException(_("Folder '%(folder)s' not "
+                                               "present in pool '%("
+                                               "pool)s'") %
+                                             {'folder': folder_name,
+                                              'pool': pool_name})
+            else:
+                raise NimbleAPIException(_(
+                    "Folder '%(folder)s' not present in pool '%(pool)s'")
+                    % {'folder': folder_name,
+                       'pool': pool_name})
+        iops_limit = extra_specs_map_updated[EXTRA_SPEC_IOPS_LIMIT]
+        if iops_limit is not None:
+            if not iops_limit.isdigit() or (
+                    int(iops_limit) < MIN_IOPS) or (int(iops_limit) > MAX_IOPS):
+                raise NimbleAPIException(_("%(err)s [%(min)s, %(max)s]")
+                                         % {'err': IOPS_ERR_MSG,
+                                            'min': MIN_IOPS,
+                                            'max': MAX_IOPS})
+
+            data['data']['limit_iops'] = iops_limit
+
+        dedupe = extra_specs_map_updated[EXTRA_SPEC_DEDUPE]
+        if dedupe.lower() == 'true':
+            data['data']['dedupe_enabled'] = True
+
+        return data
+
     def create_vol(self, volume, pool_name, reserve, protocol, is_gst_enabled):
         response = self._execute_create_vol(volume, pool_name, reserve,
                                             protocol, is_gst_enabled)
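get_valid_nimble_extraspecs translates the OpenStack extra specs into an
edit payload for the array. As a hand-worked illustration of the
resulting shape (the placeholder values below are assumptions, not
output captured from the driver), the specs used in the tests would map
roughly as follows:

    # Hand-worked illustration of the spec-to-payload mapping; the
    # placeholder values are assumptions, not captured driver output.
    example_specs = {'nimble:perfpol-name': 'default',
                     'nimble:encryption': 'yes',
                     'nimble:multi-initiator': 'false',
                     'nimble:iops-limit': '1024'}

    expected_payload_shape = {
        'perfpolicy_id': '<id resolved from the perf policy name>',
        'cipher': '<AES-256-XTS when nimble:encryption is yes>',
        'multi_initiator': 'false',
        'data': {'limit_iops': '1024'},  # folder_id/dedupe only if specified
    }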
@@ -1652,7 +1779,17 @@ class NimbleRestAPIExecutor(object):
     def delete_snap(self, volume_name, snap_name):
         snap_info = self.get_snap_info(snap_name, volume_name)
         api = "snapshots/" + six.text_type(snap_info['id'])
-        self.delete(api)
+        try:
+            self.delete(api)
+        except NimbleAPIException as ex:
+            LOG.debug("delete snapshot exception: %s", ex)
+            if SM_OBJ_HAS_CLONE in six.text_type(ex):
+                # if snap has a clone log the error and continue ahead
+                LOG.warning('Snapshot %(snap)s : %(state)s',
+                            {'snap': snap_name,
+                             'state': SM_OBJ_HAS_CLONE})
+            else:
+                raise
 
     @_connection_checker
     def get(self, api):
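delete_snap now applies the same tolerate-the-expected-error pattern
used for volume deletion: swallow only the known "Object has a clone"
failure and re-raise anything else. A generic sketch of the pattern
(illustrative; the driver itself catches NimbleAPIException and logs
via LOG):

    # Generic sketch of the tolerant-delete pattern: ignore only the one
    # expected error message and re-raise everything else. Illustrative.
    SM_OBJ_HAS_CLONE = "Object has a clone"

    def delete_tolerating_clones(delete_call, name):
        try:
            delete_call(name)
        except Exception as ex:
            if SM_OBJ_HAS_CLONE in str(ex):
                # the object is still cloned; note it and move on
                print('%s: %s, continuing' % (name, SM_OBJ_HAS_CLONE))
            else:
                raise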
@@ -0,0 +1,3 @@
+---
+features:
+  - Support for retype and volume migration for the HPE Nimble Storage driver.