Merge "NEC Driver: Storage assist retype and a bugfix"

Zuul 2019-09-26 01:14:32 +00:00 committed by Gerrit Code Review
commit efd481fe65
4 changed files with 430 additions and 102 deletions


@@ -28,6 +28,8 @@ from cinder.volume import configuration as conf
from cinder.volume.drivers.nec import cli
from cinder.volume.drivers.nec import volume_common
from cinder.volume.drivers.nec import volume_helper
from cinder.volume import qos_specs
from cinder.volume import volume_types
xml_out = '''
@@ -197,6 +199,17 @@ xml_out = '''
<UNIT name="RPL Attribute">SV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">000f</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">59V9KIi0ZHWJ5yvjCG5RQ4_d</UNIT>
<UNIT name="LD Capacity">6442450944</UNIT>
<UNIT name="Pool No.(h)">0001</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0fff</UNIT>
@@ -363,12 +376,22 @@ class DummyVolume(object):
def __init__(self, volid, volsize=1):
super(DummyVolume, self).__init__()
self.id = volid
self._name_id = None
self.size = volsize
self.status = None
self.volume_type_id = None
self.attach_status = None
self.volume_attachment = None
self.provider_location = None
self.name = None
@property
def name_id(self):
return self.id if not self._name_id else self._name_id
@name_id.setter
def name_id(self, value):
self._name_id = value
class DummySnapshot(object):
@@ -419,6 +442,10 @@ class VolumeIDConvertTest(volume_helper.MStorageDSVDriver, test.TestCase):
"ID:%(volid)s should be change to %(ldname)s" %
{'volid': volid, 'ldname': ldname})
def test_convert_deleteldname(self):
ldname = self._convert_deleteldname('LX:287RbQoP7VdwR1WsPC2fZT')
self.assertEqual(ldname, 'LX:287RbQoP7VdwR1WsPC2fZT_d')
class NominatePoolLDTest(volume_helper.MStorageDSVDriver, test.TestCase):
@@ -699,16 +726,36 @@ class BindLDTest(volume_helper.MStorageDSVDriver, test.TestCase):
self.assertEqual(60, cli.get_sleep_time_for_clone(19))
def test_delete_volume(self):
vol = DummyVolume("46045673-41e7-44a7-9333-02f07feab04b")
detached = self._detach_from_all(vol)
ldname = "LX:287RbQoP7VdwR1WsPC2fZT"
detached = self._detach_from_all(ldname, xml_out)
self.assertTrue(detached)
vol.id = constants.VOLUME_ID
detached = self._detach_from_all(vol)
ldname = 'LX:31HxzqBiAFTUxxOlcVn3EA'
detached = self._detach_from_all(ldname, xml_out)
self.assertFalse(detached)
vol.id = constants.VOLUME2_ID
with mock.patch.object(self, '_detach_from_all') as detach_mock:
vol = DummyVolume("1febb976-86d0-42ed-9bc0-4aa3e158f27d")
with mock.patch.object(self._cli, 'unbind') as unbind_mock:
self.delete_volume(vol)
detach_mock.assert_called_once_with(vol)
unbind_mock.assert_called_once_with('LX:yEUHrXa5AHMjOZZLb93eP')
pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
self.configs(xml_out))
vol = DummyVolume('1febb976-86d0-42ed-9bc0-4aa3e158f27d')
vol._name_id = None
with mock.patch.object(self._cli, 'unbind') as unbind_mock:
self.delete_volume(vol)
unbind_mock.assert_called_once_with('LX:yEUHrXa5AHMjOZZLb93eP')
vol = DummyVolume('46045673-41e7-44a7-9333-02f07feab04b')
vol._name_id = '1febb976-86d0-42ed-9bc0-4aa3e158f27d'
with mock.patch.object(self._cli, 'unbind') as unbind_mock:
self.delete_volume(vol)
unbind_mock.assert_called_once_with('LX:yEUHrXa5AHMjOZZLb93eP')
vol = DummyVolume(constants.VOLUME_ID)
vol._name_id = 'a951f0eb-27ae-41a7-a5e5-604e721a16d4'
with mock.patch.object(self._cli, 'unbind') as unbind_mock:
self.delete_volume(vol)
unbind_mock.assert_called_once_with('LX:59V9KIi0ZHWJ5yvjCG5RQ4_d')
class BindLDTest_Snap(volume_helper.MStorageDSVDriver, test.TestCase):
@@ -1278,7 +1325,22 @@ class Migrate_test(volume_helper.MStorageDSVDriver, test.TestCase):
self.mock_object(self._cli, '_execute',
return_value=('success', 0, 0))
self.mock_object(self._cli, 'view_all', return_value=xml_out)
self.mock_object(self, '_bind_ld', return_value=(0, 0, 0))
self.mock_object(self._cli, 'backup_restore')
self.mock_object(volume_types, 'get_volume_type',
return_value={})
self.mock_object(qos_specs, 'get_qos_specs',
return_value={})
self.do_setup(None)
self._properties['cli_fip'] = '10.0.0.1'
self._properties['pool_pools'] = {0, 1}
self._properties['pool_backup_pools'] = {2, 3}
self.newvol = DummyVolume(constants.VOLUME_ID)
self.sourcevol = DummyVolume(constants.VOLUME2_ID)
self.host = {}
self.VERSION = '9.99.9'
self.host['capabilities'] = self._update_volume_status()
self.xml = xml_out
def test_update_migrate_volume(self):
newvol = DummyVolume(constants.VOLUME_ID)
@@ -1288,6 +1350,115 @@ class Migrate_test(volume_helper.MStorageDSVDriver, test.TestCase):
self.assertIsNone(update_data['_name_id'])
self.assertIsNone(update_data['provider_location'])
def test_migrate_volume(self):
vol = DummyVolume(constants.VOLUME2_ID)
moved, model_update = self.migrate_volume(None, vol,
self.host)
self.assertTrue(moved)
vol.id = "87d8d42f-7550-4f43-9a2b-fe722bf86941"
with self.assertRaisesRegex(exception.NotFound,
'Logical Disk `LX:48L3QCi4npuqxPX0Lyeu8H`'
' could not be found.'):
self._validate_migrate_volume(vol, xml_out)
vol.id = '46045673-41e7-44a7-9333-02f07feab04b'
vol.status = 'creating'
with self.assertRaisesRegex(exception.VolumeBackendAPIException,
'Specified Logical Disk '
'LX:287RbQoP7VdwR1WsPC2fZT is '
'not available.'):
self._validate_migrate_volume(vol, xml_out)
vol.id = "92dbc7f4-dbc3-4a87-aef4-d5a2ada3a9af"
vol.status = 'available'
with self.assertRaisesRegex(exception.VolumeBackendAPIException,
r'Specified Logical Disk '
r'LX:4T7JpyqI3UuPlKeT9D3VQF has an '
r'invalid attribute '
r'\(\(invalid attribute\)\).'):
self._validate_migrate_volume(vol, xml_out)
def test_retype_volume(self):
vol = DummyVolume(constants.VOLUME2_ID)
diff = {'encryption': {},
'qos_specs': {},
'extra_specs': {u'volume_backend_name': (u'Storage1',
u'Storage2')}}
new_type = {'id': constants.VOLUME_TYPE_ID}
retyped = self.retype(None, vol, new_type, diff, self.host)
self.assertTrue(retyped)
volume_type = {'name': u'Bronze',
'qos_specs_id': u'57223246-1d49-4565-860f-bbbee6cee122',
'deleted': False,
'created_at': '2019-01-08 08:48:20',
'updated_at': '2019-01-08 08:48:29',
'extra_specs': {}, 'is_public': True,
'deleted_at': None,
'id': u'33cd6136-0465-4ee0-82fa-b5f3a9138249',
'description': None}
specs = {'specs': {u'lowerlimit': u'500', u'upperlimit': u'2000'}}
volume_types.get_volume_type.return_value = volume_type
qos_specs.get_qos_specs.return_value = specs
diff = {'encryption': {},
'qos_specs': {'consumer': (u'back-end', u'back-end'),
u'lowerlimit': (u'1000', u'500'),
u'upperlimit': (u'3000', u'2000')},
'extra_specs': {u'volume_backend_name': (u'Storage', None)}}
retyped = self.retype(None, vol, new_type, diff, self.host)
self.assertTrue(retyped)
diff = {'encryption': {},
'qos_specs': {'consumer': (u'back-end', None),
u'lowerlimit': (u'1000', u'500'),
u'upperlimit': (u'3000', u'2000')},
'extra_specs': {}}
retyped = self.retype(None, vol, new_type, diff, self.host)
self.assertTrue(retyped)
def test_validate_retype_volume(self):
vol = DummyVolume("87d8d42f-7550-4f43-9a2b-fe722bf86941")
with self.assertRaisesRegex(exception.NotFound,
'Logical Disk `LX:48L3QCi4npuqxPX0Lyeu8H`'
' could not be found.'):
self._validate_retype_volume(vol, xml_out)
vol = DummyVolume("92dbc7f4-dbc3-4a87-aef4-d5a2ada3a9af")
with self.assertRaisesRegex(exception.VolumeBackendAPIException,
r'Specified Logical Disk '
r'LX:4T7JpyqI3UuPlKeT9D3VQF has an '
r'invalid attribute '
r'\(\(invalid attribute\)\).'):
self._validate_retype_volume(vol, xml_out)
def test_spec_is_changed(self):
extra_specs = {u'volume_backend_name': (u'Storage', None)}
equal = self._spec_is_changed(extra_specs, 'volume_backend_name')
self.assertTrue(equal)
extra_specs = {u'volume_backend_name': (u'Storage', u'Storage')}
equal = self._spec_is_changed(extra_specs, 'volume_backend_name')
self.assertFalse(equal)
def test_check_same_backend(self):
diff = {'encryption': {},
'qos_specs': {'consumer': (u'back-end', u'back-end'),
u'upperlimit': (u'3000', u'2000'),
u'lowerlimit': (u'1000', u'500')},
'extra_specs': {u'volume_backend_name': (u'Storage', None)}}
qos = self._check_same_backend(diff)
self.assertFalse(qos)
diff['extra_specs'] = {u'volume_backend_name':
(u'Storage', u'Storage')}
qos = self._check_same_backend(diff)
self.assertTrue(qos)
diff['extra_specs'] = {u'volume_backend_name': (u'Storage', None),
u'dummy_specs': None}
qos = self._check_same_backend(diff)
self.assertFalse(qos)
class ManageUnmanage_test(volume_helper.MStorageDSVDriver, test.TestCase):
@@ -1499,3 +1670,36 @@ class RevertToSnapshotTestCase(volume_helper.MStorageDSVDriver, test.TestCase):
'svname=LX:31HxzqBiAFTUxxOlcVn3EA, '
'status=snap/fault'):
self.revert_to_snapshot(None, vol, snap)
class SetQosSpec_test(volume_helper.MStorageDSVDriver,
test.TestCase):
def setUp(self):
super(SetQosSpec_test, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.mock_object(self._cli, '_execute',
return_value=('success', 0, 0))
self.do_setup(None)
def test_set_qos_spec(self):
volume_type = {'name': u'Bronze',
'qos_specs_id': u'57223246-1d49-4565-860f-bbbee6cee122',
'deleted': False,
'created_at': '2019-01-08 08:48:20',
'updated_at': '2019-01-08 08:48:29',
'extra_specs': {}, 'is_public': True,
'deleted_at': None,
'id': u'33cd6136-0465-4ee0-82fa-b5f3a9138249',
'description': None}
voltype_qos_specs = {'specs': {u'lowerlimit': u'500',
u'upperlimit': u'2000',
'upperreport': None}}
self.mock_object(volume_types, 'get_volume_type',
return_value=volume_type)
self.mock_object(qos_specs, 'get_qos_specs',
return_value=voltype_qos_specs)
ldname = 'LX:287RbQoP7VdwR1WsPC2fZT'
volume_type_id = '33cd6136-0465-4ee0-82fa-b5f3a9138249'
ret = self._set_qos_spec(ldname, volume_type_id)
self.assertIsNone(ret)
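
For illustration only (this snippet is not part of the patch), the same-backend
check exercised by test_check_same_backend above can be sketched as a standalone
pair of functions; they are simplified re-implementations of _spec_is_changed and
_check_same_backend from the volume_helper change further below:

def spec_is_changed(specdiff, resname):
    # True when the named spec is present and its (old, new) values differ.
    res = specdiff.get(resname)
    return res is not None and res[0] != res[1]

def check_same_backend(diff):
    # A QoS-only retype is possible only when volume_backend_name is
    # unchanged and no other extra spec appears in the diff.
    if spec_is_changed(diff['extra_specs'], 'volume_backend_name'):
        return False
    if len(diff['extra_specs']) > 1:
        return False
    return True

same_backend = {'encryption': {}, 'qos_specs': {},
                'extra_specs': {'volume_backend_name': ('Storage', 'Storage')}}
other_backend = {'encryption': {}, 'qos_specs': {},
                 'extra_specs': {'volume_backend_name': ('Storage', None)}}
print(check_same_backend(same_backend))   # True  -> update QoS only
print(check_same_backend(other_backend))  # False -> storage-assisted migration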


@@ -51,6 +51,9 @@ class MStorageISCSIDriver(volume_helper.MStorageDSVDriver,
Add support for multi-attach.
Add support of more than 4 iSCSI portals for a node.
Add support to revert a volume to a snapshot.
Add support for storage assist retype and fix bug #1838955:
a volume in NEC Storage was left undeleted when the volume
was retyped to another storage backend.
"""
VERSION = '1.11.1'
@@ -116,6 +119,9 @@ class MStorageFCDriver(volume_helper.MStorageDSVDriver,
Add support for multi-attach.
Add support of more than 4 iSCSI portals for a node.
Add support to revert a volume to a snapshot.
Add support for storage assist retype and fix bug #1838955:
a volume in NEC Storage was left undeleted when the volume
was retyped to another storage backend.
"""
VERSION = '1.11.1'
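
To illustrate the bug fix described in the version notes above (this snippet is
not part of the patch): during a same-backend retype the source LD is renamed
with a '_d' suffix (see _convert_deleteldname, update_migrated_volume and
_delete_volume in volume_helper.py below), so the leftover source volume can
still be found and unbound. A minimal sketch of that naming convention, using a
hypothetical helper name pick_ldname_to_delete:

def convert_deleteldname(ldname):
    # The source LD of a same-backend migration gets a '_d' suffix.
    return ldname + '_d'

def pick_ldname_to_delete(ldname, existing_lds):
    # Mirrors the lookup order in _delete_volume: prefer the '_d' variant
    # when it exists, so the retype leftover is the LD that gets unbound.
    delete_ldname = convert_deleteldname(ldname)
    return delete_ldname if delete_ldname in existing_lds else ldname

print(pick_ldname_to_delete('LX:59V9KIi0ZHWJ5yvjCG5RQ4',
                            {'LX:59V9KIi0ZHWJ5yvjCG5RQ4_d'}))
# -> LX:59V9KIi0ZHWJ5yvjCG5RQ4_d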


@@ -820,11 +820,11 @@ class MStorageVolumeCommon(object):
xml = self._cli.view_all(self._properties['ismview_path'], False)
return self.configs(xml)
def get_volume_type_qos_specs(self, volume):
def get_volume_type_qos_specs(self, volume_type_id):
specs = {}
ctxt = context.get_admin_context()
type_id = volume.volume_type_id
type_id = volume_type_id
if type_id is not None:
volume_type = volume_types.get_volume_type(ctxt, type_id)
@@ -834,56 +834,54 @@ class MStorageVolumeCommon(object):
LOG.debug('get_volume_type_qos_specs '
'volume_type=%(volume_type)s, '
'qos_specs_id=%(qos_spec_id)s '
'qos_specs_id=%(qos_spec_id)s, '
'specs=%(specs)s',
{'volume_type': volume_type,
'qos_spec_id': qos_specs_id,
'specs': specs})
return specs
def check_io_parameter(self, specs):
if ('upperlimit' not in specs and
'lowerlimit' not in specs and
'upperreport' not in specs):
specs['upperlimit'] = None
specs['lowerlimit'] = None
specs['upperreport'] = None
LOG.debug('qos parameter not found.')
def correct_qos_parameter(self, specs, reset):
if 'upperlimit' in specs and specs['upperlimit'] is not None:
if self.validates_number(specs['upperlimit']) is True:
upper_limit = int(specs['upperlimit'], 10)
if ((upper_limit != 0) and
((upper_limit < 10) or (upper_limit > 1000000))):
raise exception.InvalidConfigurationValue(
value=upper_limit, option='upperlimit')
else:
raise exception.InvalidConfigurationValue(
value=specs['upperlimit'], option='upperlimit')
else:
if 'upperlimit' in specs and specs['upperlimit'] is not None:
if self.validates_number(specs['upperlimit']) is True:
upper_limit = int(specs['upperlimit'], 10)
if ((upper_limit != 0) and
((upper_limit < 10) or (upper_limit > 1000000))):
raise exception.InvalidConfigurationValue(
value=upper_limit, option='upperlimit')
else:
raise exception.InvalidConfigurationValue(
value=specs['upperlimit'], option='upperlimit')
else:
specs['upperlimit'] = None
# 0: Set to no limit.(default)
# None: Keep current value.
specs['upperlimit'] = '0' if reset else None
if 'lowerlimit' in specs and specs['lowerlimit'] is not None:
if self.validates_number(specs['lowerlimit']) is True:
lower_limit = int(specs['lowerlimit'], 10)
if (lower_limit != 0 and (lower_limit < 10 or
lower_limit > 1000000)):
raise exception.InvalidConfigurationValue(
value=lower_limit, option='lowerlimit')
else:
if 'lowerlimit' in specs and specs['lowerlimit'] is not None:
if self.validates_number(specs['lowerlimit']) is True:
lower_limit = int(specs['lowerlimit'], 10)
if (lower_limit != 0 and (lower_limit < 10 or
lower_limit > 1000000)):
raise exception.InvalidConfigurationValue(
value=specs['lowerlimit'], option='lowerlimit')
value=lower_limit, option='lowerlimit')
else:
specs['lowerlimit'] = None
raise exception.InvalidConfigurationValue(
value=specs['lowerlimit'], option='lowerlimit')
else:
# 0: Set to no limit.(default)
# None: Keep current value.
specs['lowerlimit'] = '0' if reset else None
if 'upperreport' in specs:
if specs['upperreport'] not in ['on', 'off']:
LOG.debug('Illegal arguments. '
'upperreport is not on or off.'
'upperreport=%s', specs['upperreport'])
specs['upperreport'] = None
else:
specs['upperreport'] = None
if 'upperreport' in specs:
if specs['upperreport'] not in ['on', 'off']:
LOG.debug('Illegal arguments. '
'upperreport is not on or off.'
'upperreport=%s', specs['upperreport'])
specs['upperreport'] = 'off' if reset else None
else:
# off: Set to no report.(default)
# None: Keep current value.
specs['upperreport'] = 'off' if reset else None
def check_accesscontrol(self, ldsets, ld):
"""Check Logical disk is in-use or not."""


@@ -50,6 +50,9 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
ldname = ldname + '_m'
return ldname
def _convert_deleteldname(self, ldname):
return ldname + '_d'
def _select_ldnumber(self, used_ldns, max_ld_count):
"""Pick up unused LDN."""
for ldn in range(0, max_ld_count + 1):
@@ -323,23 +326,17 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
self._convert_id2name,
self._select_leastused_poolnumber)
# check io limit.
specs = self.get_volume_type_qos_specs(volume)
self.check_io_parameter(specs)
# set io limit.
self._cli.set_io_limit(ldname, specs)
self._set_qos_spec(ldname, volume.volume_type_id)
LOG.debug('LD bound. '
'Name=%(name)s '
'Size=%(size)dGB '
'LDN=%(ldn)04xh '
'Pool=%(pool)04xh '
'Specs=%(specs)s.',
'Pool=%(pool)04xh.',
{'name': ldname,
'size': volume.size,
'ldn': ldn,
'pool': selected_pool,
'specs': specs})
'pool': selected_pool})
def _can_extend_capacity(self, new_size, pools, lds, ld):
rvs = {}
@@ -489,12 +486,7 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
self._convert_id2name,
self._select_leastused_poolnumber)
# check io limit.
specs = self.get_volume_type_qos_specs(volume)
self.check_io_parameter(specs)
# set io limit.
self._cli.set_io_limit(volume_name, specs)
self._set_qos_spec(volume_name, volume.volume_type_id)
LOG.debug('LD bound. Name=%(name)s '
'Size=%(size)dGB '
@@ -529,6 +521,17 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
'Source ID = %(src_id)s ) End.',
{'id': volume.id, 'src_id': src_vref.id})
def _set_qos_spec(self, ldname, volume_type_id, reset=False):
# check io limit.
specs = self.get_volume_type_qos_specs(volume_type_id)
self.correct_qos_parameter(specs, reset)
# set io limit.
self._cli.set_io_limit(ldname, specs)
LOG.debug('_set_qos_spec(Specs = %s) End.', specs)
return
def _validate_migrate_volume(self, volume, xml):
"""Validate source volume information."""
pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
@@ -604,23 +607,131 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
'fip': self._properties['cli_fip']})
return false_ret
self._migrate(volume, host, volume.volume_type_id,
self._validate_migrate_volume,
self._select_migrate_poolnumber)
LOG.debug('_migrate_volume(Volume ID = %(id)s, '
'Host = %(host)s) End.',
{'id': volume.id, 'host': host})
return (True, [])
def _validate_retype_volume(self, volume, xml):
"""Validate source volume information."""
pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
self.configs(xml))
# get ld object
ldname = self._validate_ld_exist(
lds, volume.id, self._properties['ld_name_format'])
# check rpl attribute.
ld = lds[ldname]
if ld['Purpose'] != '---':
msg = (_('Specified Logical Disk %(ld)s '
'has an invalid attribute (%(purpose)s).')
% {'ld': ldname, 'purpose': ld['Purpose']})
raise exception.VolumeBackendAPIException(data=msg)
return True
def _spec_is_changed(self, specdiff, resname):
res = specdiff.get(resname)
if (res is not None and res[0] != res[1]):
return True
return False
def _check_same_backend(self, diff):
if self._spec_is_changed(diff['extra_specs'], 'volume_backend_name'):
return False
if len(diff['extra_specs']) > 1:
return False
return True
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to the specified volume type.
:param context: The context used to run the method retype
:param volume: The original volume that was retype to this backend
:param new_type: The new volume type
:param diff: The difference between the two types
:param host: The target information
:returns: a boolean indicating whether the migration occurred, and
model_update
"""
msgparm = ('Volume ID = %(id)s, '
'New Type = %(type)s, '
'Diff = %(diff)s, '
'Destination Host = %(dsthost)s'
% {'id': volume.id,
'type': new_type,
'diff': diff,
'dsthost': host})
try:
ret = self._retype(context, volume, new_type, diff, host)
LOG.info('Retyped Volume (%s)', msgparm)
return ret
except exception.CinderException as e:
with excutils.save_and_reraise_exception():
LOG.warning('Failed to Retype Volume '
'(%(msgparm)s) (%(exception)s)',
{'msgparm': msgparm, 'exception': e})
def _retype(self, context, volume, new_type, diff, host):
"""Retype the volume to the specified volume type.
Returns a boolean indicating whether the migration occurred, as well as
model_update.
"""
LOG.debug('_retype('
'Volume ID = %(id)s, '
'Volume Name = %(name)s, '
'New Type = %(type)s, '
'Diff = %(diff)s, '
'host = %(host)s) Start.',
{'id': volume.id,
'name': volume.name,
'type': new_type,
'diff': diff,
'host': host})
if self._check_same_backend(diff):
ldname = self._convert_id2name(volume)
reset = (diff['qos_specs'].get('consumer')[0] == 'back-end')
self._set_qos_spec(ldname, new_type['id'], reset)
LOG.debug('_retype(QoS setting only)(Volume ID = %(id)s, '
'Host = %(host)s) End.',
{'id': volume.id, 'host': host})
return True
self._migrate(volume,
host,
new_type['id'],
self._validate_retype_volume,
self._select_leastused_poolnumber)
LOG.debug('_retype(Volume ID = %(id)s, '
'Host = %(host)s) End.',
{'id': volume.id, 'host': host})
return True
def _migrate(self, volume, host, volume_type_id, validator, pool_selecter):
# bind LD.
(rvname,
ldn,
selected_pool) = self._bind_ld(volume,
volume.size,
self._validate_migrate_volume,
validator,
self._convert_id2migratename,
self._select_migrate_poolnumber,
pool_selecter,
host)
if selected_pool >= 0:
# check io limit.
specs = self.get_volume_type_qos_specs(volume)
self.check_io_parameter(specs)
# set io limit.
self._cli.set_io_limit(rvname, specs)
self._set_qos_spec(rvname, volume_type_id)
volume_properties = {
'mvname':
@@ -637,12 +748,7 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
# replicate LD.
self._cli.backup_restore(volume_properties,
cli.UnpairWaitForMigrate)
LOG.debug('_migrate_volume(Volume ID = %(id)s, '
'Host = %(host)s) End.',
{'id': volume.id, 'host': host})
return (True, [])
return
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
@@ -658,6 +764,12 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
LOG.debug('update_migrated_volume'
'(Volume ID = %(id)s, New Volume ID = %(new_id)s, '
'Status = %(status)s) Start.',
{'id': volume.id, 'new_id': new_volume.id,
'status': original_volume_status})
xml = self._cli.view_all(self._properties['ismview_path'])
pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
self.configs(xml))
@@ -666,11 +778,12 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
provider_location = None
if original_volume_status == 'available':
original_name = self._convert_id2name(volume)
temp_name = self._convert_id2name(new_volume)
new_name = self._convert_id2name(new_volume)
try:
if original_name in lds:
self._cli.unbind(original_name)
self._cli.changeldname(None, original_name, temp_name)
delete_ldname = self._convert_deleteldname(original_name)
self._cli.changeldname(None, delete_ldname, original_name)
self._cli.changeldname(None, original_name, new_name)
except exception.CinderException as e:
LOG.warning('Unable to rename the logical volume '
'(Volume ID = %(id)s), (%(exception)s)',
@@ -684,6 +797,11 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
# The back-end will not be renamed.
name_id = new_volume._name_id or new_volume.id
provider_location = new_volume.provider_location
LOG.debug('update_migrated_volume(name_id = %(name_id)s, '
'provider_location = %(location)s) End.',
{'name_id': name_id, 'location': provider_location})
return {'_name_id': name_id, 'provider_location': provider_location}
def check_for_export(self, context, volume_id):
@@ -761,19 +879,13 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
def remove_export(self, context, volume):
pass
def _detach_from_all(self, volume):
def _detach_from_all(self, ldname, xml):
LOG.debug('_detach_from_all Start.')
xml = self._cli.view_all(self._properties['ismview_path'])
pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
self.configs(xml))
# get target LD Set.
ldset = self.get_ldset(ldsets)
ldname = self.get_ldname(volume.id, self._properties['ld_name_format'])
if ldname not in lds:
LOG.debug('LD `%s` already unbound?', ldname)
return False
ld = lds[ldname]
ldsetlist = []
@@ -811,7 +923,7 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
LOG.debug('LD `%(ld)s` deleted from LD Set `%(ldset)s`.',
{'ld': ldname, 'ldset': tagetldset['ldsetname']})
LOG.debug('_detach_from_all(Volume ID = %s) End.', volume.id)
LOG.debug('_detach_from_all(LD Name = %s) End.', ldname)
return True
def remove_export_snapshot(self, context, snapshot):
@@ -1388,19 +1500,34 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
{'msgparm': msgparm, 'exception': e})
def _delete_volume(self, volume):
LOG.debug('_delete_volume Start.')
LOG.debug('_delete_volume id=%(id)s, _name_id=%(name_id)s Start.',
{'id': volume.id, 'name_id': volume._name_id})
detached = self._detach_from_all(volume)
xml = self._cli.view_all(self._properties['ismview_path'], detached)
xml = self._cli.view_all(self._properties['ismview_path'])
pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
self.configs(xml))
ldname = self.get_ldname(volume.id,
ldname = self.get_ldname(volume.name_id,
self._properties['ld_name_format'])
# The volume to be deleted has '_d' at the end of the name
# when migrating with the same backend.
delete_ldname = self._convert_deleteldname(ldname)
if delete_ldname in lds:
ldname = delete_ldname
if ldname not in lds:
LOG.debug('LD `%s` already unbound?', ldname)
return
# If not migrating, detach from all hosts.
if ldname != delete_ldname:
detached = self._detach_from_all(ldname, xml)
xml = self._cli.view_all(self._properties['ismview_path'],
detached)
pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
self.configs(xml))
ld = lds[ldname]
if ld['RPL Attribute'] == 'IV':
@@ -1790,12 +1917,7 @@ class MStorageDSVDriver(MStorageDriver):
self._select_volddr_poolnumber,
mv_capacity)
# check io limit.
specs = self.get_volume_type_qos_specs(volume)
self.check_io_parameter(specs)
# set io limit.
self._cli.set_io_limit(new_rvname, specs)
self._set_qos_spec(new_rvname, volume.volume_type_id)
if rv_capacity <= mv_capacity:
rvnumber = None
@@ -1819,12 +1941,10 @@ class MStorageDSVDriver(MStorageDriver):
LOG.debug('_create_volume_from_snapshot(Volume ID = %(vol_id)s, '
'Snapshot ID(SV) = %(snap_id)s, '
'Snapshot ID(BV) = %(snapvol_id)s, '
'Specs=%(specs)s) End.',
'Snapshot ID(BV) = %(snapvol_id)s) End.',
{'vol_id': volume.id,
'snap_id': snapshot.id,
'snapvol_id': snapshot.volume_id,
'specs': specs})
'snapvol_id': snapshot.volume_id})
def revert_to_snapshot(self, context, volume, snapshot):
"""called to perform revert volume from snapshot.