Violin Memory iSCSI storage for 7000 series AFA
The latest 7000 series Violin arrays are based on a new OS, Concerto OS 7. This change adds a new Violin iSCSI Cinder driver to support iSCSI in 7000 series OpenStack deployments.

DocImpact
Implements: blueprint vmem-7000-series-iscsi-driver
Change-Id: I380dec25e7f8de8f56cb57da338cd499c065efa4
parent 594ae5e40f
commit 7720fce509
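For deployers, a minimal cinder.conf backend stanza for the new driver might look like the sketch below. The violin_* option names and the V7000ISCSIDriver class path are taken from this change; the addresses, credentials, and pool names are placeholders and would need to be adjusted for a real array.

    [v7000_iscsi]
    volume_driver = cinder.volume.drivers.violin.v7000_iscsi.V7000ISCSIDriver
    volume_backend_name = v7000_iscsi
    # placeholder management VIP and credentials
    san_ip = 192.0.2.10
    san_login = admin
    san_password = secret
    violin_request_timeout = 300
    # one of: random, largest, smallest
    violin_pool_allocation_method = random
    # placeholder pool names and target addresses
    violin_dedup_only_pools = PoolA,PoolB
    violin_dedup_capable_pools = PoolC,PoolD
    violin_iscsi_target_ips = 192.168.91.1,192.168.92.1

Per the driver code in this change, the two dedup pool lists only become mandatory when the array uses an external head; in that case the driver raises InvalidInput at startup if neither list is configured.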
@ -1023,26 +1023,30 @@ class XIODriverException(VolumeDriverException):


# Violin Memory drivers
class ViolinInvalidBackendConfig(CinderException):
class ViolinInvalidBackendConfig(VolumeDriverException):
    message = _("Volume backend config is invalid: %(reason)s")


class ViolinRequestRetryTimeout(CinderException):
class ViolinRequestRetryTimeout(VolumeDriverException):
    message = _("Backend service retry timeout hit: %(timeout)s sec")


class ViolinBackendErr(CinderException):
class ViolinBackendErr(VolumeBackendAPIException):
    message = _("Backend reports: %(message)s")


class ViolinBackendErrExists(CinderException):
class ViolinBackendErrExists(VolumeBackendAPIException):
    message = _("Backend reports: item already exists")


class ViolinBackendErrNotFound(CinderException):
class ViolinBackendErrNotFound(NotFound):
    message = _("Backend reports: item not found")


class ViolinResourceNotFound(NotFound):
    message = _("Backend reports: %(message)s")


class BadHTTPResponseStatus(VolumeDriverException):
    message = _("Bad HTTP response status %(status)s")
@ -61,5 +61,7 @@ mock_client_conf = [
    'client.create_client',
    'client.delete_client',
    'adapter',
    'adapter.get_fc_info'
    'adapter.get_fc_info',
    'pool',
    'utility',
]
@ -19,6 +19,7 @@ Tests for Violin Memory 7000 Series All-Flash Array Common Driver
import ddt
import math
import mock
import six

from oslo_utils import units

@ -60,6 +61,325 @@ SRC_VOL = {"name": "volume-" + SRC_VOL_ID,
           }
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {"initiator": INITIATOR_IQN}
DEFAULT_DEDUP_POOL = {"storage_pool": 'PoolA',
                      "storage_pool_id": 99,
                      "dedup": True,
                      "thin": True,
                      }
DEFAULT_THIN_POOL = {"storage_pool": 'PoolA',
                     "storage_pool_id": 99,
                     "dedup": False,
                     "thin": True,
                     }
DEFAULT_THICK_POOL = {"storage_pool": 'PoolA',
                      "storage_pool_id": 99,
                      "dedup": False,
                      "thin": False,
                      }

# Note: select superfluous fields are removed for brevity
STATS_STORAGE_POOL_RESPONSE = [({
|
||||
'availsize_mb': 1572827,
|
||||
'category': 'Virtual Device',
|
||||
'name': 'dedup-pool',
|
||||
'object_id': '487d1940-c53f-55c3-b1d5-073af43f80fc',
|
||||
'size_mb': 2097124,
|
||||
'storage_pool_id': 1,
|
||||
'usedsize_mb': 524297},
|
||||
{'category': 'Virtual Device',
|
||||
'name': 'dedup-pool',
|
||||
'object_id': '487d1940-c53f-55c3-b1d5-073af43f80fc',
|
||||
'physicaldevices': [
|
||||
{'availsize_mb': 524281,
|
||||
'connection_type': 'fc',
|
||||
'name': 'VIOLIN:CONCERTO ARRAY.003',
|
||||
'object_id': '260f30b0-0300-59b5-b7b9-54aa55704a12',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 524281,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 0},
|
||||
{'availsize_mb': 524281,
|
||||
'connection_type': 'fc',
|
||||
'name': 'VIOLIN:CONCERTO ARRAY.004',
|
||||
'object_id': '7b58eda2-69da-5aec-9e06-6607934efa93',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 524281,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 0},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'fc',
|
||||
'name': 'VIOLIN:CONCERTO ARRAY.001',
|
||||
'object_id': '69adbea1-2349-5df5-a04a-abd7f14868b2',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 524281,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 524281},
|
||||
{'availsize_mb': 524265,
|
||||
'connection_type': 'fc',
|
||||
'name': 'VIOLIN:CONCERTO ARRAY.002',
|
||||
'object_id': 'a14a0e36-8901-5987-95d8-aa574c6138a2',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 524281,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 16}],
|
||||
'size_mb': 2097124,
|
||||
'storage_pool_id': 1,
|
||||
'total_physicaldevices': 4,
|
||||
'usedsize_mb': 524297}),
|
||||
({'availsize': 0,
|
||||
'availsize_mb': 0,
|
||||
'category': None,
|
||||
'name': 'thick_pool_13531mgb',
|
||||
'object_id': '20610abd-4c58-546c-8905-bf42fab9a11b',
|
||||
'size': 0,
|
||||
'size_mb': 0,
|
||||
'storage_pool_id': 3,
|
||||
'tag': '',
|
||||
'total_physicaldevices': 0,
|
||||
'usedsize': 0,
|
||||
'usedsize_mb': 0},
|
||||
{'category': None,
|
||||
'name': 'thick_pool_13531mgb',
|
||||
'object_id': '20610abd-4c58-546c-8905-bf42fab9a11b',
|
||||
'resource_type': ['All'],
|
||||
'size': 0,
|
||||
'size_mb': 0,
|
||||
'storage_pool_id': 3,
|
||||
'tag': [''],
|
||||
'total_physicaldevices': 0,
|
||||
'usedsize': 0,
|
||||
'usedsize_mb': 0}),
|
||||
({'availsize_mb': 627466,
|
||||
'category': 'Virtual Device',
|
||||
'name': 'StoragePool',
|
||||
'object_id': '1af66d9a-f62e-5b69-807b-892b087fa0b4',
|
||||
'size_mb': 21139267,
|
||||
'storage_pool_id': 7,
|
||||
'usedsize_mb': 20511801},
|
||||
{'category': 'Virtual Device',
|
||||
'name': 'StoragePool',
|
||||
'object_id': '1af66d9a-f62e-5b69-807b-892b087fa0b4',
|
||||
'physicaldevices': [
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN02.000',
|
||||
'object_id': 'ecc775f1-1228-5131-8f68-4176001786ef',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN01.000',
|
||||
'object_id': '5c60812b-34d2-5473-b7bf-21e30ec70311',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN08.001',
|
||||
'object_id': 'eb6d06b7-8d6f-5d9d-b720-e86d8ad1beab',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN03.001',
|
||||
'object_id': '063aced7-1f8f-5e15-b36e-e9d34a2826fa',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN07.001',
|
||||
'object_id': 'ebf34594-2b92-51fe-a6a8-b6cf91f05b2b',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN0A.000',
|
||||
'object_id': 'ff084188-b97f-5e30-9ff0-bc60e546ee06',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN06.001',
|
||||
'object_id': 'f9cbeadf-5524-5697-a3a6-667820e37639',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 167887,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN15.000',
|
||||
'object_id': 'aaacc124-26c9-519a-909a-a93d24f579a1',
|
||||
'owner': 'lab-host2',
|
||||
'size_mb': 167887,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 0},
|
||||
{'availsize_mb': 229276,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN09.001',
|
||||
'object_id': '30967a84-56a4-52a5-ac3f-b4f544257bbd',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 819293},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN04.001',
|
||||
'object_id': 'd997eb42-55d4-5e4c-b797-c68b748e7e1f',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN05.001',
|
||||
'object_id': '56ecf98c-f10b-5bb5-9d3b-5af6037dad73',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN0B.000',
|
||||
'object_id': 'cfb6f61c-508d-5394-8257-78b1f9bcad3b',
|
||||
'owner': 'lab-host2',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN0C.000',
|
||||
'object_id': '7b0bcb51-5c7d-5752-9e18-392057e534f0',
|
||||
'owner': 'lab-host2',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN0D.000',
|
||||
'object_id': 'b785a3b1-6316-50c3-b2e0-6bb0739499c6',
|
||||
'owner': 'lab-host2',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN0E.000',
|
||||
'object_id': '76b9d038-b757-515a-b962-439a4fd85fd5',
|
||||
'owner': 'lab-host2',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN0F.000',
|
||||
'object_id': '9591d24a-70c4-5e80-aead-4b788202c698',
|
||||
'owner': 'lab-host2',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN10.000',
|
||||
'object_id': '2bb09a2b-9063-595b-9d7a-7e5fad5016db',
|
||||
'owner': 'lab-host2',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN11.000',
|
||||
'object_id': 'b9ff58eb-5e6e-5c79-bf95-fae424492519',
|
||||
'owner': 'lab-host2',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN12.000',
|
||||
'object_id': '6abd4fd6-9841-5978-bfcb-5d398d1715b4',
|
||||
'owner': 'lab-host2',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569},
|
||||
{'availsize_mb': 230303,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN13.000',
|
||||
'object_id': 'ffd5a4b7-0f50-5a71-bbba-57a348b96c68',
|
||||
'owner': 'lab-host2',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 818266},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'block',
|
||||
'name': 'BKSC:OTHDISK-MFCN14.000',
|
||||
'object_id': '52ffbbae-bdac-5194-ba6b-62ee17bfafce',
|
||||
'owner': 'lab-host2',
|
||||
'size_mb': 1048569,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 1048569}],
|
||||
'size_mb': 21139267,
|
||||
'storage_pool_id': 7,
|
||||
'tag': [''],
|
||||
'total_physicaldevices': 21,
|
||||
'usedsize_mb': 20511801}),
|
||||
({'availsize_mb': 1048536,
|
||||
'category': 'Virtual Device',
|
||||
'name': 'thick-pool',
|
||||
'object_id': 'c1e0becc-3497-5d74-977a-1e5a79769576',
|
||||
'size_mb': 2097124,
|
||||
'storage_pool_id': 9,
|
||||
'usedsize_mb': 1048588},
|
||||
{'category': 'Virtual Device',
|
||||
'name': 'thick-pool',
|
||||
'object_id': 'c1e0becc-3497-5d74-977a-1e5a79769576',
|
||||
'physicaldevices': [
|
||||
{'availsize_mb': 524255,
|
||||
'connection_type': 'fc',
|
||||
'name': 'VIOLIN:CONCERTO ARRAY.001',
|
||||
'object_id': 'a90c4a11-33af-5530-80ca-2360fa477781',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 524281,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 26},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'fc',
|
||||
'name': 'VIOLIN:CONCERTO ARRAY.002',
|
||||
'object_id': '0a625ec8-2e80-5086-9644-2ea8dd5c32ec',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 524281,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 524281},
|
||||
{'availsize_mb': 0,
|
||||
'connection_type': 'fc',
|
||||
'name': 'VIOLIN:CONCERTO ARRAY.004',
|
||||
'object_id': '7018670b-3a79-5bdc-9d02-2d85602f361a',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 524281,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 524281},
|
||||
{'availsize_mb': 524281,
|
||||
'connection_type': 'fc',
|
||||
'name': 'VIOLIN:CONCERTO ARRAY.003',
|
||||
'object_id': 'd859d47b-ca65-5d9d-a1c0-e288bbf39f48',
|
||||
'owner': 'lab-host1',
|
||||
'size_mb': 524281,
|
||||
'type': 'Direct-Access',
|
||||
'usedsize_mb': 0}],
|
||||
'size_mb': 2097124,
|
||||
'storage_pool_id': 9,
|
||||
'total_physicaldevices': 4,
|
||||
'usedsize_mb': 1048588})]
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
@ -89,6 +409,9 @@ class V7000CommonTestCase(test.TestCase):
|
||||
config.use_igroups = False
|
||||
config.violin_request_timeout = 300
|
||||
config.container = 'myContainer'
|
||||
config.violin_pool_allocation_method = 'random'
|
||||
config.violin_dedup_only_pools = None
|
||||
config.violin_dedup_capable_pools = None
|
||||
return config
|
||||
|
||||
@mock.patch('vmemclient.open')
|
||||
@ -144,6 +467,8 @@ class V7000CommonTestCase(test.TestCase):
|
||||
}
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
self.driver._send_cmd = mock.Mock(return_value=response)
|
||||
self.driver._get_storage_pool = mock.Mock(
|
||||
return_value=DEFAULT_THICK_POOL)
|
||||
|
||||
result = self.driver._create_lun(VOLUME)
|
||||
|
||||
@ -151,7 +476,7 @@ class V7000CommonTestCase(test.TestCase):
|
||||
self.driver.vmem_mg.lun.create_lun,
|
||||
'Create resource successfully.',
|
||||
VOLUME['id'], size_in_mb, False, False, size_in_mb,
|
||||
storage_pool=None)
|
||||
storage_pool_id=99)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_create_dedup_lun(self):
|
||||
@ -176,6 +501,8 @@ class V7000CommonTestCase(test.TestCase):
|
||||
|
||||
self.driver._get_violin_extra_spec = mock.Mock(
|
||||
return_value=None)
|
||||
self.driver._get_storage_pool = mock.Mock(
|
||||
return_value=DEFAULT_DEDUP_POOL)
|
||||
|
||||
result = self.driver._create_lun(vol)
|
||||
|
||||
@ -183,26 +510,51 @@ class V7000CommonTestCase(test.TestCase):
|
||||
self.driver.vmem_mg.lun.create_lun,
|
||||
'Create resource successfully.',
|
||||
VOLUME['id'], size_in_mb / 10, True, True, full_size_mb,
|
||||
storage_pool=None)
|
||||
storage_pool_id=99)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_fail_extend_dedup_lun(self):
|
||||
"""Volume extend fails when new size would shrink the volume."""
|
||||
failure = exception.VolumeDriverException
|
||||
vol = VOLUME.copy()
|
||||
vol['volume_type_id'] = '1'
|
||||
|
||||
size_in_mb = vol['size'] * units.Ki
|
||||
|
||||
self.driver.vmem_mg = self.setup_mock_concerto()
|
||||
type(self.driver.vmem_mg.utility).is_external_head = mock.PropertyMock(
|
||||
return_value=False)
|
||||
|
||||
# simulate extra specs of {'thin': 'true', 'dedupe': 'true'}
|
||||
self.driver._get_volume_type_extra_spec = mock.Mock(
|
||||
return_value="True")
|
||||
|
||||
failure = exception.VolumeDriverException
|
||||
self.assertRaises(failure, self.driver._extend_lun,
|
||||
vol, size_in_mb)
|
||||
|
||||
def test_extend_dedup_lun_external_head(self):
|
||||
"""Volume extend fails when new size would shrink the volume."""
|
||||
vol = VOLUME.copy()
|
||||
vol['volume_type_id'] = '1'
|
||||
new_volume_size = 10
|
||||
|
||||
response = {'success': True, 'message': 'Expand resource successfully'}
|
||||
conf = {
|
||||
'lun.extend_lun.return_value': response,
|
||||
}
|
||||
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
type(self.driver.vmem_mg.utility).is_external_head = mock.PropertyMock(
|
||||
return_value=False)
|
||||
|
||||
change_in_size_mb = (new_volume_size - VOLUME['size']) * units.Ki
|
||||
self.driver._send_cmd = mock.Mock(return_value=response)
|
||||
|
||||
result = self.driver._extend_lun(VOLUME, new_volume_size)
|
||||
|
||||
self.driver._send_cmd.assert_called_with(
|
||||
self.driver.vmem_mg.lun.extend_lun,
|
||||
response['message'], VOLUME['id'], change_in_size_mb)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_create_non_dedup_lun(self):
|
||||
"""Lun is successfully created."""
|
||||
vol = VOLUME.copy()
|
||||
@ -226,24 +578,28 @@ class V7000CommonTestCase(test.TestCase):
|
||||
self.driver._get_violin_extra_spec = mock.Mock(
|
||||
return_value=None)
|
||||
|
||||
self.driver._get_storage_pool = mock.Mock(
|
||||
return_value=DEFAULT_THICK_POOL)
|
||||
|
||||
result = self.driver._create_lun(vol)
|
||||
|
||||
self.driver._send_cmd.assert_called_with(
|
||||
self.driver.vmem_mg.lun.create_lun,
|
||||
'Create resource successfully.',
|
||||
VOLUME['id'], size_in_mb, False, False, full_size_mb,
|
||||
storage_pool=None)
|
||||
storage_pool_id=99)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_create_lun_fails(self):
|
||||
"""Array returns error that the lun already exists."""
|
||||
response = {'success': False,
|
||||
'msg': 'Duplicate Virtual Device name. Error: 0x90010022'}
|
||||
|
||||
conf = {
|
||||
'lun.create_lun.return_value': response,
|
||||
}
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
self.driver._get_storage_pool = mock.Mock(
|
||||
return_value=DEFAULT_THICK_POOL)
|
||||
self.driver._send_cmd = mock.Mock(return_value=response)
|
||||
|
||||
self.assertIsNone(self.driver._create_lun(VOLUME))
|
||||
@ -253,6 +609,7 @@ class V7000CommonTestCase(test.TestCase):
|
||||
vol = VOLUME.copy()
|
||||
vol['size'] = 100
|
||||
vol['volume_type_id'] = '1'
|
||||
|
||||
response = {'success': True, 'msg': 'Create resource successfully.'}
|
||||
size_in_mb = vol['size'] * units.Ki
|
||||
full_size_mb = size_in_mb
|
||||
@ -268,6 +625,8 @@ class V7000CommonTestCase(test.TestCase):
|
||||
# simulates extra specs: {'storage_pool', 'StoragePool'}
|
||||
self.driver._get_violin_extra_spec = mock.Mock(
|
||||
return_value="StoragePool")
|
||||
self.driver._get_storage_pool = mock.Mock(
|
||||
return_value=DEFAULT_THICK_POOL)
|
||||
|
||||
result = self.driver._create_lun(vol)
|
||||
|
||||
@ -275,7 +634,7 @@ class V7000CommonTestCase(test.TestCase):
|
||||
self.driver.vmem_mg.lun.create_lun,
|
||||
'Create resource successfully.',
|
||||
VOLUME['id'], size_in_mb, False, False, full_size_mb,
|
||||
storage_pool="StoragePool")
|
||||
storage_pool_id=99)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_delete_lun(self):
|
||||
@ -294,13 +653,13 @@ class V7000CommonTestCase(test.TestCase):
|
||||
|
||||
self.driver._send_cmd.assert_called_with(
|
||||
self.driver.vmem_mg.lun.delete_lun,
|
||||
success_msgs, VOLUME['id'], True)
|
||||
success_msgs, VOLUME['id'])
|
||||
self.driver._delete_lun_snapshot_bookkeeping.assert_called_with(
|
||||
VOLUME['id'])
|
||||
|
||||
self.assertIsNone(result)
|
||||
|
||||
# TODO(rlucio) More delete lun failure cases to be added after
|
||||
# TODO(vthirumalai): More delete lun failure cases to be added after
|
||||
# collecting the possible responses from Concerto
|
||||
|
||||
def test_extend_lun(self):
|
||||
@ -344,19 +703,22 @@ class V7000CommonTestCase(test.TestCase):
|
||||
"""Create a new cinder volume from a given snapshot of a lun."""
|
||||
object_id = '12345'
|
||||
vdev_id = 11111
|
||||
lun_info_response = {'subType': 'THICK',
|
||||
'virtualDeviceID': vdev_id}
|
||||
response = {'success': True,
|
||||
'object_id': object_id,
|
||||
'msg': 'Copy TimeMark successfully.'}
|
||||
lun_info = {'virtualDeviceID': vdev_id}
|
||||
compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'
|
||||
|
||||
conf = {
|
||||
'lun.get_lun_info.return_value': lun_info_response,
|
||||
'lun.copy_snapshot_to_new_lun.return_value': response,
|
||||
}
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
self.driver._compress_snapshot_id = mock.Mock(
|
||||
return_value=compressed_snap_id)
|
||||
self.driver.vmem_mg.lun.get_lun_info = mock.Mock(return_value=lun_info)
|
||||
self.driver._get_storage_pool = mock.Mock(
|
||||
return_value=DEFAULT_THICK_POOL)
|
||||
self.driver._wait_for_lun_or_snap_copy = mock.Mock()
|
||||
|
||||
result = self.driver._create_volume_from_snapshot(SNAPSHOT, VOLUME)
|
||||
@ -364,9 +726,7 @@ class V7000CommonTestCase(test.TestCase):
|
||||
self.driver.vmem_mg.lun.copy_snapshot_to_new_lun.assert_called_with(
|
||||
source_lun=SNAPSHOT['volume_id'],
|
||||
source_snapshot_comment=compressed_snap_id,
|
||||
destination=VOLUME['id'], storage_pool=None)
|
||||
self.driver.vmem_mg.lun.get_lun_info.assert_called_with(
|
||||
object_id=object_id)
|
||||
destination=VOLUME['id'], storage_pool_id=99)
|
||||
self.driver._wait_for_lun_or_snap_copy.assert_called_with(
|
||||
SNAPSHOT['volume_id'], dest_vdev_id=vdev_id)
|
||||
|
||||
@ -379,24 +739,27 @@ class V7000CommonTestCase(test.TestCase):
|
||||
dest_vol['volume_type_id'] = '1'
|
||||
object_id = '12345'
|
||||
vdev_id = 11111
|
||||
lun_info_response = {'subType': 'THICK',
|
||||
'virtualDeviceID': vdev_id}
|
||||
response = {'success': True,
|
||||
'object_id': object_id,
|
||||
'msg': 'Copy TimeMark successfully.'}
|
||||
lun_info = {'virtualDeviceID': vdev_id}
|
||||
compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'
|
||||
|
||||
conf = {
|
||||
'lun.get_lun_info.return_value': lun_info_response,
|
||||
'lun.copy_snapshot_to_new_lun.return_value': response,
|
||||
}
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
self.driver._compress_snapshot_id = mock.Mock(
|
||||
return_value=compressed_snap_id)
|
||||
self.driver.vmem_mg.lun.get_lun_info = mock.Mock(return_value=lun_info)
|
||||
self.driver._wait_for_lun_or_snap_copy = mock.Mock()
|
||||
|
||||
# simulates extra specs: {'storage_pool', 'StoragePool'}
|
||||
self.driver._get_violin_extra_spec = mock.Mock(
|
||||
return_value="StoragePool")
|
||||
self.driver._get_storage_pool = mock.Mock(
|
||||
return_value=DEFAULT_THICK_POOL)
|
||||
self.driver._get_volume_type_extra_spec = mock.Mock(
|
||||
return_value="False")
|
||||
self.driver._wait_for_lun_or_snap_copy = mock.Mock()
|
||||
|
||||
result = self.driver._create_volume_from_snapshot(SNAPSHOT, dest_vol)
|
||||
|
||||
@ -404,18 +767,24 @@ class V7000CommonTestCase(test.TestCase):
|
||||
|
||||
def test_create_volume_from_snapshot_fails(self):
|
||||
"""Array returns error that the lun already exists."""
|
||||
vdev_id = 11111
|
||||
lun_info_response = {'subType': 'THICK',
|
||||
'virtualDeviceID': vdev_id}
|
||||
response = {'success': False,
|
||||
'msg': 'Duplicate Virtual Device name. Error: 0x90010022'}
|
||||
compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'
|
||||
failure = exception.ViolinBackendErrExists
|
||||
|
||||
conf = {
|
||||
'lun.get_lun_info.return_value': lun_info_response,
|
||||
'lun.copy_snapshot_to_new_lun.return_value': response,
|
||||
}
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
self.driver._send_cmd = mock.Mock(return_value=response)
|
||||
self.driver._compress_snapshot_id = mock.Mock(
|
||||
return_value=compressed_snap_id)
|
||||
self.driver._get_storage_pool = mock.Mock(
|
||||
return_value=DEFAULT_THICK_POOL)
|
||||
|
||||
self.driver._send_cmd = mock.Mock(side_effect=failure(message='fail'))
|
||||
|
||||
@ -431,15 +800,19 @@ class V7000CommonTestCase(test.TestCase):
|
||||
dest_vol['size'] = size
|
||||
larger_size_flag = True
|
||||
object_id = fake.OBJECT_ID
|
||||
response = {'success': True,
|
||||
'object_id': object_id,
|
||||
'msg': 'Copy Snapshot resource successfully'}
|
||||
lun_info_response = {'subType': 'THICK'}
|
||||
copy_response = {'success': True,
|
||||
'object_id': object_id,
|
||||
'msg': 'Copy Snapshot resource successfully'}
|
||||
|
||||
conf = {
|
||||
'lun.copy_lun_to_new_lun.return_value': response,
|
||||
'lun.get_lun_info.return_value': lun_info_response,
|
||||
'lun.copy_lun_to_new_lun.return_value': copy_response,
|
||||
}
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
self.driver._ensure_snapshot_resource_area = mock.Mock()
|
||||
self.driver._get_storage_pool = mock.Mock(
|
||||
return_value=DEFAULT_THICK_POOL)
|
||||
self.driver._wait_for_lun_or_snap_copy = mock.Mock()
|
||||
self.driver._extend_lun = mock.Mock()
|
||||
|
||||
@ -448,8 +821,7 @@ class V7000CommonTestCase(test.TestCase):
|
||||
self.driver._ensure_snapshot_resource_area.assert_called_with(
|
||||
SRC_VOL['id'])
|
||||
self.driver.vmem_mg.lun.copy_lun_to_new_lun.assert_called_with(
|
||||
source=SRC_VOL['id'], destination=dest_vol['id'],
|
||||
storage_pool=None)
|
||||
source=SRC_VOL['id'], destination=VOLUME['id'], storage_pool_id=99)
|
||||
self.driver._wait_for_lun_or_snap_copy.assert_called_with(
|
||||
SRC_VOL['id'], dest_obj_id=object_id)
|
||||
if larger_size_flag:
|
||||
@ -470,12 +842,14 @@ class V7000CommonTestCase(test.TestCase):
|
||||
larger_size_flag = True
|
||||
dest_vol['volume_type_id'] = fake.VOLUME_TYPE_ID
|
||||
object_id = fake.OBJECT_ID
|
||||
response = {'success': True,
|
||||
'object_id': object_id,
|
||||
'msg': 'Copy Snapshot resource successfully'}
|
||||
lun_info_response = {'subType': 'THICK'}
|
||||
copy_response = {'success': True,
|
||||
'object_id': object_id,
|
||||
'msg': 'Copy Snapshot resource successfully'}
|
||||
|
||||
conf = {
|
||||
'lun.copy_lun_to_new_lun.return_value': response,
|
||||
'lun.get_lun_info.return_value': lun_info_response,
|
||||
'lun.copy_lun_to_new_lun.return_value': copy_response,
|
||||
}
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
self.driver._ensure_snapshot_resource_area = mock.Mock()
|
||||
@ -485,6 +859,11 @@ class V7000CommonTestCase(test.TestCase):
|
||||
# simulates extra specs: {'storage_pool', 'StoragePool'}
|
||||
self.driver._get_violin_extra_spec = mock.Mock(
|
||||
return_value="StoragePool")
|
||||
self.driver._get_storage_pool = mock.Mock(
|
||||
return_value=DEFAULT_THIN_POOL)
|
||||
|
||||
self.driver._get_volume_type_extra_spec = mock.Mock(
|
||||
side_effect=["True", "False"])
|
||||
|
||||
result = self.driver._create_lun_from_lun(SRC_VOL, dest_vol)
|
||||
|
||||
@ -492,7 +871,7 @@ class V7000CommonTestCase(test.TestCase):
|
||||
SRC_VOL['id'])
|
||||
self.driver.vmem_mg.lun.copy_lun_to_new_lun.assert_called_with(
|
||||
source=SRC_VOL['id'], destination=dest_vol['id'],
|
||||
storage_pool="StoragePool")
|
||||
storage_pool_id=99)
|
||||
self.driver._wait_for_lun_or_snap_copy.assert_called_with(
|
||||
SRC_VOL['id'], dest_obj_id=object_id)
|
||||
if larger_size_flag:
|
||||
@ -504,18 +883,56 @@ class V7000CommonTestCase(test.TestCase):
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_create_lun_from_lun_fails(self):
|
||||
"""lun full clone to new volume completes successfully."""
|
||||
"""lun full clone to new volume fails correctly."""
|
||||
failure = exception.ViolinBackendErr
|
||||
response = {'success': False,
|
||||
'msg': 'Snapshot Resource is not created '
|
||||
'for this virtual device. Error: 0x0901008c'}
|
||||
lun_info_response = {
|
||||
'subType': 'THICK',
|
||||
}
|
||||
copy_response = {
|
||||
'success': False,
|
||||
'msg': 'Snapshot Resource is not created ' +
|
||||
'for this virtual device. Error: 0x0901008c',
|
||||
}
|
||||
|
||||
conf = {
|
||||
'lun.copy_lun_to_new_lun.return_value': response,
|
||||
'lun.get_lun_info.return_value': lun_info_response,
|
||||
'lun.copy_lun_to_new_lun.return_value': copy_response,
|
||||
}
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
self.driver._ensure_snapshot_resource_area = mock.Mock()
|
||||
self.driver._send_cmd = mock.Mock(side_effect=failure(message='fail'))
|
||||
self.driver._get_storage_pool = mock.Mock(
|
||||
return_value=DEFAULT_THICK_POOL)
|
||||
|
||||
self.assertRaises(failure, self.driver._create_lun_from_lun,
|
||||
SRC_VOL, VOLUME)
|
||||
|
||||
def test_create_lun_from_thin_lun_fails(self):
|
||||
"""lun full clone of thin lun is not supported."""
|
||||
failure = exception.ViolinBackendErr
|
||||
lun_info_response = {
|
||||
'subType': 'THIN',
|
||||
}
|
||||
|
||||
conf = {
|
||||
'lun.get_lun_info.return_value': lun_info_response,
|
||||
}
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
|
||||
self.assertRaises(failure, self.driver._create_lun_from_lun,
|
||||
SRC_VOL, VOLUME)
|
||||
|
||||
def test_create_lun_from_dedup_lun_fails(self):
|
||||
"""lun full clone of dedup lun is not supported."""
|
||||
failure = exception.ViolinBackendErr
|
||||
lun_info_response = {
|
||||
'subType': 'DEDUP',
|
||||
}
|
||||
|
||||
conf = {
|
||||
'lun.get_lun_info.return_value': lun_info_response,
|
||||
}
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
|
||||
self.assertRaises(failure, self.driver._create_lun_from_lun,
|
||||
SRC_VOL, VOLUME)
|
||||
@ -583,6 +1000,8 @@ class V7000CommonTestCase(test.TestCase):
|
||||
snap = self.driver.vmem_mg.snapshot
|
||||
snap.lun_has_a_snapshot_resource = mock.Mock(return_value=False)
|
||||
snap.create_snapshot_resource = mock.Mock(return_value=result_dict)
|
||||
self.driver._get_storage_pool = mock.Mock(
|
||||
return_value=DEFAULT_THICK_POOL)
|
||||
|
||||
with mock.patch('cinder.db.sqlalchemy.api.volume_get',
|
||||
return_value=VOLUME):
|
||||
@ -604,7 +1023,7 @@ class V7000CommonTestCase(test.TestCase):
|
||||
expansion_max_size=
|
||||
v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE,
|
||||
enable_shrink=v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_SHRINK,
|
||||
storage_pool=None)
|
||||
storage_pool_id=99)
|
||||
|
||||
def test_ensure_snapshot_resource_area_with_storage_pool(self):
|
||||
|
||||
@ -622,6 +1041,11 @@ class V7000CommonTestCase(test.TestCase):
|
||||
# simulates extra specs: {'storage_pool', 'StoragePool'}
|
||||
self.driver._get_violin_extra_spec = mock.Mock(
|
||||
return_value="StoragePool")
|
||||
self.driver._get_storage_pool = mock.Mock(
|
||||
return_value=DEFAULT_THICK_POOL)
|
||||
|
||||
self.driver._get_volume_type_extra_spec = mock.Mock(
|
||||
side_effect=["True", "False"])
|
||||
|
||||
with mock.patch('cinder.db.sqlalchemy.api.volume_get',
|
||||
return_value=dest_vol):
|
||||
@ -643,7 +1067,7 @@ class V7000CommonTestCase(test.TestCase):
|
||||
expansion_max_size=
|
||||
v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE,
|
||||
enable_shrink=v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_SHRINK,
|
||||
storage_pool="StoragePool")
|
||||
storage_pool_id=99)
|
||||
|
||||
def test_ensure_snapshot_resource_policy(self):
|
||||
result_dict = {'success': True, 'res': 'Successful'}
|
||||
@ -715,19 +1139,43 @@ class V7000CommonTestCase(test.TestCase):
|
||||
def test_delete_lun_snapshot(self):
|
||||
response = {'success': True, 'msg': 'Delete TimeMark successfully'}
|
||||
compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'
|
||||
oid = 'abc123-abc123abc123-abc123'
|
||||
|
||||
self.driver.vmem_mg = self.setup_mock_concerto()
|
||||
self.driver._send_cmd = mock.Mock(return_value=response)
|
||||
conf = {
|
||||
'snapshot.snapshot_comment_to_object_id.return_value': oid,
|
||||
'snapshot.delete_lun_snapshot.return_value': response,
|
||||
}
|
||||
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
self.driver._compress_snapshot_id = mock.Mock(
|
||||
return_value=compressed_snap_id)
|
||||
|
||||
self.assertIsNone(self.driver._delete_lun_snapshot(SNAPSHOT))
|
||||
result = self.driver._delete_lun_snapshot(SNAPSHOT)
|
||||
|
||||
self.driver._send_cmd.assert_called_with(
|
||||
self.driver.vmem_mg.snapshot.delete_lun_snapshot,
|
||||
'Delete TimeMark successfully',
|
||||
lun=VOLUME_ID,
|
||||
comment=compressed_snap_id)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_delete_lun_snapshot_with_retry(self):
|
||||
response = [
|
||||
{'success': False, 'msg': 'Error 0x50f7564c'},
|
||||
{'success': True, 'msg': 'Delete TimeMark successfully'}]
|
||||
compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'
|
||||
oid = 'abc123-abc123abc123-abc123'
|
||||
|
||||
conf = {
|
||||
'snapshot.snapshot_comment_to_object_id.return_value': oid,
|
||||
'snapshot.delete_lun_snapshot.side_effect': response,
|
||||
}
|
||||
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
self.driver._compress_snapshot_id = mock.Mock(
|
||||
return_value=compressed_snap_id)
|
||||
|
||||
result = self.driver._delete_lun_snapshot(SNAPSHOT)
|
||||
|
||||
self.assertIsNone(result)
|
||||
self.assertEqual(
|
||||
len(response),
|
||||
self.driver.vmem_mg.snapshot.delete_lun_snapshot.call_count)
|
||||
|
||||
def test_wait_for_lun_or_snap_copy_completes_for_snap(self):
|
||||
"""waiting for a snapshot to copy succeeds."""
|
||||
@ -799,4 +1247,173 @@ class V7000CommonTestCase(test.TestCase):
|
||||
|
||||
m_get_admin_context.assert_called_with()
|
||||
m_get_volume_type.assert_called_with(None, vol['volume_type_id'])
|
||||
self.assertEqual('test_value', result)
|
||||
self.assertEqual(result, 'test_value')
|
||||
|
||||
def test_process_extra_specs_dedup(self):
|
||||
'''Process the given extra specs and fill the required dict.'''
|
||||
vol = VOLUME.copy()
|
||||
vol['volume_type_id'] = 1
|
||||
spec_dict = {
|
||||
'pool_type': 'dedup',
|
||||
'size_mb': 205,
|
||||
'thick': False,
|
||||
'dedup': True,
|
||||
'thin': True}
|
||||
|
||||
self.driver.vmem_mg = self.setup_mock_concerto()
|
||||
self.driver._get_volume_type_extra_spec = mock.Mock(
|
||||
return_value="True")
|
||||
|
||||
result = self.driver._process_extra_specs(vol)
|
||||
self.assertEqual(spec_dict, result)
|
||||
|
||||
def test_process_extra_specs_no_specs(self):
|
||||
'''Fill the required spec_dict in the absence of extra specs.'''
|
||||
vol = VOLUME.copy()
|
||||
spec_dict = {
|
||||
'pool_type': 'thick',
|
||||
'size_mb': 2048,
|
||||
'thick': True,
|
||||
'dedup': False,
|
||||
'thin': False}
|
||||
|
||||
self.driver.vmem_mg = self.setup_mock_concerto()
|
||||
self.driver._get_volume_type_extra_spec = mock.Mock(
|
||||
return_value="False")
|
||||
|
||||
result = self.driver._process_extra_specs(vol)
|
||||
self.assertEqual(spec_dict, result)
|
||||
|
||||
def test_process_extra_specs_no_specs_thin(self):
|
||||
'''Fill the required spec_dict in the absence of extra specs.'''
|
||||
vol = VOLUME.copy()
|
||||
spec_dict = {
|
||||
'pool_type': 'thin',
|
||||
'size_mb': 205,
|
||||
'thick': False,
|
||||
'dedup': False,
|
||||
'thin': True}
|
||||
|
||||
self.driver.vmem_mg = self.setup_mock_concerto()
|
||||
self.driver._get_volume_type_extra_spec = mock.Mock(
|
||||
return_value="False")
|
||||
|
||||
save_thin = self.conf.san_thin_provision
|
||||
self.conf.san_thin_provision = True
|
||||
result = self.driver._process_extra_specs(vol)
|
||||
self.assertEqual(spec_dict, result)
|
||||
self.conf.san_thin_provision = save_thin
|
||||
|
||||
def test_process_extra_specs_thin(self):
|
||||
'''Fill the required spec_dict in the absence of extra specs.'''
|
||||
vol = VOLUME.copy()
|
||||
vol['volume_type_id'] = 1
|
||||
spec_dict = {
|
||||
'pool_type': 'thin',
|
||||
'size_mb': 205,
|
||||
'thick': False,
|
||||
'dedup': False,
|
||||
'thin': True}
|
||||
|
||||
self.driver.vmem_mg = self.setup_mock_concerto()
|
||||
self.driver._get_volume_type_extra_spec = mock.Mock(
|
||||
side_effect=["True", "False"])
|
||||
|
||||
result = self.driver._process_extra_specs(vol)
|
||||
self.assertEqual(spec_dict, result)
|
||||
|
||||
def test_get_storage_pool_with_extra_specs(self):
|
||||
'''Select a suitable pool based on specified extra specs.'''
|
||||
vol = VOLUME.copy()
|
||||
vol['volume_type_id'] = 1
|
||||
pool_type = "thick"
|
||||
|
||||
selected_pool = {
|
||||
'storage_pool': 'StoragePoolA',
|
||||
'storage_pool_id': 99,
|
||||
'dedup': False,
|
||||
'thin': False}
|
||||
|
||||
conf = {
|
||||
'pool.select_storage_pool.return_value': selected_pool,
|
||||
}
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
self.driver._get_violin_extra_spec = mock.Mock(
|
||||
return_value="StoragePoolA",
|
||||
)
|
||||
|
||||
result = self.driver._get_storage_pool(
|
||||
vol,
|
||||
100,
|
||||
pool_type,
|
||||
"create_lun")
|
||||
|
||||
self.assertEqual(result, selected_pool)
|
||||
|
||||
def test_get_storage_pool_configured_pools(self):
|
||||
'''Select a suitable pool based on configured pools.'''
|
||||
vol = VOLUME.copy()
|
||||
pool_type = "dedup"
|
||||
|
||||
self.conf.violin_dedup_only_pools = ['PoolA', 'PoolB']
|
||||
self.conf.violin_dedup_capable_pools = ['PoolC', 'PoolD']
|
||||
|
||||
selected_pool = {
|
||||
'dedup': True,
|
||||
'storage_pool': 'PoolA',
|
||||
'storage_pool_id': 123,
|
||||
'thin': True,
|
||||
}
|
||||
|
||||
conf = {
|
||||
'pool.select_storage_pool.return_value': selected_pool,
|
||||
}
|
||||
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
self.driver._get_violin_extra_spec = mock.Mock(
|
||||
return_value="StoragePoolA")
|
||||
|
||||
result = self.driver._get_storage_pool(
|
||||
vol,
|
||||
100,
|
||||
pool_type,
|
||||
"create_lun",
|
||||
)
|
||||
|
||||
self.assertEqual(result, selected_pool)
|
||||
self.driver.vmem_mg.pool.select_storage_pool.assert_called_with(
|
||||
100,
|
||||
pool_type,
|
||||
None,
|
||||
self.conf.violin_dedup_only_pools,
|
||||
self.conf.violin_dedup_capable_pools,
|
||||
"random",
|
||||
"create_lun",
|
||||
)
|
||||
|
||||
def test_get_volume_stats(self):
|
||||
'''Getting stats works successfully.'''
|
||||
|
||||
self.conf.reserved_percentage = 0
|
||||
|
||||
expected_answers = {
|
||||
'vendor_name': 'Violin Memory, Inc.',
|
||||
'reserved_percentage': 0,
|
||||
'QoS_support': False,
|
||||
'free_capacity_gb': 2781,
|
||||
'total_capacity_gb': 14333,
|
||||
'consistencygroup_support': False,
|
||||
}
|
||||
owner = 'lab-host1'
|
||||
|
||||
def lookup(value):
|
||||
return six.text_type(value) + '.vmem.com'
|
||||
conf = {
|
||||
'pool.get_storage_pools.return_value': STATS_STORAGE_POOL_RESPONSE,
|
||||
}
|
||||
self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
|
||||
with mock.patch('socket.getfqdn', side_effect=lookup):
|
||||
result = self.driver._get_volume_stats(owner)
|
||||
|
||||
self.assertDictEqual(expected_answers, result)
|
||||
|
cinder/tests/unit/test_v7000_iscsi.py (new file, 368 lines)
@ -0,0 +1,368 @@
|
||||
# Copyright 2016 Violin Memory, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Tests for Violin Memory 7000 Series All-Flash Array ISCSI Driver
|
||||
"""
|
||||
|
||||
import mock
|
||||
|
||||
from cinder import exception
|
||||
from cinder import test
|
||||
from cinder.tests.unit import fake_vmem_client as vmemclient
|
||||
from cinder.volume import configuration as conf
|
||||
from cinder.volume.drivers.violin import v7000_common
|
||||
from cinder.volume.drivers.violin import v7000_iscsi
|
||||
|
||||
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
|
||||
VOLUME = {
|
||||
"name": "volume-" + VOLUME_ID,
|
||||
"id": VOLUME_ID,
|
||||
"display_name": "fake_volume",
|
||||
"size": 2,
|
||||
"host": "myhost",
|
||||
"volume_type": None,
|
||||
"volume_type_id": None,
|
||||
}
|
||||
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
|
||||
SNAPSHOT = {
|
||||
"name": "snapshot-" + SNAPSHOT_ID,
|
||||
"id": SNAPSHOT_ID,
|
||||
"volume_id": VOLUME_ID,
|
||||
"volume_name": "volume-" + VOLUME_ID,
|
||||
"volume_size": 2,
|
||||
"display_name": "fake_snapshot",
|
||||
"volume": VOLUME,
|
||||
}
|
||||
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
|
||||
SRC_VOL = {
|
||||
"name": "volume-" + SRC_VOL_ID,
|
||||
"id": SRC_VOL_ID,
|
||||
"display_name": "fake_src_vol",
|
||||
"size": 2,
|
||||
"host": "myhost",
|
||||
"volume_type": None,
|
||||
"volume_type_id": None,
|
||||
}
|
||||
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
|
||||
SRC_VOL = {
|
||||
"name": "volume-" + SRC_VOL_ID,
|
||||
"id": SRC_VOL_ID,
|
||||
"display_name": "fake_src_vol",
|
||||
"size": 2,
|
||||
"host": "myhost",
|
||||
"volume_type": None,
|
||||
"volume_type_id": None,
|
||||
}
|
||||
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
|
||||
CONNECTOR = {
|
||||
"initiator": INITIATOR_IQN,
|
||||
"host": "irrelevant",
|
||||
"ip": "1.2.3.4",
|
||||
}
|
||||
TARGET = "iqn.2004-02.com.vmem:%s" % VOLUME['id']
|
||||
|
||||
GET_VOLUME_STATS_RESPONSE = {
|
||||
'vendor_name': 'Violin Memory, Inc.',
|
||||
'reserved_percentage': 0,
|
||||
'QoS_support': False,
|
||||
'free_capacity_gb': 4094,
|
||||
'total_capacity_gb': 2558,
|
||||
}
|
||||
|
||||
CLIENT_INFO = {
|
||||
'issanip_enabled': False,
|
||||
'sanclient_id': 7,
|
||||
'ISCSIDevices':
|
||||
[{'category': 'Virtual Device',
|
||||
'sizeMB': VOLUME['size'] * 1024,
|
||||
'name': VOLUME['id'],
|
||||
'object_id': 'v0000058',
|
||||
'access': 'ReadWrite',
|
||||
'ISCSITarget':
|
||||
{'name': TARGET,
|
||||
'startingLun': '0',
|
||||
'ipAddr': '192.168.91.1 192.168.92.1 192.168.93.1 192.168.94.1',
|
||||
'object_id': '2c68c1a4-67bb-59b3-93df-58bcdf422a66',
|
||||
'access': 'ReadWrite',
|
||||
'isInfiniBand': 'false',
|
||||
'iscsiurl': ''},
|
||||
'type': 'SAN',
|
||||
'lun': '8',
|
||||
'size': VOLUME['size'] * 1024 * 1024}],
|
||||
'name': 'lab-srv3377',
|
||||
'isiscsi_enabled': True,
|
||||
'clusterName': '',
|
||||
'ipAddress': '',
|
||||
'isclustered': False,
|
||||
'username': '',
|
||||
'isbmr_enabled': False,
|
||||
'useracl': None,
|
||||
'isfibrechannel_enabled': False,
|
||||
'iSCSIPolicy':
|
||||
{'initiators': ['iqn.1993-08.org.debian:01:1ebcd244a059'],
|
||||
'authentication':
|
||||
{'mutualCHAP':
|
||||
{'enabled': False,
|
||||
'user': ''},
|
||||
'enabled': False,
|
||||
'defaultUser': ''},
|
||||
'accessType': 'stationary'},
|
||||
'ISCSITargetList':
|
||||
[{'name': 'iqn.2004-02.com.vmem:lab-fsp-mga.openstack',
|
||||
'startingLun': '0',
|
||||
'ipAddr': '192.168.91.1 192.168.92.1 192.168.93.1 192.168.94.1',
|
||||
'object_id': '716cc60a-576a-55f1-bfe3-af4a21ca5554',
|
||||
'access': 'ReadWrite',
|
||||
'isInfiniBand': 'false',
|
||||
'iscsiurl': ''}],
|
||||
'type': 'Windows',
|
||||
'persistent_reservation': True,
|
||||
'isxboot_enabled': False}
|
||||
|
||||
|
||||
class V7000ISCSIDriverTestCase(test.TestCase):
|
||||
"""Test cases for VMEM ISCSI driver."""
|
||||
def setUp(self):
|
||||
super(V7000ISCSIDriverTestCase, self).setUp()
|
||||
self.conf = self.setup_configuration()
|
||||
self.driver = v7000_iscsi.V7000ISCSIDriver(configuration=self.conf)
|
||||
self.driver.gateway_iscsi_ip_addresses = [
|
||||
'192.168.91.1', '192.168.92.1', '192.168.93.1', '192.168.94.1']
|
||||
self.stats = {}
|
||||
self.driver.set_initialized()
|
||||
|
||||
def tearDown(self):
|
||||
super(V7000ISCSIDriverTestCase, self).tearDown()
|
||||
|
||||
def setup_configuration(self):
|
||||
config = mock.Mock(spec=conf.Configuration)
|
||||
config.volume_backend_name = 'v7000_iscsi'
|
||||
config.san_ip = '8.8.8.8'
|
||||
config.san_login = 'admin'
|
||||
config.san_password = ''
|
||||
config.san_thin_provision = False
|
||||
config.san_is_local = False
|
||||
config.use_igroups = False
|
||||
config.request_timeout = 300
|
||||
return config
|
||||
|
||||
def setup_mock_concerto(self, m_conf=None):
|
||||
"""Create a fake Concerto communication object."""
|
||||
_m_concerto = mock.Mock(name='Concerto',
|
||||
version='1.1.1',
|
||||
spec=vmemclient.mock_client_conf)
|
||||
|
||||
if m_conf:
|
||||
_m_concerto.configure_mock(**m_conf)
|
||||
|
||||
return _m_concerto
|
||||
|
||||
@mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error')
|
||||
def test_check_for_setup_error(self, m_setup_func):
|
||||
"""No setup errors are found."""
|
||||
result = self.driver.check_for_setup_error()
|
||||
m_setup_func.assert_called_with()
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_create_volume(self):
|
||||
"""Volume created successfully."""
|
||||
self.driver.common._create_lun = mock.Mock()
|
||||
|
||||
result = self.driver.create_volume(VOLUME)
|
||||
|
||||
self.driver.common._create_lun.assert_called_with(VOLUME)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_create_volume_from_snapshot(self):
|
||||
self.driver.common._create_volume_from_snapshot = mock.Mock()
|
||||
|
||||
result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
|
||||
|
||||
self.driver.common._create_volume_from_snapshot.assert_called_with(
|
||||
SNAPSHOT, VOLUME)
|
||||
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_create_cloned_volume(self):
|
||||
self.driver.common._create_lun_from_lun = mock.Mock()
|
||||
|
||||
result = self.driver.create_cloned_volume(VOLUME, SRC_VOL)
|
||||
|
||||
self.driver.common._create_lun_from_lun.assert_called_with(
|
||||
SRC_VOL, VOLUME)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_delete_volume(self):
|
||||
"""Volume deleted successfully."""
|
||||
self.driver.common._delete_lun = mock.Mock()
|
||||
|
||||
result = self.driver.delete_volume(VOLUME)
|
||||
|
||||
self.driver.common._delete_lun.assert_called_with(VOLUME)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_extend_volume(self):
|
||||
"""Volume extended successfully."""
|
||||
new_size = 10
|
||||
self.driver.common._extend_lun = mock.Mock()
|
||||
|
||||
result = self.driver.extend_volume(VOLUME, new_size)
|
||||
|
||||
self.driver.common._extend_lun.assert_called_with(VOLUME, new_size)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_create_snapshot(self):
|
||||
self.driver.common._create_lun_snapshot = mock.Mock()
|
||||
|
||||
result = self.driver.create_snapshot(SNAPSHOT)
|
||||
self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_delete_snapshot(self):
|
||||
self.driver.common._delete_lun_snapshot = mock.Mock()
|
||||
|
||||
result = self.driver.delete_snapshot(SNAPSHOT)
|
||||
self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_get_volume_stats(self):
|
||||
self.driver._update_volume_stats = mock.Mock()
|
||||
self.driver._update_volume_stats()
|
||||
|
||||
result = self.driver.get_volume_stats(True)
|
||||
|
||||
self.driver._update_volume_stats.assert_called_with()
|
||||
self.assertEqual(self.driver.stats, result)
|
||||
|
||||
def test_update_volume_stats(self):
|
||||
"""Mock query to the backend collects stats on all physical devices."""
|
||||
backend_name = self.conf.volume_backend_name
|
||||
|
||||
self.driver.common._get_volume_stats = mock.Mock(
|
||||
return_value=GET_VOLUME_STATS_RESPONSE,
|
||||
)
|
||||
|
||||
result = self.driver._update_volume_stats()
|
||||
|
||||
self.driver.common._get_volume_stats.assert_called_with(
|
||||
self.conf.san_ip)
|
||||
self.assertEqual(backend_name,
|
||||
self.driver.stats['volume_backend_name'])
|
||||
self.assertEqual('iSCSI',
|
||||
self.driver.stats['storage_protocol'])
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_initialize_connection(self):
|
||||
lun_id = 1
|
||||
response = {'success': True, 'msg': 'None'}
|
||||
|
||||
conf = {
|
||||
'client.create_client.return_value': response,
|
||||
'client.create_iscsi_target.return_value': response,
|
||||
}
|
||||
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
self.driver._get_iqn = mock.Mock(return_value=TARGET)
|
||||
self.driver._export_lun = mock.Mock(return_value=lun_id)
|
||||
|
||||
props = self.driver.initialize_connection(VOLUME, CONNECTOR)
|
||||
|
||||
self.driver._export_lun.assert_called_with(VOLUME, TARGET, CONNECTOR)
|
||||
self.assertEqual("iscsi", props['driver_volume_type'])
|
||||
self.assertFalse(props['data']['target_discovered'])
|
||||
self.assertEqual(TARGET, props['data']['target_iqn'])
|
||||
self.assertEqual(lun_id, props['data']['target_lun'])
|
||||
self.assertEqual(VOLUME['id'], props['data']['volume_id'])
|
||||
|
||||
def test_terminate_connection(self):
|
||||
self.driver.common.vmem_mg = self.setup_mock_concerto()
|
||||
self.driver._get_iqn = mock.Mock(return_value=TARGET)
|
||||
self.driver._unexport_lun = mock.Mock()
|
||||
|
||||
result = self.driver.terminate_connection(VOLUME, CONNECTOR)
|
||||
|
||||
self.driver._unexport_lun.assert_called_with(VOLUME, TARGET, CONNECTOR)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_export_lun(self):
|
||||
lun_id = '1'
|
||||
response = {'success': True, 'msg': 'Assign device successfully'}
|
||||
|
||||
self.driver.common.vmem_mg = self.setup_mock_concerto()
|
||||
|
||||
self.driver.common._send_cmd_and_verify = mock.Mock(
|
||||
return_value=response)
|
||||
self.driver._get_lun_id = mock.Mock(return_value=lun_id)
|
||||
|
||||
result = self.driver._export_lun(VOLUME, TARGET, CONNECTOR)
|
||||
|
||||
self.driver.common._send_cmd_and_verify.assert_called_with(
|
||||
self.driver.common.vmem_mg.lun.assign_lun_to_iscsi_target,
|
||||
self.driver._is_lun_id_ready,
|
||||
'Assign device successfully',
|
||||
[VOLUME['id'], TARGET],
|
||||
[VOLUME['id'], CONNECTOR['host']])
|
||||
self.driver._get_lun_id.assert_called_with(
|
||||
VOLUME['id'], CONNECTOR['host'])
|
||||
self.assertEqual(lun_id, result)
|
||||
|
||||
def test_export_lun_fails_with_exception(self):
|
||||
lun_id = '1'
|
||||
response = {'success': False, 'msg': 'Generic error'}
|
||||
|
||||
self.driver.common.vmem_mg = self.setup_mock_concerto()
|
||||
self.driver.common._send_cmd_and_verify = mock.Mock(
|
||||
side_effect=exception.ViolinBackendErr(response['msg']))
|
||||
self.driver._get_lun_id = mock.Mock(return_value=lun_id)
|
||||
|
||||
self.assertRaises(exception.ViolinBackendErr,
|
||||
self.driver._export_lun,
|
||||
VOLUME, TARGET, CONNECTOR)
|
||||
|
||||
def test_unexport_lun(self):
|
||||
response = {'success': True, 'msg': 'Unassign device successfully'}
|
||||
|
||||
self.driver.common.vmem_mg = self.setup_mock_concerto()
|
||||
self.driver.common._send_cmd = mock.Mock(
|
||||
return_value=response)
|
||||
|
||||
result = self.driver._unexport_lun(VOLUME, TARGET, CONNECTOR)
|
||||
|
||||
self.driver.common._send_cmd.assert_called_with(
|
||||
self.driver.common.vmem_mg.lun.unassign_lun_from_iscsi_target,
|
||||
"Unassign device successfully",
|
||||
VOLUME['id'], TARGET, True)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_is_lun_id_ready(self):
|
||||
lun_id = '1'
|
||||
self.driver.common.vmem_mg = self.setup_mock_concerto()
|
||||
|
||||
self.driver._get_lun_id = mock.Mock(return_value=lun_id)
|
||||
|
||||
result = self.driver._is_lun_id_ready(
|
||||
VOLUME['id'], CONNECTOR['host'])
|
||||
self.assertTrue(result)
|
||||
|
||||
def test_get_lun_id(self):
|
||||
|
||||
conf = {
|
||||
'client.get_client_info.return_value': CLIENT_INFO,
|
||||
}
|
||||
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
|
||||
|
||||
result = self.driver._get_lun_id(VOLUME['id'], CONNECTOR['host'])
|
||||
|
||||
self.assertEqual(8, result)
|
@ -31,6 +31,7 @@ driver documentation for more information.
import math
import re
import six
import socket
import time

from oslo_config import cfg
@ -41,7 +42,7 @@ from oslo_utils import units
from cinder import context
from cinder.db.sqlalchemy import api
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils
from cinder.volume import volume_types

@ -67,12 +68,26 @@ CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE = None
CONCERTO_DEFAULT_SRA_ENABLE_SHRINK = False
CONCERTO_DEFAULT_POLICY_MAX_SNAPSHOTS = 1000
CONCERTO_DEFAULT_POLICY_RETENTION_MODE = 'All'

CONCERTO_LUN_TYPE_THICK = 'THICK'
LUN_ALLOC_SZ = 10

violin_opts = [
    cfg.IntOpt('violin_request_timeout',
               default=300,
               help='Global backend request timeout, in seconds.'),
    cfg.ListOpt('violin_dedup_only_pools',
                default=[],
                help='Storage pools to be used to setup dedup luns only.'),
    cfg.ListOpt('violin_dedup_capable_pools',
                default=[],
                help='Storage pools capable of dedup and other luns.'),
    cfg.StrOpt('violin_pool_allocation_method',
               default='random',
               choices=['random', 'largest', 'smallest'],
               help='Method of choosing a storage pool for a lun.'),
    cfg.ListOpt('violin_iscsi_target_ips',
                default=[],
                help='List of target iSCSI addresses to use.'),
]

CONF = cfg.CONF
@ -93,15 +108,26 @@ class V7000Common(object):
raise exception.InvalidInput(
reason=_('Gateway VIP is not set'))

self.vmem_mg = vmemclient.open(self.config.san_ip,
self.config.san_login,
self.config.san_password,
keepalive=True)
if vmemclient:
self.vmem_mg = vmemclient.open(self.config.san_ip,
self.config.san_login,
self.config.san_password,
keepalive=True)

if self.vmem_mg is None:
msg = _('Failed to connect to array')
raise exception.VolumeBackendAPIException(data=msg)

if self.vmem_mg.utility.is_external_head:
# With an external head, storage pool configuration is a must
if (self.config.violin_dedup_only_pools == [] and
self.config.violin_dedup_capable_pools == []):

LOG.warning(_LW("Storage pools not configured"))
raise exception.InvalidInput(
reason=_('Storage pool configuration is '
'mandatory for external head'))

def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
if vmemclient is None:
@ -120,43 +146,30 @@ class V7000Common(object):

:param volume: volume object provided by the Manager
"""
thin_lun = False
dedup = False
spec_dict = {}
selected_pool = {}

size_mb = volume['size'] * units.Ki
full_size_mb = size_mb
pool = None

LOG.debug("Creating LUN %(name)s, %(size)s MB.",
{'name': volume['name'], 'size': size_mb})

if self.config.san_thin_provision:
thin_lun = True
# Set the actual allocation size for thin lun
# default here is 10%
size_mb = size_mb // 10
spec_dict = self._process_extra_specs(volume)

typeid = volume['volume_type_id']
if typeid:
# extra_specs with thin specified overrides san_thin_provision
spec_value = self._get_volume_type_extra_spec(volume, "thin")
if spec_value and spec_value.lower() == "true":
thin_lun = True
# Set the actual allocation size for thin lun
# default here is 10%
size_mb = size_mb // 10
try:
selected_pool = self._get_storage_pool(
volume,
size_mb,
spec_dict['pool_type'],
"create_lun")

spec_value = self._get_volume_type_extra_spec(volume, "dedup")
if spec_value and spec_value.lower() == "true":
dedup = True
# A dedup lun is always a thin lun
thin_lun = True
# Set the actual allocation size for thin lun
# default here is 10%. The actual allocation may
# differ, depending on other factors
size_mb = full_size_mb // 10

# Extract the storage_pool name if one is specified
pool = self._get_violin_extra_spec(volume, "storage_pool")
except exception.ViolinResourceNotFound:
raise
except Exception:
msg = _('No suitable storage pool found')
LOG.exception(msg)
raise exception.ViolinBackendErr(message=msg)

try:
# Note: In the following create_lun command for setting up a dedup
@ -169,8 +182,15 @@ class V7000Common(object):

self._send_cmd(self.vmem_mg.lun.create_lun,
"Create resource successfully.",
volume['id'], size_mb, dedup,
thin_lun, full_size_mb, storage_pool=pool)
volume['id'],
spec_dict['size_mb'],
selected_pool['dedup'],
selected_pool['thin'],
full_size_mb,
storage_pool_id=selected_pool['storage_pool_id'])

except exception.ViolinBackendErrExists:
LOG.debug("Lun %s already exists, continuing.", volume['id'])

except Exception:
LOG.exception(_LE("Lun create for %s failed!"), volume['id'])
@ -186,17 +206,18 @@ class V7000Common(object):

LOG.debug("Deleting lun %s.", volume['id'])

# If the LUN has ever had a snapshot, it has an SRA and
# policy that must be deleted first.
self._delete_lun_snapshot_bookkeeping(volume['id'])

try:
# If the LUN has ever had a snapshot, it has an SRA and
# policy that must be deleted first.
self._delete_lun_snapshot_bookkeeping(volume['id'])

# TODO(rdl) force the delete for now to deal with pending
# snapshot issues. Should revisit later for a better fix.
self._send_cmd(self.vmem_mg.lun.delete_lun,
success_msgs, volume['id'], True)
success_msgs, volume['id'])

except exception.VolumeBackendAPIException:
except vmemclient.core.error.NoMatchingObjectIdError:
LOG.debug("Lun %s already deleted, continuing.", volume['id'])

except exception.ViolinBackendErrExists:
LOG.exception(_LE("Lun %s has dependent snapshots, "
"skipping lun deletion."), volume['id'])
raise exception.VolumeIsBusy(volume_name=volume['id'])
@ -214,7 +235,8 @@ class V7000Common(object):
v = self.vmem_mg

typeid = volume['volume_type_id']
if typeid:

if typeid and not self.vmem_mg.utility.is_external_head:
spec_value = self._get_volume_type_extra_spec(volume, "dedup")
if spec_value and spec_value.lower() == "true":
# A Dedup lun's size cannot be modified in Concerto.
@ -251,7 +273,7 @@ class V7000Common(object):

:param snapshot: cinder snapshot object provided by the Manager

Exceptions:
:raises:
VolumeBackendAPIException: If SRA could not be created, or
snapshot policy could not be created
RequestRetryTimeout: If backend could not complete the request
@ -293,33 +315,19 @@ class V7000Common(object):

:param snapshot: cinder snapshot object provided by the Manager

Exceptions:
:raises:
RequestRetryTimeout: If backend could not complete the request
within the allotted timeout.
ViolinBackendErr: If backend reports an error during the
delete snapshot phase.
"""
cinder_volume_id = snapshot['volume_id']
cinder_snapshot_id = snapshot['id']
LOG.debug("Deleting snapshot %(snap_id)s on volume "
"%(vol_id)s %(dpy_name)s",
{'snap_id': cinder_snapshot_id,
'vol_id': cinder_volume_id,
{'snap_id': snapshot['id'],
'vol_id': snapshot['volume_id'],
'dpy_name': snapshot['display_name']})

try:
self._send_cmd(
self.vmem_mg.snapshot.delete_lun_snapshot,
"Delete TimeMark successfully",
lun=cinder_volume_id,
comment=self._compress_snapshot_id(cinder_snapshot_id))

except Exception:
LOG.exception(_LE("Lun delete snapshot for "
"volume %(vol)s snapshot %(snap)s failed!"),
{'vol': cinder_volume_id,
'snap': cinder_snapshot_id})
raise
return self._wait_run_delete_lun_snapshot(snapshot)

def _create_volume_from_snapshot(self, snapshot, volume):
"""Create a new cinder volume from a given snapshot of a lun
@ -330,27 +338,42 @@ class V7000Common(object):

:param snapshot: cinder snapshot object provided by the Manager
:param volume: cinder volume to be created
"""

cinder_volume_id = volume['id']
cinder_snapshot_id = snapshot['id']
pool = None
size_mb = volume['size'] * units.Ki
result = None
spec_dict = {}

LOG.debug("Copying snapshot %(snap_id)s onto volume %(vol_id)s.",
LOG.debug("Copying snapshot %(snap_id)s onto volume %(vol_id)s "
"%(dpy_name)s",
{'snap_id': cinder_snapshot_id,
'vol_id': cinder_volume_id})
'vol_id': cinder_volume_id,
'dpy_name': snapshot['display_name']})

typeid = volume['volume_type_id']
if typeid:
pool = self._get_violin_extra_spec(volume, "storage_pool")
source_lun_info = self.vmem_mg.lun.get_lun_info(snapshot['volume_id'])
self._validate_lun_type_for_copy(source_lun_info['subType'])

spec_dict = self._process_extra_specs(volume)
try:
selected_pool = self._get_storage_pool(volume,
size_mb,
spec_dict['pool_type'],
"create_lun")

except exception.ViolinResourceNotFound:
raise
except Exception:
msg = _('No suitable storage pool found')
LOG.exception(msg)
raise exception.ViolinBackendErr(message=msg)

try:
result = self.vmem_mg.lun.copy_snapshot_to_new_lun(
source_lun=snapshot['volume_id'],
source_snapshot_comment=
self._compress_snapshot_id(cinder_snapshot_id),
source_snapshot_comment=self._compress_snapshot_id(
cinder_snapshot_id),
destination=cinder_volume_id,
storage_pool=pool)
storage_pool_id=selected_pool['storage_pool_id'])

if not result['success']:
self._check_error_code(result)
@ -374,26 +397,32 @@ class V7000Common(object):
:param src_vol: cinder volume to clone
:param dest_vol: cinder volume to be created
"""
pool = None
size_mb = dest_vol['size'] * units.Ki
src_vol_mb = src_vol['size'] * units.Ki
result = None
spec_dict = {}

LOG.debug("Copying lun %(src_vol_id)s onto lun %(dest_vol_id)s.",
{'src_vol_id': src_vol['id'],
'dest_vol_id': dest_vol['id']})

# Extract the storage_pool name if one is specified
typeid = dest_vol['volume_type_id']
if typeid:
pool = self._get_violin_extra_spec(dest_vol, "storage_pool")

try:
source_lun_info = self.vmem_mg.lun.get_lun_info(src_vol['id'])
self._validate_lun_type_for_copy(source_lun_info['subType'])

# in order to do a full clone the source lun must have a
# snapshot resource
self._ensure_snapshot_resource_area(src_vol['id'])

spec_dict = self._process_extra_specs(dest_vol)
selected_pool = self._get_storage_pool(dest_vol,
size_mb,
spec_dict['pool_type'],
None)

result = self.vmem_mg.lun.copy_lun_to_new_lun(
source=src_vol['id'], destination=dest_vol['id'],
storage_pool=pool)
storage_pool_id=selected_pool['storage_pool_id'])

if not result['success']:
self._check_error_code(result)
@ -408,8 +437,12 @@ class V7000Common(object):
src_vol['id'], dest_obj_id=result['object_id'])

# extend the copied lun if requested size is larger than original
if dest_vol['size'] > src_vol['size']:
self._extend_lun(dest_vol, dest_vol['size'])
if size_mb > src_vol_mb:
# dest_vol size has to be set to reflect what it is currently
dest_vol_size = dest_vol['size']
dest_vol['size'] = src_vol['size']
self._extend_lun(dest_vol, dest_vol_size)
dest_vol['size'] = dest_vol_size

def _send_cmd(self, request_func, success_msgs, *args, **kwargs):
"""Run an XG request function, and retry as needed.
@ -531,14 +564,14 @@ class V7000Common(object):

:param volume_id: Cinder volume ID corresponding to the backend LUN

Exceptions:
:raises:
VolumeBackendAPIException: if cinder volume does not exist
on backend, or SRA could not be created.
"""

ctxt = context.get_admin_context()
volume = api.volume_get(ctxt, volume_id)
pool = None
spec_dict = {}

if not volume:
msg = (_("Failed to ensure snapshot resource area, could not "
"locate volume for id %s") % volume_id)
@ -562,15 +595,28 @@ class V7000Common(object):
snap_size_mb = 0.2 * lun_size_mb

snap_size_mb = int(math.ceil(snap_size_mb))
typeid = volume['volume_type_id']
if typeid:
pool = self._get_violin_extra_spec(volume, "storage_pool")

LOG.debug("Creating SRA of %(ssmb)sMB for lun of %(lsmb)sMB "
"on %(vol_id)s.",
{'ssmb': snap_size_mb,
'lsmb': lun_size_mb,
'vol_id': volume_id})
spec_dict = self._process_extra_specs(volume)

try:
selected_pool = self._get_storage_pool(
volume,
snap_size_mb,
spec_dict['pool_type'],
None)

LOG.debug("Creating SRA of %(ssmb)sMB for lun of %(lsmb)sMB "
"on %(vol_id)s",
{'ssmb': snap_size_mb,
'lsmb': lun_size_mb,
'vol_id': volume_id})

except exception.ViolinResourceNotFound:
raise
except Exception:
msg = _('No suitable storage pool found')
LOG.exception(msg)
raise exception.ViolinBackendErr(message=msg)

res = self.vmem_mg.snapshot.create_snapshot_resource(
lun=volume_id,
@ -582,7 +628,7 @@ class V7000Common(object):
expansion_increment=CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT,
expansion_max_size=CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE,
enable_shrink=CONCERTO_DEFAULT_SRA_ENABLE_SHRINK,
storage_pool=pool)
storage_pool_id=selected_pool['storage_pool_id'])

if (not res['success']):
msg = (_("Failed to create snapshot resource area on "
@ -597,10 +643,9 @@ class V7000Common(object):

:param volume_id: Cinder volume ID corresponding to the backend LUN

Exceptions:
:raises:
VolumeBackendAPIException: when snapshot policy cannot be created.
"""

if not self.vmem_mg.snapshot.lun_has_a_snapshot_policy(
lun=volume_id):

@ -622,7 +667,7 @@ class V7000Common(object):
def _delete_lun_snapshot_bookkeeping(self, volume_id):
"""Clear residual snapshot support resources from LUN.

Exceptions:
:raises:
VolumeBackendAPIException: If snapshots still exist on the LUN.
"""

@ -721,35 +766,35 @@ class V7000Common(object):
LOG.debug("Entering _wait_for_lun_or_snap_copy loop: "
"vdev=%s, objid=%s", dest_vdev_id, dest_obj_id)

status = wait_func(src_vol_id)
target_id, mb_copied, percent = wait_func(src_vol_id)

if status[0] is None:
# pre-copy transient result, status=(None, None, 0)
if target_id is None:
# pre-copy transient result
LOG.debug("lun or snap copy prepping.")
pass
elif status[0] != wait_id:
# the copy must be complete since another lun is being copied
elif target_id != wait_id:
# the copy is complete, another lun is being copied
LOG.debug("lun or snap copy complete.")
raise loopingcall.LoopingCallDone(retvalue=True)
elif status[1] is not None:
# copy is in progress, status = ('12345', 1700, 10)
LOG.debug("MB copied:%d, percent done: %d.",
status[1], status[2])
elif mb_copied is not None:
# copy is in progress
LOG.debug("MB copied: %{copied}d, percent done: %{percent}d.",
{'copied': mb_copied, 'percent': percent})
pass
elif status[2] == 0:
# copy has just started, status = ('12345', None, 0)
elif percent == 0:
# copy has just started
LOG.debug("lun or snap copy started.")
pass
elif status[2] == 100:
# copy is complete, status = ('12345', None, 100)
elif percent == 100:
# copy is complete
LOG.debug("lun or snap copy complete.")
raise loopingcall.LoopingCallDone(retvalue=True)
else:
# unexpected case
LOG.debug("unexpected case (%{id}s, %{bytes}s, %{percent}s)",
{'id': six.text_type(status[0]),
'bytes': six.text_type(status[1]),
'percent': six.text_type(status[2])})
{'id': target_id,
'bytes': mb_copied,
'percent': six.text_type(percent)})
raise loopingcall.LoopingCallDone(retvalue=False)

timer = loopingcall.FixedIntervalLoopingCall(_loop_func)
@ -778,7 +823,7 @@ class V7000Common(object):
individually. Not all of them are fatal. For example, lun attach
failing because the client is already attached is not a fatal error.

:param response: a response dict result from the vmemclient request
:param response: a response dict result from the vmemclient request
"""
if "Error: 0x9001003c" in response['msg']:
# This error indicates a duplicate attempt to attach lun,
@ -856,3 +901,185 @@ class V7000Common(object):
spec_value = val
break
return spec_value

def _get_storage_pool(self, volume, size_in_mb, pool_type, usage):
# User-specified pool takes precedence over others

pool = None
typeid = volume.get('volume_type_id')
if typeid:
# Extract the storage_pool name if one is specified
pool = self._get_violin_extra_spec(volume, "storage_pool")

# Select a storage pool
selected_pool = self.vmem_mg.pool.select_storage_pool(
size_in_mb,
pool_type,
pool,
self.config.violin_dedup_only_pools,
self.config.violin_dedup_capable_pools,
self.config.violin_pool_allocation_method,
usage)
if selected_pool is None:
# Backend has not provided a suitable storage pool
msg = _("Backend does not have a suitable storage pool.")
raise exception.ViolinResourceNotFound(message=msg)

LOG.debug("Storage pool returned is %s",
selected_pool['storage_pool'])

return selected_pool

def _process_extra_specs(self, volume):
spec_dict = {}
thin_lun = False
thick_lun = False
dedup = False
size_mb = volume['size'] * units.Ki
full_size_mb = size_mb

if self.config.san_thin_provision:
thin_lun = True
# Set the actual allocation size for thin lun
# default here is 10%
size_mb = int(math.ceil(float(size_mb) / LUN_ALLOC_SZ))

typeid = volume.get('volume_type_id')
if typeid:
# extra_specs with thin specified overrides san_thin_provision
spec_value = self._get_volume_type_extra_spec(volume, "thin")
if not thin_lun and spec_value and spec_value.lower() == "true":
thin_lun = True
# Set the actual allocation size for thin lun
# default here is 10%
size_mb = int(math.ceil(float(size_mb) / LUN_ALLOC_SZ))

# Set thick lun before checking for dedup,
# since dedup is always thin
if not thin_lun:
thick_lun = True

spec_value = self._get_volume_type_extra_spec(volume, "dedup")
if spec_value and spec_value.lower() == "true":
dedup = True
# A dedup lun is always a thin lun
thin_lun = True
thick_lun = False
# Set the actual allocation size for thin lun
# default here is 10%. The actual allocation may
# differ, depending on other factors
size_mb = int(math.ceil(float(full_size_mb) / LUN_ALLOC_SZ))

if dedup:
spec_dict['pool_type'] = "dedup"
elif thin_lun:
spec_dict['pool_type'] = "thin"
else:
spec_dict['pool_type'] = "thick"
thick_lun = True

spec_dict['size_mb'] = size_mb
spec_dict['thick'] = thick_lun
spec_dict['thin'] = thin_lun
spec_dict['dedup'] = dedup

return spec_dict

def _get_volume_stats(self, san_ip):
"""Gathers array stats and converts them to GB values."""
free_gb = 0
total_gb = 0

owner = socket.getfqdn(san_ip)
# Store DNS lookups to prevent asking the same question repeatedly
owner_lookup = {san_ip: owner}
pools = self.vmem_mg.pool.get_storage_pools(
verify=True,
include_full_info=True,
)

for short_info, full_info in pools:
mod = ''
pool_free_mb = 0
pool_total_mb = 0
for dev in full_info.get('physicaldevices', []):
if dev['owner'] not in owner_lookup:
owner_lookup[dev['owner']] = socket.getfqdn(dev['owner'])
if owner_lookup[dev['owner']] == owner:
pool_free_mb += dev['availsize_mb']
pool_total_mb += dev['size_mb']
elif not mod:
mod = ' *'
LOG.debug('pool %(pool)s: %(avail)s / %(total)s MB free %(mod)s',
{'pool': short_info['name'], 'avail': pool_free_mb,
'total': pool_total_mb, 'mod': mod})
free_gb += int(pool_free_mb / units.Ki)
total_gb += int(pool_total_mb / units.Ki)

data = {
'vendor_name': 'Violin Memory, Inc.',
'reserved_percentage': self.config.reserved_percentage,
'QoS_support': False,
'free_capacity_gb': free_gb,
'total_capacity_gb': total_gb,
'consistencygroup_support': False,
}

return data

def _wait_run_delete_lun_snapshot(self, snapshot):
"""Run and wait for LUN snapshot to complete.

:param snapshot -- cinder snapshot object provided by the Manager
"""
cinder_volume_id = snapshot['volume_id']
cinder_snapshot_id = snapshot['id']

comment = self._compress_snapshot_id(cinder_snapshot_id)
oid = self.vmem_mg.snapshot.snapshot_comment_to_object_id(
cinder_volume_id, comment)

def _loop_func():
LOG.debug("Entering _wait_run_delete_lun_snapshot loop: "
"vol=%(vol)s, snap_id=%(snap_id)s, oid=%(oid)s",
{'vol': cinder_volume_id,
'oid': oid,
'snap_id': cinder_snapshot_id})

ans = self.vmem_mg.snapshot.delete_lun_snapshot(
snapshot_object_id=oid)

if ans['success']:
LOG.debug("Delete snapshot %(snap_id)s of %(vol)s: "
"success", {'vol': cinder_volume_id,
'snap_id': cinder_snapshot_id})
raise loopingcall.LoopingCallDone(retvalue=True)
else:
LOG.warning(_LW("Delete snapshot %(snap)s of %(vol)s "
"encountered temporary error: %(msg)s"),
{'snap': cinder_snapshot_id,
'vol': cinder_volume_id,
'msg': ans['msg']})

timer = loopingcall.FixedIntervalLoopingCall(_loop_func)
success = timer.start(interval=1).wait()

if not success:
msg = (_("Failed to delete snapshot "
"%(snap)s of volume %(vol)s") %
{'snap': cinder_snapshot_id, 'vol': cinder_volume_id})
raise exception.ViolinBackendErr(msg)

def _validate_lun_type_for_copy(self, lun_type):
"""Make sure volume type is thick.

:param lun_type: Cinder volume type

:raises:
VolumeBackendAPIException: if volume type is not thick,
copying the lun is not possible.
"""
if lun_type != CONCERTO_LUN_TYPE_THICK:
msg = _('Lun copy currently only supported for thick luns')
LOG.error(msg)
raise exception.ViolinBackendErr(message=msg)
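For readers skimming the diff, here is a minimal standalone sketch (not part of the commit) of the pool-type rule that _process_extra_specs above implements: a dedup volume type is always thin, thin volumes get a reduced initial allocation (the "default here is 10%" comment), and everything else is treated as thick. The extra-spec keys and the allocation divisor are simplified assumptions for illustration only.

import math

LUN_ALLOC_SZ = 10  # assumed divisor matching the "default here is 10%" comment


def pool_type_and_alloc(extra_specs, size_mb, san_thin_provision=False):
    """Return (pool_type, initial allocation in MB) for simplified extra specs."""
    dedup = extra_specs.get('dedup', '').lower() == 'true'
    thin = (dedup or san_thin_provision or
            extra_specs.get('thin', '').lower() == 'true')
    alloc_mb = int(math.ceil(float(size_mb) / LUN_ALLOC_SZ)) if thin else size_mb
    pool_type = 'dedup' if dedup else ('thin' if thin else 'thick')
    return pool_type, alloc_mb


# 10 GiB volume: dedup implies thin and a ~10% initial allocation
print(pool_type_and_alloc({'dedup': 'True'}, 10 * 1024))   # ('dedup', 1024)
print(pool_type_and_alloc({}, 10 * 1024))                  # ('thick', 10240)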
@ -314,7 +314,7 @@ class V7000FCPDriver(driver.FibreChannelDriver):
:returns: integer value of lun ID
"""
v = self.common.vmem_mg
lun_id = -1
lun_id = None

client_info = v.client.get_client_info(client_name)

@ -323,7 +323,10 @@ class V7000FCPDriver(driver.FibreChannelDriver):
lun_id = x['lun']
break

return int(lun_id)
if lun_id:
lun_id = int(lun_id)

return lun_id

def _is_lun_id_ready(self, volume_name, client_name):
"""Get the lun ID for an exported volume.
@ -338,10 +341,10 @@ class V7000FCPDriver(driver.FibreChannelDriver):

lun_id = -1
lun_id = self._get_lun_id(volume_name, client_name)
if lun_id != -1:
return True
else:
if lun_id is None:
return False
else:
return True

def _build_initiator_target_map(self, connector):
"""Build the target_wwns and the initiator target map."""
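The FC driver change above switches the "not yet assigned" sentinel from -1 to None. A hedged sketch of that polling contract follows; the function names mirror the diff, but the client-info key and sample data are fabricated for illustration:

def _get_lun_id(client_info, volume_name):
    # Return the integer lun ID once the export shows up, otherwise None.
    for dev in client_info.get('FibreChannelDevices', []):
        if dev['name'] == volume_name:
            return int(dev['lun'])
    return None


def _is_lun_id_ready(client_info, volume_name):
    return _get_lun_id(client_info, volume_name) is not None


print(_is_lun_id_ready({'FibreChannelDevices': []}, 'volume-1'))  # False
print(_get_lun_id({'FibreChannelDevices': [{'name': 'volume-1',
                                            'lun': '3'}]}, 'volume-1'))  # 3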
345 cinder/volume/drivers/violin/v7000_iscsi.py Normal file
@ -0,0 +1,345 @@
# Copyright 2016 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Violin 7000 Series All-Flash Array iSCSI Volume Driver

Provides iSCSI specific LUN services for V7000 series flash arrays.

This driver requires Concerto v7.5.4 or newer software on the array.

You will need to install the python VMEM REST client:
sudo pip install vmemclient

Set the following in the cinder.conf file to enable the VMEM V7000
ISCSI Driver along with the required flags:

volume_driver=cinder.volume.drivers.violin.v7000_iscsi.V7000ISCSIDriver
"""

import random
import uuid

from oslo_log import log as logging

from cinder import exception
from cinder import interface
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume.drivers.violin import v7000_common

LOG = logging.getLogger(__name__)


@interface.volumedriver
class V7000ISCSIDriver(driver.ISCSIDriver):
"""Executes commands relating to iscsi based Violin Memory arrays.

Version history:
1.0 - Initial driver
"""

VERSION = '1.0'

def __init__(self, *args, **kwargs):
super(V7000ISCSIDriver, self).__init__(*args, **kwargs)
self.stats = {}
self.gateway_iscsi_ip_addresses = []
self.configuration.append_config_values(v7000_common.violin_opts)
self.configuration.append_config_values(san.san_opts)
self.common = v7000_common.V7000Common(self.configuration)

LOG.info(_LI("Initialized driver %(name)s version: %(vers)s"),
{'name': self.__class__.__name__, 'vers': self.VERSION})

def do_setup(self, context):
"""Any initialization the driver does while starting."""
super(V7000ISCSIDriver, self).do_setup(context)

self.common.do_setup(context)

# Register the client with the storage array
iscsi_version = self.VERSION + "-ISCSI"
self.common.vmem_mg.utility.set_managed_by_openstack_version(
iscsi_version, protocol="iSCSI")

# Getting iscsi IPs from the array is incredibly expensive,
# so only do it once.
if not self.configuration.violin_iscsi_target_ips:
LOG.warning(_LW("iSCSI target IP addresses not configured"))
self.gateway_iscsi_ip_addresses = (
self.common.vmem_mg.utility.get_iscsi_interfaces())
else:
self.gateway_iscsi_ip_addresses = (
self.configuration.violin_iscsi_target_ips)

def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
self.common.check_for_setup_error()
if len(self.gateway_iscsi_ip_addresses) == 0:
msg = _('No iSCSI IPs configured on SAN gateway')
raise exception.ViolinInvalidBackendConfig(reason=msg)

def create_volume(self, volume):
"""Creates a volume."""
self.common._create_lun(volume)

def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
self.common._create_volume_from_snapshot(snapshot, volume)

def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
self.common._create_lun_from_lun(src_vref, volume)

def delete_volume(self, volume):
"""Deletes a volume."""
self.common._delete_lun(volume)

def extend_volume(self, volume, new_size):
"""Extend an existing volume's size."""
self.common._extend_lun(volume, new_size)

def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.common._create_lun_snapshot(snapshot)

def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self.common._delete_lun_snapshot(snapshot)

def ensure_export(self, context, volume):
"""Synchronously checks and re-exports volumes at cinder start time."""
pass

def create_export(self, context, volume, connector):
"""Exports the volume."""
pass

def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
pass

def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
resp = {}

LOG.debug("Initialize_connection: initiator - %(initiator)s host - "
"%(host)s ip - %(ip)s",
{'initiator': connector['initiator'],
'host': connector['host'],
'ip': connector['ip']})

iqn = self._get_iqn(connector)

# pick a random single target to give the connector since
# there is no multipathing support
tgt = self.gateway_iscsi_ip_addresses[random.randint(
0, len(self.gateway_iscsi_ip_addresses) - 1)]

resp = self.common.vmem_mg.client.create_client(
name=connector['host'], proto='iSCSI',
iscsi_iqns=connector['initiator'])

# raise if we failed for any reason other than a 'client
# already exists' error code
if not resp['success'] and 'Error: 0x900100cd' not in resp['msg']:
msg = _("Failed to create iscsi client")
raise exception.ViolinBackendErr(message=msg)

resp = self.common.vmem_mg.client.create_iscsi_target(
name=iqn, client_name=connector['host'],
ip=self.gateway_iscsi_ip_addresses, access_mode='ReadWrite')

# same here, raise for any failure other than a 'target
# already exists' error code
if not resp['success'] and 'Error: 0x09024309' not in resp['msg']:
msg = _("Failed to create iscsi target")
raise exception.ViolinBackendErr(message=msg)

lun_id = self._export_lun(volume, iqn, connector)

properties = {}
properties['target_discovered'] = False
properties['target_iqn'] = iqn
properties['target_portal'] = '%s:%s' % (tgt, '3260')
properties['target_lun'] = lun_id
properties['volume_id'] = volume['id']

LOG.debug("Return ISCSI data: %(properties)s.",
{'properties': properties})

return {'driver_volume_type': 'iscsi', 'data': properties}

def terminate_connection(self, volume, connector, **kwargs):
"""Terminates the connection (target<-->initiator)."""
iqn = self._get_iqn(connector)
self._unexport_lun(volume, iqn, connector)

def get_volume_stats(self, refresh=False):
"""Get volume stats.

If 'refresh' is True, update the stats first.
"""
if refresh or not self.stats:
self._update_volume_stats()
return self.stats

def _update_volume_stats(self):
"""Gathers array stats and converts them to GB values."""
data = self.common._get_volume_stats(self.configuration.san_ip)

backend_name = self.configuration.volume_backend_name
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'iSCSI'
for i in data:
LOG.debug("stat update: %(name)s=%(data)s",
{'name': i, 'data': data[i]})

self.stats = data

def _export_lun(self, volume, target, connector):
"""Generates the export configuration for the given volume.

:param volume: volume object provided by the Manager
:param connector: connector object provided by the Manager
:returns: the LUN ID assigned by the backend
"""
lun_id = ''
v = self.common.vmem_mg

LOG.debug("Exporting lun %(vol_id)s - initiator iqns %(i_iqns)s "
"- target iqns %(t_iqns)s.",
{'vol_id': volume['id'], 'i_iqns': connector['initiator'],
't_iqns': self.gateway_iscsi_ip_addresses})

try:
lun_id = self.common._send_cmd_and_verify(
v.lun.assign_lun_to_iscsi_target,
self._is_lun_id_ready,
"Assign device successfully",
[volume['id'], target],
[volume['id'], connector['host']])

except exception.ViolinBackendErr:
LOG.exception(_LE("Backend returned error for lun export."))
raise

except Exception:
raise exception.ViolinInvalidBackendConfig(
reason=_('LUN export failed!'))

lun_id = self._get_lun_id(volume['id'], connector['host'])
LOG.info(_LI("Exported lun %(vol_id)s on lun_id %(lun_id)s."),
{'vol_id': volume['id'], 'lun_id': lun_id})

return lun_id

def _unexport_lun(self, volume, target, connector):
"""Removes the export configuration for the given volume.

The equivalent CLI command is "no lun export container
<container_name> name <lun_name>"

Arguments:
volume -- volume object provided by the Manager
"""
v = self.common.vmem_mg

LOG.info(_LI("Unexporting lun %(vol)s host is %(host)s"),
{'vol': volume['id'], 'host': connector['host']})

try:
self.common._send_cmd(v.lun.unassign_lun_from_iscsi_target,
"Unassign device successfully",
volume['id'], target, True)

except exception.ViolinBackendErrNotFound:
LOG.info(_LI("Lun %s already unexported, continuing"),
volume['id'])

except Exception:
LOG.exception(_LE("LUN unexport failed!"))
msg = _("LUN unexport failed")
raise exception.ViolinBackendErr(message=msg)

def _is_lun_id_ready(self, volume_name, client_name):
"""Get the lun ID for an exported volume.

If the lun is successfully assigned (exported) to a client, the
client info has the lun_id.

Note: The structure returned for iscsi is different from the
one returned for FC. Therefore this function is here instead of
common.

Arguments:
volume_name -- name of volume to query for lun ID
client_name -- name of client associated with the volume

Returns:
lun_id -- Returns True or False
"""

lun_id = -1
lun_id = self._get_lun_id(volume_name, client_name)

if lun_id is None:
return False
else:
return True

def _get_lun_id(self, volume_name, client_name):
"""Get the lun ID for an exported volume.

If the lun is successfully assigned (exported) to a client, the
client info has the lun_id.

Note: The structure returned for iscsi is different from the
one returned for FC. Therefore this function is here instead of
common.

Arguments:
volume_name -- name of volume to query for lun ID
client_name -- name of client associated with the volume

Returns:
lun_id -- integer value of lun ID
"""
v = self.common.vmem_mg
lun_id = None

client_info = v.client.get_client_info(client_name)

for x in client_info['ISCSIDevices']:
if volume_name == x['name']:
lun_id = x['lun']
break

if lun_id:
lun_id = int(lun_id)

return lun_id

def _get_iqn(self, connector):
# The vmemclient connection properties list hostname field may
# change depending on failover cluster config. Use a UUID
# from the backend's IP address to avoid a potential naming
# issue.
host_uuid = uuid.uuid3(uuid.NAMESPACE_DNS, self.configuration.san_ip)
return "%s%s.%s" % (self.configuration.iscsi_target_prefix,
connector['host'], host_uuid)
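As a quick illustration of the _get_iqn logic at the end of the new driver, here is a minimal sketch; the prefix, array IP, and host name below are made-up values, whereas the real driver reads them from self.configuration and the connector:

import uuid

iscsi_target_prefix = 'iqn.2004-02.com.vmem:'  # assumed iscsi_target_prefix value
san_ip = '192.0.2.10'                          # assumed array management IP
host = 'compute-01'                            # connector['host']

# uuid3 yields a stable UUID per array IP, so the target name does not drift
# if the hostname reported by the connector changes in a failover cluster.
host_uuid = uuid.uuid3(uuid.NAMESPACE_DNS, san_ip)
print("%s%s.%s" % (iscsi_target_prefix, host, host_uuid))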
3 releasenotes/notes/vmem-7000-iscsi-3c8683dcc1f0b9b4.yaml Normal file
@ -0,0 +1,3 @@
---
features:
- Added backend driver for Violin Memory 7000 iSCSI storage.