NetApp: Add NVMe/TCP driver

Add NVMe/TCP cinder volume driver for the NetApp ONTAP Cluster.

Implements: blueprint netapp-volume-driver
Change-Id: Ic583131b46d928ef7669fa47e697cce65e7632bc

This commit is contained in:
parent 4a6792fba3
commit 3141da1442
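Before the diff hunks: the new driver follows the standard Cinder attach path, mapping an ONTAP namespace into an NVMe subsystem and handing os-brick `nvmeof` connection properties. A minimal sketch of that flow, mirroring what the unit tests below assert; the helper names (`_map_namespace`, `get_nvme_subsystem_nqn`, `get_nvme_target_portals`) come from the tests, everything else is illustrative and not the committed implementation.

# Illustrative sketch - not part of this commit.
from cinder import exception


def build_nvmeof_connection_info(library, volume, connector):
    # Map the namespace into a subsystem for this host's NQN.
    subsystem, namespace_uuid = library._map_namespace(
        volume['name'], connector['nqn'])

    # Resolve the subsystem's target NQN and the NVMe/TCP data LIFs.
    target_nqn = library.client.get_nvme_subsystem_nqn(subsystem)
    portals = library.client.get_nvme_target_portals()
    if not target_nqn or not portals:
        raise exception.VolumeBackendAPIException(
            data='No NVMe target NQN or portals available.')

    return {
        'driver_volume_type': 'nvmeof',
        'data': {
            'target_nqn': target_nqn,
            'host_nqn': connector['nqn'],
            'portals': [(ip, 4420, 'tcp') for ip in portals],
            'vol_uuid': namespace_uuid,
        },
    }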
@@ -71,6 +71,8 @@ FAKE_NA_SERVER_API_1_20.set_api_version(1, 20)
VOLUME_VSERVER_NAME = 'fake_vserver'
VOLUME_NAMES = ('volume1', 'volume2')
VOLUME_NAME = 'volume1'
NAMESPACE_NAME = '/vol/vol1/namespace1'
HOST_NQN = 'nqn.1992-01.example.com:host'
DEST_VOLUME_NAME = 'volume-dest'
LUN_NAME = 'fake-lun-name'
DEST_LUN_NAME = 'new-fake-lun-name'

@@ -3058,3 +3060,91 @@ GET_VSERVER_PEERS_RESPONSE_REST = {
    "num_records": 1,
    "records": GET_VSERVER_PEERS_RECORDS_REST
}

GET_NAMESPACE_RESPONSE_REST = {
    "records": [
        {
            "uuid": "fake_uuid1",
            "svm": {
                "name": "fake_vserver1"
            },
            "name": "/vol/fake_vol_001/test",
            "location": {
                "volume": {
                    "name": "fake_vol_001"
                }
            },
            "os_type": "linux",
            "space": {
                "block_size": 9999,
                "size": 999999,
                "guarantee": {
                    "requested": True
                }
            },
        },
        {
            "uuid": "fake_uuid2",
            "svm": {
                "name": "fake_vserver2"
            },
            "name": "/vol/fake_vol_002/test",
            "location": {
                "volume": {
                    "name": "fake_vol_002"
                }
            },
            "os_type": "linux",
            "space": {
                "block_size": 8888,
                "size": 8888888,
                "guarantee": {
                    "requested": True
                }
            },
        }
    ],
    "num_records": 2,
}

SUBSYSTEM = 'openstack-fake_subsystem'
TARGET_NQN = 'nqn.1992-01.example.com:target'
GET_SUBSYSTEM_RESPONSE_REST = {
    "records": [
        {
            "uuid": "fake_uuid1",
            "name": SUBSYSTEM,
            "os_type": "linux",
            "target_nqn": TARGET_NQN,
        }
    ],
    "num_records": 1,
}

GET_SUBSYSTEM_MAP_RESPONSE_REST = {
    "records": [
        {
            "namespace": {
                "uuid": FAKE_UUID,
            },
            "subsystem": {
                "name": SUBSYSTEM
            },
            "svm": {
                "name": VSERVER_NAME
            },
        },
    ],
    "num_records": 1,
}

GET_INTERFACES_NVME_REST = {
    'records': [
        {
            "ip": {
                "address": "10.10.10.10",
            }
        }
    ],
    'num_records': 1
}

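The fixtures above model raw ONTAP REST responses; the client tests further down expect each namespace record to be flattened into a plain dict. A small sketch of that mapping (the committed helper, `get_namespace_list`, appears near the end of this diff):

# Illustrative sketch - not part of this commit.
def flatten_namespace_record(record):
    return {
        'Vserver': record['svm']['name'],
        'Volume': record['location']['volume']['name'],
        'Size': record['space']['size'],
        'Qtree': record['location'].get('qtree', {}).get('name', ''),
        'Path': record['name'],
        'OsType': record['os_type'],
        'SpaceReserved': record['space']['guarantee']['requested'],
        'UUID': record['uuid'],
    }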
@@ -3707,3 +3707,399 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
            'patch', body=body)
        self.client._get_volume_by_args.assert_called_once_with(
            vol_name=fake_client.VOLUME_NAMES[0])

    def test_get_namespace_list(self):
        response = fake_client.GET_NAMESPACE_RESPONSE_REST

        fake_query = {
            'svm.name': 'fake_vserver',
            'fields': 'svm.name,location.volume.name,space.size,'
                      'location.qtree.name,name,os_type,'
                      'space.guarantee.requested,uuid'
        }

        expected_result = [
            {
                'Vserver': 'fake_vserver1',
                'Volume': 'fake_vol_001',
                'Size': 999999,
                'Qtree': '',
                'Path': '/vol/fake_vol_001/test',
                'OsType': 'linux',
                'SpaceReserved': True,
                'UUID': 'fake_uuid1'
            },
            {
                'Vserver': 'fake_vserver2',
                'Volume': 'fake_vol_002',
                'Size': 8888888,
                'Qtree': '',
                'Path': '/vol/fake_vol_002/test',
                'OsType': 'linux',
                'SpaceReserved': True,
                'UUID': 'fake_uuid2'
            },
        ]

        self.mock_object(self.client, 'send_request', return_value=response)

        result = self.client.get_namespace_list()

        self.client.send_request.assert_called_once_with(
            '/storage/namespaces/', 'get', query=fake_query)
        self.assertEqual(expected_result, result)

    def test_get_namespace_list_no_response(self):
        response = fake_client.NO_RECORDS_RESPONSE_REST
        fake_query = {
            'svm.name': 'fake_vserver',
            'fields': 'svm.name,location.volume.name,space.size,'
                      'location.qtree.name,name,os_type,'
                      'space.guarantee.requested,uuid'
        }

        self.mock_object(self.client, 'send_request', return_value=response)

        result = self.client.get_namespace_list()

        self.client.send_request.assert_called_once_with(
            '/storage/namespaces/', 'get', query=fake_query)
        self.assertEqual([], result)

    def test_destroy_namespace(self):

        fake_query = {
            'name': '/vol/fake_vol_001/test',
            'svm': 'fake_vserver'
        }

        self.mock_object(self.client, 'send_request')

        self.client.destroy_namespace('/vol/fake_vol_001/test', force=False)

        self.client.send_request.assert_called_once_with(
            '/storage/namespaces', 'delete', query=fake_query)

    def test_destroy_namespace_force_true(self):

        fake_query = {
            'name': '/vol/fake_vol_001/test',
            'svm': 'fake_vserver',
            'allow_delete_while_mapped': 'true'
        }

        self.mock_object(self.client, 'send_request')

        self.client.destroy_namespace('/vol/fake_vol_001/test', force=True)

        self.client.send_request.assert_called_once_with(
            '/storage/namespaces', 'delete', query=fake_query)

    def test_clone_namespace(self):

        fake_body = {
            'svm': {
                'name': 'fake_vserver'
            },
            'name': '/vol/fake_volume/fake_new_name',
            'clone': {
                'source': {
                    'name': '/vol/fake_volume/fake_name',
                }
            }
        }

        self.mock_object(self.client, 'send_request')

        self.client.clone_namespace('fake_volume',
                                    'fake_name',
                                    'fake_new_name')

        self.client.send_request.assert_called_once_with(
            '/storage/namespaces', 'post', body=fake_body)

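The clone test above fixes the REST body shape. A minimal sketch of a client-side helper consistent with it; `client` stands for any object exposing `vserver` and `send_request` like the REST client in this change, and the committed method may differ in error handling:

# Illustrative sketch - not part of this commit.
def clone_namespace(client, volume, name, new_name):
    body = {
        'svm': {'name': client.vserver},
        'name': f'/vol/{volume}/{new_name}',
        'clone': {'source': {'name': f'/vol/{volume}/{name}'}},
    }
    client.send_request('/storage/namespaces', 'post', body=body)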
    def test_get_namespace_by_args(self):
        response = fake_client.GET_NAMESPACE_RESPONSE_REST

        lun_info_args = {
            'vserver': fake.VSERVER_NAME,
            'path': fake.LUN_PATH,
            'uuid': fake.UUID1}

        fake_query = {
            'fields': 'svm.name,location.volume.name,space.size,'
                      'location.qtree.name,name,os_type,'
                      'space.guarantee.requested,uuid,space.block_size',
            'svm.name': fake.VSERVER_NAME,
            'name': fake.LUN_PATH,
            'uuid': fake.UUID1,
        }

        expected_result = [
            {
                'Vserver': 'fake_vserver1',
                'Volume': 'fake_vol_001',
                'Size': 999999,
                'Qtree': '',
                'Path': '/vol/fake_vol_001/test',
                'OsType': 'linux',
                'SpaceReserved': True,
                'UUID': 'fake_uuid1',
                'BlockSize': 9999
            },
            {
                'Vserver': 'fake_vserver2',
                'Volume': 'fake_vol_002',
                'Size': 8888888,
                'Qtree': '',
                'Path': '/vol/fake_vol_002/test',
                'OsType': 'linux',
                'SpaceReserved': True,
                'UUID': 'fake_uuid2',
                'BlockSize': 8888
            },
        ]

        self.mock_object(self.client, 'send_request', return_value=response)

        result = self.client.get_namespace_by_args(**lun_info_args)

        self.client.send_request.assert_called_once_with(
            '/storage/namespaces', 'get', query=fake_query)
        self.assertEqual(expected_result, result)

    def test_get_namespace_by_args_no_response(self):
        response = fake_client.NO_RECORDS_RESPONSE_REST

        lun_info_args = {
            'vserver': fake.VSERVER_NAME,
            'path': fake.LUN_PATH,
            'uuid': fake.UUID1}

        fake_query = {
            'fields': 'svm.name,location.volume.name,space.size,'
                      'location.qtree.name,name,os_type,'
                      'space.guarantee.requested,uuid,space.block_size',
            'svm.name': fake.VSERVER_NAME,
            'name': fake.LUN_PATH,
            'uuid': fake.UUID1,
        }

        self.mock_object(self.client, 'send_request', return_value=response)

        result = self.client.get_namespace_by_args(**lun_info_args)

        self.client.send_request.assert_called_once_with(
            '/storage/namespaces', 'get', query=fake_query)
        self.assertEqual([], result)

    def test_namespace_resize(self):
        fake_body = {'space.size': 9999}
        fake_query = {'name': fake.LUN_PATH}

        self.mock_object(self.client, 'send_request')

        self.client.namespace_resize(fake.LUN_PATH, 9999)

        self.client.send_request.assert_called_once_with(
            '/storage/namespaces', 'patch', body=fake_body, query=fake_query)

    def test_get_namespace_sizes_by_volume(self):
        response = fake_client.GET_NAMESPACE_RESPONSE_REST

        fake_query = {
            'location.volume.name': 'fake_volume',
            'fields': 'space.size,name'
        }

        expected_result = [
            {
                'path': '/vol/fake_vol_001/test',
                'size': 999999,
            },
            {
                'path': '/vol/fake_vol_002/test',
                'size': 8888888,
            },
        ]

        self.mock_object(self.client, 'send_request', return_value=response)

        result = self.client.get_namespace_sizes_by_volume('fake_volume')

        self.client.send_request.assert_called_once_with(
            '/storage/namespaces', 'get', query=fake_query)
        self.assertEqual(expected_result, result)

    def test_get_namespace_sizes_by_volume_no_response(self):
        response = fake_client.NO_RECORDS_RESPONSE_REST

        fake_query = {
            'location.volume.name': 'fake_volume',
            'fields': 'space.size,name'
        }

        self.mock_object(self.client, 'send_request', return_value=response)

        result = self.client.get_namespace_sizes_by_volume('fake_volume')

        self.client.send_request.assert_called_once_with(
            '/storage/namespaces', 'get', query=fake_query)
        self.assertEqual([], result)

    def test_create_namespace(self):
        """Issues API request for creating namespace on volume."""
        self.mock_object(self.client, 'send_request')

        self.client.create_namespace(
            fake_client.VOLUME_NAME, fake_client.NAMESPACE_NAME,
            fake_client.VOLUME_SIZE_TOTAL, {'OsType': 'linux'})

        path = f'/vol/{fake_client.VOLUME_NAME}/{fake_client.NAMESPACE_NAME}'
        body = {
            'name': path,
            'space.size': str(fake_client.VOLUME_SIZE_TOTAL),
            'os_type': 'linux',
        }
        self.client.send_request.assert_called_once_with(
            '/storage/namespaces', 'post', body=body)

    def test_create_namespace_error(self):
        api_error = netapp_api.NaApiError(code=0)
        self.mock_object(self.client, 'send_request', side_effect=api_error)

        self.assertRaises(
            netapp_api.NaApiError,
            self.client.create_namespace,
            fake_client.VOLUME_NAME, fake_client.NAMESPACE_NAME,
            fake_client.VOLUME_SIZE_TOTAL, {'OsType': 'linux'})

    def test_get_subsystem_by_host(self):
        response = fake_client.GET_SUBSYSTEM_RESPONSE_REST
        self.mock_object(self.client, 'send_request',
                         return_value=response)

        res = self.client.get_subsystem_by_host(fake_client.HOST_NQN)

        expected_res = [
            {'name': fake_client.SUBSYSTEM, 'os_type': 'linux'}]
        self.assertEqual(expected_res, res)
        query = {
            'svm.name': self.client.vserver,
            'hosts.nqn': fake_client.HOST_NQN,
            'fields': 'name,os_type',
            'name': 'openstack-*',
        }
        self.client.send_request.assert_called_once_with(
            '/protocols/nvme/subsystems', 'get', query=query)

    def test_create_subsystem(self):
        self.mock_object(self.client, 'send_request')

        self.client.create_subsystem(fake_client.SUBSYSTEM, 'linux',
                                     fake_client.HOST_NQN)

        body = {
            'svm.name': self.client.vserver,
            'name': fake_client.SUBSYSTEM,
            'os_type': 'linux',
            'hosts': [{'nqn': fake_client.HOST_NQN}]
        }
        self.client.send_request.assert_called_once_with(
            '/protocols/nvme/subsystems', 'post', body=body)

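The two subsystem tests above pin down the lookup query and the creation body. How the library is expected to combine them (mirroring test__get_or_create_subsystem later in this commit); the helper below is an illustration, not the committed code:

# Illustrative sketch - not part of this commit.
import uuid


def get_or_create_subsystem(client, host_nqn, host_os_type):
    # Reuse an existing subsystem for this host's NQN when one exists.
    subsystems = client.get_subsystem_by_host(host_nqn)
    if subsystems:
        return subsystems[0]['name'], subsystems[0]['os_type']
    # Otherwise create one named 'openstack-<uuid>'.
    subsystem = 'openstack-' + str(uuid.uuid4())
    client.create_subsystem(subsystem, host_os_type, host_nqn)
    return subsystem, host_os_type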
    def test_get_namespace_map(self):
        response = fake_client.GET_SUBSYSTEM_MAP_RESPONSE_REST
        self.mock_object(self.client, 'send_request',
                         return_value=response)

        res = self.client.get_namespace_map(fake_client.NAMESPACE_NAME)

        expected_res = [
            {'subsystem': fake_client.SUBSYSTEM,
             'uuid': fake_client.FAKE_UUID,
             'vserver': fake_client.VSERVER_NAME}]
        self.assertEqual(expected_res, res)
        query = {
            'namespace.name': fake_client.NAMESPACE_NAME,
            'fields': 'subsystem.name,namespace.uuid,svm.name',
        }
        self.client.send_request.assert_called_once_with(
            '/protocols/nvme/subsystem-maps', 'get', query=query)

    def test_map_namespace(self):
        response = fake_client.GET_SUBSYSTEM_MAP_RESPONSE_REST
        self.mock_object(self.client, 'send_request',
                         return_value=response)

        res = self.client.map_namespace(fake_client.NAMESPACE_NAME,
                                        fake_client.SUBSYSTEM)

        self.assertEqual(fake_client.FAKE_UUID, res)
        body = {
            'namespace.name': fake_client.NAMESPACE_NAME,
            'subsystem.name': fake_client.SUBSYSTEM
        }
        self.client.send_request.assert_called_once_with(
            '/protocols/nvme/subsystem-maps', 'post', body=body,
            query={'return_records': 'true'})

    def test_map_namespace_error(self):
        api_error = netapp_api.NaApiError(code=0)
        self.mock_object(self.client, 'send_request', side_effect=api_error)

        self.assertRaises(
            netapp_api.NaApiError,
            self.client.map_namespace,
            fake_client.VOLUME_NAME, fake_client.SUBSYSTEM)

    @ddt.data(
        {'response': fake_client.GET_SUBSYSTEM_RESPONSE_REST,
         'expected': fake_client.TARGET_NQN},
        {'response': fake_client.NO_RECORDS_RESPONSE_REST,
         'expected': None})
    @ddt.unpack
    def test_get_nvme_subsystem_nqn(self, response, expected):
        self.mock_object(self.client, 'send_request',
                         return_value=response)

        res = self.client.get_nvme_subsystem_nqn(fake_client.SUBSYSTEM)

        self.assertEqual(expected, res)
        query = {
            'fields': 'target_nqn',
            'name': fake_client.SUBSYSTEM,
            'svm.name': self.client.vserver
        }
        self.client.send_request.assert_called_once_with(
            '/protocols/nvme/subsystems', 'get', query=query)

    def test_get_nvme_target_portals(self):
        response = fake_client.GET_INTERFACES_NVME_REST
        self.mock_object(self.client, 'send_request',
                         return_value=response)

        res = self.client.get_nvme_target_portals()

        expected = ["10.10.10.10"]
        self.assertEqual(expected, res)
        query = {
            'services': 'data_nvme_tcp',
            'fields': 'ip.address',
            'enabled': 'true',
        }
        self.client.send_request.assert_called_once_with(
            '/network/ip/interfaces', 'get', query=query)

    def test_unmap_namespace(self):
        self.mock_object(self.client, 'send_request')

        self.client.unmap_namespace(fake_client.NAMESPACE_NAME,
                                    fake_client.SUBSYSTEM)

        query = {
            'subsystem.name': fake_client.SUBSYSTEM,
            'namespace.name': fake_client.NAMESPACE_NAME,
        }
        self.client.send_request.assert_called_once_with(
            '/protocols/nvme/subsystem-maps', 'delete', query=query)

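test_get_nvme_target_portals above fixes the interface query. A sketch of the corresponding client helper, with the same query fields; illustrative only, since the committed method is not part of this excerpt:

# Illustrative sketch - not part of this commit.
def get_nvme_target_portals(client):
    query = {
        'services': 'data_nvme_tcp',
        'fields': 'ip.address',
        'enabled': 'true',
    }
    response = client.send_request('/network/ip/interfaces', 'get',
                                   query=query)
    return [record['ip']['address']
            for record in response.get('records', [])]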
@@ -22,7 +22,9 @@ from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
VOLUME_ID = 'f10d1a84-9b7b-427e-8fec-63c48b509a56'
LUN_ID = 'ee6b4cc7-477b-4016-aa0c-7127b4e3af86'
LUN_HANDLE = 'fake_lun_handle'
NAMESPACE_HANDLE = 'fake_namespace_handle'
LUN_NAME = 'lun1'
NAMESPACE_NAME = 'namespace1'
LUN_SIZE = 3
LUN_TABLE = {LUN_NAME: None}
SIZE = 1024

@@ -40,6 +42,7 @@ AGGREGATE = 'aggr1'
FLEXVOL = 'openstack-flexvol'
NFS_FILE_PATH = 'nfsvol'
PATH = '/vol/%s/%s' % (POOL_NAME, LUN_NAME)
PATH_NAMESPACE = '/vol/%s/%s' % (POOL_NAME, NAMESPACE_NAME)
IMAGE_FILE_ID = 'img-cache-imgid'
PROVIDER_LOCATION = 'fake_provider_location'
NFS_HOST = 'nfs-host1'

@@ -65,6 +68,12 @@ LUN_METADATA = {
    'Qtree': None,
    'Volume': POOL_NAME,
}
NAMESPACE_METADATA = {
    'OsType': None,
    'Path': PATH_NAMESPACE,
    'Qtree': None,
    'Volume': POOL_NAME,
}
VOLUME = {
    'name': LUN_NAME,
    'size': SIZE,

@@ -72,6 +81,13 @@ VOLUME = {
    'host': HOST_STRING,
    'attach_status': DETACHED,
}
NAMESPACE_VOLUME = {
    'name': NAMESPACE_NAME,
    'size': SIZE,
    'id': VOLUME_ID,
    'host': HOST_STRING,
    'attach_status': DETACHED,
}
NFS_VOLUME = {
    'name': NFS_FILE_PATH,
    'size': SIZE,

@@ -349,6 +365,8 @@ VOLUME_PATH = '/vol/%s/%s' % (NETAPP_VOLUME, VOLUME_NAME)
MOUNT_PATH = '168.10.16.11:/' + VOLUME_ID
SNAPSHOT_NAME = 'fake_snapshot_name'
SNAPSHOT_LUN_HANDLE = 'fake_snapshot_lun_handle'
SNAPSHOT_NAMESPACE_HANDLE = 'fake_snapshot_namespace_handle'

SNAPSHOT_MOUNT = '/fake/mount/path'

SNAPSHOT = {

@@ -381,6 +399,21 @@ LUN_WITH_METADATA = {
    }
}

NAMESPACE_WITH_METADATA = {
    'handle': 'vserver_fake:/vol/fake_flexvol/volume-fake-uuid',
    'name': 'volume-fake-uuid',
    'size': 20971520,
    'metadata': {
        'Vserver': 'vserver_fake',
        'Volume': 'fake_flexvol',
        'Qtree': None,
        'Path': '/vol/fake_flexvol/volume-fake-uuid',
        'OsType': 'linux',
        'SpaceReserved': 'false',
        'UUID': 'fake-uuid'
    }
}

VOLUME_REF = {'name': 'fake_vref_name', 'size': 42}

FAKE_CMODE_VOLUMES = ['open123', 'mixed', 'open321']

@@ -723,6 +756,7 @@ test_volume.state = {
test_volume.qos = {'qos_policy_group': None}
test_volume.host = 'fakehost@backbackend#fakepool'
test_volume.name = 'fakename'
test_volume.size = SIZE


class test_snapshot(object):

@@ -975,3 +1009,6 @@ ADAPTIVE_QOS_POLICY_GROUP_INFO_REST = {
}

REST_FIELDS = 'uuid,name,style'
SUBSYSTEM = 'openstack-fake-subsystem'
HOST_NQN = 'nqn.1992-01.example.com:string'
TARGET_NQN = 'nqn.1992-01.example.com:target'

@@ -0,0 +1,40 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Mock unit tests for the NetApp Data ONTAP NVMe storage driver."""

from unittest import mock

from cinder import context
from cinder.tests.unit import test
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import nvme_cmode


class NetAppCmodeNVMeDriverTestCase(test.TestCase):

    def setUp(self):
        super(NetAppCmodeNVMeDriverTestCase, self).setUp()

        kwargs = {
            'configuration': self.get_config_base(),
            'host': 'openstack@netappblock',
        }
        self.library = nvme_cmode.NetAppCmodeNVMeDriver(**kwargs)
        self.library.zapi_client = mock.Mock()
        self.zapi_client = self.library.zapi_client
        self.mock_request = mock.Mock()
        self.ctxt = context.RequestContext('fake', 'fake', auth_token=True)

    def get_config_base(self):
        return na_fakes.create_configuration()
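The driver module under test here, nvme_cmode.py, is added by this commit but not shown in this excerpt. A plausible outline of such a shim, following how the existing *_cmode drivers delegate to their libraries; the base-class wiring, method list and the driver-name string are assumptions:

# Illustrative sketch - not part of this commit.
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import nvme_library


@interface.volumedriver
class NetAppCmodeNVMeDriver(driver.BaseVD):
    """NetApp C-mode NVMe/TCP volume driver (sketch only)."""

    def __init__(self, *args, **kwargs):
        super(NetAppCmodeNVMeDriver, self).__init__(*args, **kwargs)
        # The label strings passed to the library are assumptions.
        self.library = nvme_library.NetAppNVMeStorageLibrary(
            'NetApp_NVMe_Cluster_direct', 'NVMe', **kwargs)

    def do_setup(self, context):
        self.library.do_setup(context)

    def check_for_setup_error(self):
        self.library.check_for_setup_error()

    def create_volume(self, volume):
        self.library.create_volume(volume)

    def initialize_connection(self, volume, connector):
        return self.library.initialize_connection(volume, connector)

    def terminate_connection(self, volume, connector, **kwargs):
        return self.library.terminate_connection(volume, connector)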
@@ -0,0 +1,930 @@
# Copyright (c) 2023 NetApp, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Mock unit tests for the NetApp block storage library"""

import copy
from unittest import mock
import uuid

import ddt
from oslo_utils import units

from cinder import context
from cinder import exception
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap import nvme_library
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import volume_utils


@ddt.ddt
class NetAppNVMeStorageLibraryTestCase(test.TestCase):

    def setUp(self):
        super(NetAppNVMeStorageLibraryTestCase, self).setUp()

        config = na_fakes.create_configuration_cmode()
        config.netapp_storage_protocol = 'nvme'
        config.netapp_login = 'admin'
        config.netapp_password = 'pass'
        config.netapp_server_hostname = '127.0.0.1'
        config.netapp_transport_type = 'https'
        config.netapp_server_port = '443'
        config.netapp_vserver = 'openstack'
        config.netapp_api_trace_pattern = 'fake_regex'

        kwargs = {
            'configuration': config,
            'host': 'openstack@netappnvme',
        }

        self.library = nvme_library.NetAppNVMeStorageLibrary(
            'driver', 'protocol', **kwargs)
        self.library.client = mock.Mock()
        self.client = self.library.client
        self.mock_request = mock.Mock()
        self.ctxt = context.RequestContext('fake', 'fake', auth_token=True)
        self.vserver = fake.VSERVER_NAME

        self.library.perf_library = mock.Mock()
        self.library.ssc_library = mock.Mock()
        self.library.vserver = mock.Mock()

        # fakes objects.
        self.fake_namespace = nvme_library.NetAppNamespace(
            fake.NAMESPACE_HANDLE, fake.NAMESPACE_NAME, fake.SIZE,
            fake.NAMESPACE_METADATA)
        self.fake_snapshot_namespace = nvme_library.NetAppNamespace(
            fake.SNAPSHOT_NAMESPACE_HANDLE, fake.SNAPSHOT_NAME, fake.SIZE,
            None)
        self.mock_object(self.library, 'namespace_table')
        self.library.namespace_table = {
            fake.NAMESPACE_NAME: self.fake_namespace,
            fake.SNAPSHOT_NAME: self.fake_snapshot_namespace,
        }

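The setUp above builds NetAppNamespace objects from a handle, name, size and metadata dict, and later tests read those attributes back. A sketch of that value object as the tests use it; the extra accessor is an assumption, not confirmed by this excerpt:

# Illustrative sketch - not part of this commit.
class NetAppNamespace(object):
    """ONTAP NVMe namespace as the driver's in-memory table tracks it."""

    def __init__(self, handle, name, size, metadata_dict):
        self.handle = handle
        self.name = name
        self.size = size
        self.metadata = metadata_dict or {}

    def get_metadata_property(self, prop):
        # Convenience accessor mirroring the LUN object in the existing
        # block library; an assumption for illustration.
        return self.metadata.get(prop)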
@mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock())
|
||||
@mock.patch.object(capabilities.CapabilitiesLibrary,
|
||||
'cluster_user_supported')
|
||||
@mock.patch.object(capabilities.CapabilitiesLibrary,
|
||||
'check_api_permissions')
|
||||
@mock.patch.object(na_utils, 'check_flags')
|
||||
def test_do_setup_san_unconfigured(self, mock_check_flags,
|
||||
mock_check_api_permissions,
|
||||
mock_cluster_user_supported):
|
||||
self.library.configuration.netapp_namespace_ostype = None
|
||||
self.library.configuration.netapp_host_type = None
|
||||
self.library.backend_name = 'fake_backend'
|
||||
fake_client = mock.Mock()
|
||||
fake_client.vserver = 'fake_vserver'
|
||||
self.mock_object(dot_utils, 'get_client_for_backend',
|
||||
return_value=fake_client)
|
||||
|
||||
self.library.do_setup(mock.Mock())
|
||||
|
||||
self.assertTrue(mock_check_flags.called)
|
||||
mock_check_api_permissions.assert_called_once_with()
|
||||
mock_cluster_user_supported.assert_called_once_with()
|
||||
self.assertEqual('linux', self.library.namespace_ostype)
|
||||
self.assertEqual('linux', self.library.host_type)
|
||||
dot_utils.get_client_for_backend.assert_called_once_with(
|
||||
'fake_backend', force_rest=True)
|
||||
|
||||
def test_check_for_setup_error(self):
|
||||
self.mock_object(self.library, '_get_flexvol_to_pool_map',
|
||||
return_value=fake.POOL_NAME)
|
||||
self.mock_object(self.library, '_add_looping_tasks')
|
||||
self.library.namespace_ostype = 'linux'
|
||||
self.library.host_type = 'linux'
|
||||
self.mock_object(self.library.client, 'get_namespace_list',
|
||||
return_value='fake_namespace_list')
|
||||
self.mock_object(self.library, '_extract_and_populate_namespaces')
|
||||
self.mock_object(self.library.loopingcalls, 'start_tasks')
|
||||
|
||||
self.library.check_for_setup_error()
|
||||
|
||||
self.library._get_flexvol_to_pool_map.assert_called_once_with()
|
||||
self.library._add_looping_tasks.assert_called_once_with()
|
||||
self.library.client.get_namespace_list.assert_called_once_with()
|
||||
self.library._extract_and_populate_namespaces.assert_called_once_with(
|
||||
'fake_namespace_list')
|
||||
self.library.loopingcalls.start_tasks.assert_called_once_with()
|
||||
|
||||
@ddt.data(
|
||||
{'pool_map': None, 'namespace': 'linux', 'host': 'linux'},
|
||||
{'pool_map': 'fake_map', 'namespace': 'fake', 'host': 'linux'},
|
||||
{'pool_map': 'fake_map', 'namespace': 'linux', 'host': 'fake'})
|
||||
@ddt.unpack
|
||||
def test_check_for_setup_error_error(self, pool_map, namespace, host):
|
||||
self.mock_object(self.library, '_get_flexvol_to_pool_map',
|
||||
return_value=pool_map)
|
||||
self.library.namespace_ostype = namespace
|
||||
self.library.host_type = host
|
||||
self.mock_object(self.library, '_add_looping_tasks')
|
||||
|
||||
self.assertRaises(
|
||||
na_utils.NetAppDriverException,
|
||||
self.library.check_for_setup_error)
|
||||
|
||||
def test_create_volume(self):
|
||||
volume_size_in_bytes = int(fake.SIZE) * units.Gi
|
||||
self.mock_object(volume_utils, 'extract_host',
|
||||
return_value=fake.POOL_NAME)
|
||||
self.mock_object(self.library.client, 'create_namespace')
|
||||
self.mock_object(self.library, '_create_namespace_handle')
|
||||
self.mock_object(self.library, '_add_namespace_to_table')
|
||||
|
||||
volume1 = copy.deepcopy(fake.test_volume)
|
||||
self.library.create_volume(volume1)
|
||||
|
||||
fake_metadata = {
|
||||
'OsType': self.library.namespace_ostype,
|
||||
'Path': '/vol/aggr1/fakename',
|
||||
'Volume': 'aggr1',
|
||||
'Qtree': None
|
||||
}
|
||||
self.library.client.create_namespace.assert_called_once_with(
|
||||
fake.POOL_NAME, 'fakename', volume_size_in_bytes, fake_metadata)
|
||||
self.library._create_namespace_handle.assert_called_once_with(
|
||||
fake_metadata)
|
||||
|
||||
def test_create_namespace_handle(self):
|
||||
self.library.vserver = fake.VSERVER_NAME
|
||||
res = self.library._create_namespace_handle(fake.NAMESPACE_METADATA)
|
||||
|
||||
self.assertEqual(f'{fake.VSERVER_NAME}:{fake.PATH_NAMESPACE}', res)
|
||||
|
||||
def test__extract_namespace_info(self):
|
||||
self.mock_object(self.library, '_create_namespace_handle',
|
||||
return_value=fake.NAMESPACE_HANDLE)
|
||||
|
||||
namespace = {'Path': fake.PATH_NAMESPACE, 'Size': fake.SIZE}
|
||||
res = self.library._extract_namespace_info(namespace)
|
||||
|
||||
self.assertEqual(fake.NAMESPACE_NAME, res.name)
|
||||
self.library._create_namespace_handle.assert_called_once_with(
|
||||
namespace)
|
||||
|
||||
def test__extract_and_populate_namespaces(self):
|
||||
self.mock_object(self.library, '_extract_namespace_info',
|
||||
return_value='fake_namespace')
|
||||
self.mock_object(self.library, '_add_namespace_to_table')
|
||||
|
||||
self.library._extract_and_populate_namespaces([fake.NAMESPACE_NAME])
|
||||
|
||||
self.library._extract_namespace_info.assert_called_once_with(
|
||||
fake.NAMESPACE_NAME)
|
||||
self.library._add_namespace_to_table.assert_called_once_with(
|
||||
'fake_namespace')
|
||||
|
||||
def test__add_namespace_to_table(self):
|
||||
namespace = nvme_library.NetAppNamespace(
|
||||
fake.NAMESPACE_HANDLE, 'fake_namespace2', fake.SIZE, None)
|
||||
self.library._add_namespace_to_table(namespace)
|
||||
|
||||
has_namespace = 'fake_namespace2' in self.library.namespace_table
|
||||
self.assertTrue(has_namespace)
|
||||
self.assertEqual(namespace,
|
||||
self.library.namespace_table['fake_namespace2'])
|
||||
|
||||
def test__add_namespace_to_table_error(self):
|
||||
self.assertRaises(
|
||||
exception.VolumeBackendAPIException,
|
||||
self.library._add_namespace_to_table,
|
||||
'fake'
|
||||
)
|
||||
|
||||
def test__get_namespace_from_table_error(self):
|
||||
self.mock_object(self.library.client, 'get_namespace_list',
|
||||
return_value='fake_list')
|
||||
self.mock_object(self.library, '_extract_and_populate_namespaces')
|
||||
|
||||
self.assertRaises(
|
||||
exception.VolumeNotFound,
|
||||
self.library._get_namespace_from_table,
|
||||
'fake')
|
||||
|
||||
self.library.client.get_namespace_list.assert_called_once_with()
|
||||
self.library._extract_and_populate_namespaces.assert_called_once_with(
|
||||
'fake_list')
|
||||
|
||||
def test__get_namespace_from_table(self):
|
||||
|
||||
res = self.library._get_namespace_from_table(fake.NAMESPACE_NAME)
|
||||
|
||||
self.assertEqual(self.fake_namespace, res)
|
||||
|
||||
@ddt.data(exception.VolumeNotFound, netapp_api.NaApiError)
|
||||
def test__get_namespace_attr_error(self, error_obj):
|
||||
self.mock_object(self.library, '_get_namespace_from_table',
|
||||
side_effect=error_obj)
|
||||
|
||||
res = self.library._get_namespace_attr('namespace', 'name')
|
||||
|
||||
self.assertIsNone(res)
|
||||
|
||||
def test__get_namespace_attr(self):
|
||||
self.mock_object(self.library, '_get_namespace_from_table',
|
||||
return_value=self.fake_namespace)
|
||||
|
||||
res = self.library._get_namespace_attr('namespace', 'name')
|
||||
|
||||
self.assertEqual(fake.NAMESPACE_NAME, res)
|
||||
|
||||
def test_create_volume_error(self):
|
||||
self.mock_object(volume_utils, 'extract_host',
|
||||
return_value=fake.POOL_NAME)
|
||||
self.mock_object(self.library.client, 'create_namespace',
|
||||
side_effect=exception.VolumeBackendAPIException)
|
||||
self.mock_object(self.library, '_create_namespace_handle')
|
||||
self.mock_object(self.library, '_add_namespace_to_table')
|
||||
|
||||
self.assertRaises(
|
||||
exception.VolumeBackendAPIException,
|
||||
self.library.create_volume,
|
||||
copy.deepcopy(fake.test_volume))
|
||||
|
||||
def test__update_ssc(self):
|
||||
mock_get_flexvol = self.mock_object(
|
||||
self.library, '_get_flexvol_to_pool_map',
|
||||
return_value='fake_pool_map')
|
||||
self.library.ssc_library.update_ssc = mock.Mock()
|
||||
|
||||
self.library._update_ssc()
|
||||
|
||||
mock_get_flexvol.assert_called_once_with()
|
||||
self.library.ssc_library.update_ssc.assert_called_once_with(
|
||||
'fake_pool_map')
|
||||
|
||||
def test__find_mapped_namespace_subsystem(self):
|
||||
self.mock_object(self.library.client, 'get_subsystem_by_host',
|
||||
return_value=[{'name': fake.SUBSYSTEM}])
|
||||
self.mock_object(
|
||||
self.library.client, 'get_namespace_map',
|
||||
return_value=[{
|
||||
'subsystem': fake.SUBSYSTEM,
|
||||
'uuid': fake.UUID1
|
||||
}])
|
||||
|
||||
subsystem, n_uuid = self.library._find_mapped_namespace_subsystem(
|
||||
fake.NAMESPACE_NAME, fake.HOST_NQN)
|
||||
|
||||
self.assertEqual(fake.SUBSYSTEM, subsystem)
|
||||
self.assertEqual(fake.UUID1, n_uuid)
|
||||
self.library.client.get_subsystem_by_host.assert_called_once_with(
|
||||
fake.HOST_NQN)
|
||||
self.library.client.get_namespace_map.assert_called_once_with(
|
||||
fake.NAMESPACE_NAME)
|
||||
|
||||
def test_delete_volume(self):
|
||||
self.mock_object(self.library, '_delete_namespace')
|
||||
|
||||
self.library.delete_volume(fake.NAMESPACE_VOLUME)
|
||||
|
||||
self.library._delete_namespace.assert_called_once_with(
|
||||
fake.NAMESPACE_NAME)
|
||||
|
||||
def test__delete_namespace(self):
|
||||
namespace = copy.deepcopy(fake.NAMESPACE_WITH_METADATA)
|
||||
self.mock_object(self.library, '_get_namespace_attr',
|
||||
return_value=namespace['metadata'])
|
||||
self.mock_object(self.library.client, 'destroy_namespace')
|
||||
|
||||
self.library._delete_namespace(fake.NAMESPACE_NAME)
|
||||
|
||||
self.library._get_namespace_attr.assert_called_once_with(
|
||||
fake.NAMESPACE_NAME, 'metadata')
|
||||
self.library.client.destroy_namespace.assert_called_once_with(
|
||||
namespace['metadata']['Path'])
|
||||
has_namespace = fake.NAMESPACE_NAME in self.library.namespace_table
|
||||
self.assertFalse(has_namespace)
|
||||
|
||||
def test__delete_namespace_not_found(self):
|
||||
namespace = copy.deepcopy(fake.NAMESPACE_WITH_METADATA)
|
||||
self.mock_object(self.library, '_get_namespace_attr',
|
||||
return_value=namespace['metadata'])
|
||||
error = netapp_api.NaApiError(
|
||||
code=netapp_api.REST_NAMESPACE_EOBJECTNOTFOUND[0])
|
||||
self.mock_object(self.library.client, 'destroy_namespace',
|
||||
side_effect=error)
|
||||
|
||||
self.library._delete_namespace(fake.NAMESPACE_NAME)
|
||||
|
||||
self.library._get_namespace_attr.assert_called_once_with(
|
||||
fake.NAMESPACE_NAME, 'metadata')
|
||||
self.library.client.destroy_namespace.assert_called_once_with(
|
||||
namespace['metadata']['Path'])
|
||||
has_namespace = fake.NAMESPACE_NAME in self.library.namespace_table
|
||||
self.assertFalse(has_namespace)
|
||||
|
||||
def test__delete_namespace_error(self):
|
||||
namespace = copy.deepcopy(fake.NAMESPACE_WITH_METADATA)
|
||||
self.mock_object(self.library, '_get_namespace_attr',
|
||||
return_value=namespace['metadata'])
|
||||
self.mock_object(self.library.client, 'destroy_namespace',
|
||||
side_effect=netapp_api.NaApiError)
|
||||
|
||||
self.assertRaises(na_utils.NetAppDriverException,
|
||||
self.library._delete_namespace,
|
||||
fake.NAMESPACE_NAME)
|
||||
|
||||
def test__delete_namespace_no_metadata(self):
|
||||
self.mock_object(self.library, '_get_namespace_attr',
|
||||
return_value=None)
|
||||
self.mock_object(self.library.client, 'destroy_namespace')
|
||||
|
||||
self.library._delete_namespace(fake.NAMESPACE_NAME)
|
||||
|
||||
self.library._get_namespace_attr.assert_called_once_with(
|
||||
fake.NAMESPACE_NAME, 'metadata')
|
||||
self.library.client.destroy_namespace.assert_not_called()
|
||||
|
||||
def test_add_looping_tasks(self):
|
||||
mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task')
|
||||
self.mock_object(self.library, '_update_ssc')
|
||||
|
||||
self.library._add_looping_tasks()
|
||||
|
||||
self.library._update_ssc.assert_called_once_with()
|
||||
mock_add_task.assert_has_calls([
|
||||
mock.call(self.library._update_ssc, loopingcalls.ONE_HOUR,
|
||||
loopingcalls.ONE_HOUR),
|
||||
mock.call(self.library._handle_ems_logging,
|
||||
loopingcalls.ONE_HOUR)])
|
||||
|
||||
def test_handle_ems_logging(self):
|
||||
volume_list = ['vol0', 'vol1', 'vol2']
|
||||
self.mock_object(
|
||||
self.library.ssc_library, 'get_ssc_flexvol_names',
|
||||
return_value=volume_list)
|
||||
self.mock_object(
|
||||
dot_utils, 'build_ems_log_message_0',
|
||||
return_value='fake_base_ems_log_message')
|
||||
self.mock_object(
|
||||
dot_utils, 'build_ems_log_message_1',
|
||||
return_value='fake_pool_ems_log_message')
|
||||
mock_send_ems_log_message = self.mock_object(
|
||||
self.client, 'send_ems_log_message')
|
||||
|
||||
self.library._handle_ems_logging()
|
||||
|
||||
mock_send_ems_log_message.assert_has_calls([
|
||||
mock.call('fake_base_ems_log_message'),
|
||||
mock.call('fake_pool_ems_log_message'),
|
||||
])
|
||||
dot_utils.build_ems_log_message_0.assert_called_once_with(
|
||||
self.library.driver_name, self.library.app_version)
|
||||
dot_utils.build_ems_log_message_1.assert_called_once_with(
|
||||
self.library.driver_name, self.library.app_version,
|
||||
self.library.vserver, volume_list, [])
|
||||
|
||||
def test_get_pool(self):
|
||||
namespace = copy.deepcopy(fake.NAMESPACE_WITH_METADATA)
|
||||
self.mock_object(self.library, '_get_namespace_attr',
|
||||
return_value=namespace['metadata'])
|
||||
|
||||
res = self.library.get_pool(fake.VOLUME)
|
||||
|
||||
self.assertEqual('fake_flexvol', res)
|
||||
self.library._get_namespace_attr.assert_called_once_with(
|
||||
fake.LUN_NAME, 'metadata')
|
||||
|
||||
def test_delete_snapshot(self):
|
||||
mock__delete = self.mock_object(self.library, '_delete_namespace')
|
||||
|
||||
self.library.delete_snapshot(fake.SNAPSHOT)
|
||||
|
||||
mock__delete.assert_called_once_with(fake.SNAPSHOT_NAME)
|
||||
|
||||
def test_create_volume_from_snapshot(self):
|
||||
self.mock_object(self.library, '_clone_source_to_destination')
|
||||
|
||||
self.library.create_volume_from_snapshot(fake.NAMESPACE_VOLUME,
|
||||
fake.SNAPSHOT)
|
||||
|
||||
self.library._clone_source_to_destination.assert_called_once_with(
|
||||
{'name': fake.SNAPSHOT_NAME, 'size': fake.SIZE},
|
||||
fake.NAMESPACE_VOLUME)
|
||||
|
||||
def test_create_cloned_volume(self):
|
||||
self.mock_object(self.library, '_get_namespace_from_table',
|
||||
return_value=self.fake_namespace)
|
||||
self.mock_object(self.library, '_clone_source_to_destination')
|
||||
|
||||
src_volume = {'size': fake.SIZE, 'name': 'fake_name'}
|
||||
self.library.create_cloned_volume(fake.NAMESPACE_VOLUME, src_volume)
|
||||
|
||||
self.library._get_namespace_from_table.assert_called_once_with(
|
||||
'fake_name')
|
||||
self.library._clone_source_to_destination.assert_called_once_with(
|
||||
{'name': fake.NAMESPACE_NAME, 'size': fake.SIZE},
|
||||
fake.NAMESPACE_VOLUME)
|
||||
|
||||
def test_clone_source_to_destination(self):
|
||||
self.mock_object(self.library, '_clone_namespace')
|
||||
self.mock_object(self.library, '_extend_volume')
|
||||
self.mock_object(self.library, 'delete_volume')
|
||||
|
||||
source_vol = {'size': fake.SIZE, 'name': 'fake_source'}
|
||||
dest_size = fake.SIZE + 12
|
||||
dest_vol = {'size': dest_size, 'name': 'fake_dest'}
|
||||
self.library._clone_source_to_destination(source_vol, dest_vol)
|
||||
|
||||
self.library._clone_namespace.assert_called_once_with(
|
||||
'fake_source', 'fake_dest')
|
||||
self.library._extend_volume.assert_called_once_with(
|
||||
dest_vol, dest_size)
|
||||
self.library.delete_volume.assert_not_called()
|
||||
|
||||
def test_clone_source_to_destination_clone_error(self):
|
||||
self.mock_object(self.library, '_clone_namespace',
|
||||
side_effect=exception.VolumeBackendAPIException)
|
||||
self.mock_object(self.library, '_extend_volume')
|
||||
self.mock_object(self.library, 'delete_volume')
|
||||
|
||||
source_vol = {'size': fake.SIZE, 'name': 'fake_source'}
|
||||
dest_size = fake.SIZE + 12
|
||||
dest_vol = {'size': dest_size, 'name': 'fake_dest'}
|
||||
self.assertRaises(
|
||||
exception.VolumeBackendAPIException,
|
||||
self.library._clone_source_to_destination,
|
||||
source_vol, dest_vol)
|
||||
|
||||
def test_clone_source_to_destination_extend_error(self):
|
||||
self.mock_object(self.library, '_clone_namespace')
|
||||
self.mock_object(self.library, '_extend_volume',
|
||||
side_effect=exception.VolumeBackendAPIException)
|
||||
self.mock_object(self.library, 'delete_volume')
|
||||
|
||||
source_vol = {'size': fake.SIZE, 'name': 'fake_source'}
|
||||
dest_size = fake.SIZE + 12
|
||||
dest_vol = {'size': dest_size, 'name': 'fake_dest'}
|
||||
self.assertRaises(
|
||||
exception.VolumeBackendAPIException,
|
||||
self.library._clone_source_to_destination,
|
||||
source_vol, dest_vol)
|
||||
|
||||
@ddt.data(True, False)
|
||||
def test_get_volume_stats(self, refresh):
|
||||
self.library._stats = 'fake_stats'
|
||||
self.mock_object(self.library, '_update_volume_stats')
|
||||
|
||||
res = self.library.get_volume_stats(refresh, filter_function='filter',
|
||||
goodness_function='good')
|
||||
|
||||
self.assertEqual('fake_stats', res)
|
||||
if refresh:
|
||||
self.library._update_volume_stats.assert_called_once_with(
|
||||
filter_function='filter', goodness_function='good')
|
||||
else:
|
||||
self.library._update_volume_stats.assert_not_called()
|
||||
|
||||
def test__update_volume_stats(self):
|
||||
|
||||
self.library.VERSION = '1.0.0'
|
||||
self.library.driver_protocol = 'nvme'
|
||||
self.mock_object(self.library, '_get_pool_stats',
|
||||
return_value='fake_pools')
|
||||
self.library._update_volume_stats(filter_function='filter',
|
||||
goodness_function='good')
|
||||
|
||||
expected_ssc = {
|
||||
'volume_backend_name': 'driver',
|
||||
'vendor_name': 'NetApp',
|
||||
'driver_version': '1.0.0',
|
||||
'pools': 'fake_pools',
|
||||
'sparse_copy_volume': True,
|
||||
'replication_enabled': False,
|
||||
'storage_protocol': 'nvme',
|
||||
}
|
||||
self.assertEqual(expected_ssc, self.library._stats)
|
||||
|
||||
@ddt.data({'cluster_credentials': False,
|
||||
'report_provisioned_capacity': False},
|
||||
{'cluster_credentials': True,
|
||||
'report_provisioned_capacity': True})
|
||||
@ddt.unpack
|
||||
def test_get_pool_stats(self, cluster_credentials,
|
||||
report_provisioned_capacity):
|
||||
self.library.using_cluster_credentials = cluster_credentials
|
||||
conf = self.library.configuration
|
||||
conf.netapp_driver_reports_provisioned_capacity = (
|
||||
report_provisioned_capacity)
|
||||
|
||||
ssc = {
|
||||
'vola': {
|
||||
'pool_name': 'vola',
|
||||
'thick_provisioning_support': True,
|
||||
'thin_provisioning_support': False,
|
||||
'netapp_thin_provisioned': 'false',
|
||||
'netapp_compression': 'false',
|
||||
'netapp_mirrored': 'false',
|
||||
'netapp_dedup': 'true',
|
||||
'netapp_aggregate': 'aggr1',
|
||||
'netapp_raid_type': 'raid_dp',
|
||||
'netapp_disk_type': 'SSD',
|
||||
'netapp_is_flexgroup': 'false',
|
||||
},
|
||||
}
|
||||
mock_get_ssc = self.mock_object(self.library.ssc_library,
|
||||
'get_ssc',
|
||||
return_value=ssc)
|
||||
mock_get_aggrs = self.mock_object(self.library.ssc_library,
|
||||
'get_ssc_aggregates',
|
||||
return_value=['aggr1'])
|
||||
|
||||
self.library.reserved_percentage = 5
|
||||
self.library.max_over_subscription_ratio = 10
|
||||
self.library.perf_library.get_node_utilization_for_pool = (
|
||||
mock.Mock(return_value=30.0))
|
||||
mock_capacities = {
|
||||
'size-total': 10737418240.0,
|
||||
'size-available': 2147483648.0,
|
||||
}
|
||||
namespaces_provisioned_cap = [{
|
||||
'path': '/vol/volume-ae947c9b-2392-4956-b373-aaac4521f37e',
|
||||
'size': 5368709120.0 # 5GB
|
||||
}, {
|
||||
'path': '/vol/snapshot-527eedad-a431-483d-b0ca-18995dd65b66',
|
||||
'size': 1073741824.0 # 1GB
|
||||
}]
|
||||
self.mock_object(self.client,
|
||||
'get_flexvol_capacity',
|
||||
return_value=mock_capacities)
|
||||
self.mock_object(self.client,
|
||||
'get_namespace_sizes_by_volume',
|
||||
return_value=namespaces_provisioned_cap)
|
||||
self.mock_object(self.client,
|
||||
'get_flexvol_dedupe_used_percent',
|
||||
return_value=55.0)
|
||||
|
||||
aggr_capacities = {
|
||||
'aggr1': {
|
||||
'percent-used': 45,
|
||||
'size-available': 59055800320.0,
|
||||
'size-total': 107374182400.0,
|
||||
},
|
||||
}
|
||||
mock_get_aggr_capacities = self.mock_object(
|
||||
self.client, 'get_aggregate_capacities',
|
||||
return_value=aggr_capacities)
|
||||
|
||||
result = self.library._get_pool_stats(filter_function='filter',
|
||||
goodness_function='goodness')
|
||||
|
||||
expected = [{
|
||||
'pool_name': 'vola',
|
||||
'QoS_support': False,
|
||||
'consistencygroup_support': False,
|
||||
'consistent_group_snapshot_enabled': False,
|
||||
'reserved_percentage': 5,
|
||||
'max_over_subscription_ratio': 10,
|
||||
'multiattach': False,
|
||||
'total_capacity_gb': 10.0,
|
||||
'free_capacity_gb': 2.0,
|
||||
'netapp_dedupe_used_percent': 55.0,
|
||||
'netapp_aggregate_used_percent': 45,
|
||||
'utilization': 30.0,
|
||||
'filter_function': 'filter',
|
||||
'goodness_function': 'goodness',
|
||||
'thick_provisioning_support': True,
|
||||
'thin_provisioning_support': False,
|
||||
'netapp_thin_provisioned': 'false',
|
||||
'netapp_compression': 'false',
|
||||
'netapp_mirrored': 'false',
|
||||
'netapp_dedup': 'true',
|
||||
'netapp_aggregate': 'aggr1',
|
||||
'netapp_raid_type': 'raid_dp',
|
||||
'netapp_disk_type': 'SSD',
|
||||
'online_extend_support': False,
|
||||
'netapp_is_flexgroup': 'false',
|
||||
}]
|
||||
if report_provisioned_capacity:
|
||||
expected[0].update({'provisioned_capacity_gb': 5.0})
|
||||
|
||||
if not cluster_credentials:
|
||||
expected[0].update({
|
||||
'netapp_aggregate_used_percent': 0,
|
||||
'netapp_dedupe_used_percent': 0.0
|
||||
})
|
||||
|
||||
self.assertEqual(expected, result)
|
||||
mock_get_ssc.assert_called_once_with()
|
||||
if cluster_credentials:
|
||||
mock_get_aggrs.assert_called_once_with()
|
||||
mock_get_aggr_capacities.assert_called_once_with(['aggr1'])
|
||||
|
||||
@ddt.data({}, None)
|
||||
def test_get_pool_stats_no_ssc_vols(self, ssc):
|
||||
|
||||
mock_get_ssc = self.mock_object(self.library.ssc_library,
|
||||
'get_ssc',
|
||||
return_value=ssc)
|
||||
|
||||
pools = self.library._get_pool_stats()
|
||||
|
||||
self.assertListEqual([], pools)
|
||||
mock_get_ssc.assert_called_once_with()
|
||||
|
||||
@ddt.data(r'open+|demix+', 'open.+', r'.+\d', '^((?!mix+).)*$',
|
||||
'open123, open321')
|
||||
def test_get_pool_map_match_selected_pools(self, patterns):
|
||||
|
||||
self.library.configuration.netapp_pool_name_search_pattern = patterns
|
||||
mock_list_flexvols = self.mock_object(
|
||||
self.library.client, 'list_flexvols',
|
||||
return_value=fake.FAKE_CMODE_VOLUMES)
|
||||
|
||||
result = self.library._get_flexvol_to_pool_map()
|
||||
|
||||
expected = {
|
||||
'open123': {
|
||||
'pool_name': 'open123',
|
||||
},
|
||||
'open321': {
|
||||
'pool_name': 'open321',
|
||||
},
|
||||
}
|
||||
self.assertEqual(expected, result)
|
||||
mock_list_flexvols.assert_called_once_with()
|
||||
|
||||
@ddt.data('', 'mix.+|open.+', '.+', 'open123, mixed, open321',
|
||||
'.*?')
|
||||
def test_get_pool_map_match_all_pools(self, patterns):
|
||||
|
||||
self.library.configuration.netapp_pool_name_search_pattern = patterns
|
||||
mock_list_flexvols = self.mock_object(
|
||||
self.library.client, 'list_flexvols',
|
||||
return_value=fake.FAKE_CMODE_VOLUMES)
|
||||
|
||||
result = self.library._get_flexvol_to_pool_map()
|
||||
|
||||
self.assertEqual(fake.FAKE_CMODE_POOL_MAP, result)
|
||||
mock_list_flexvols.assert_called_once_with()
|
||||
|
||||
def test_get_pool_map_invalid_conf(self):
|
||||
"""Verify an exception is raised if the regex pattern is invalid"""
|
||||
self.library.configuration.netapp_pool_name_search_pattern = '(.+'
|
||||
|
||||
self.assertRaises(exception.InvalidConfigurationValue,
|
||||
self.library._get_flexvol_to_pool_map)
|
||||
|
||||
@ddt.data('abc|stackopen|openstack|abc*', 'abc', 'stackopen', 'openstack',
|
||||
'abc*', '^$')
|
||||
def test_get_pool_map_non_matching_patterns(self, patterns):
|
||||
|
||||
self.library.configuration.netapp_pool_name_search_pattern = patterns
|
||||
mock_list_flexvols = self.mock_object(
|
||||
self.library.client, 'list_flexvols',
|
||||
return_value=fake.FAKE_CMODE_VOLUMES)
|
||||
|
||||
result = self.library._get_flexvol_to_pool_map()
|
||||
|
||||
self.assertEqual({}, result)
|
||||
mock_list_flexvols.assert_called_once_with()
|
||||
|
||||
def test_create_snapshot(self):
|
||||
self.mock_object(self.library, '_create_snapshot')
|
||||
|
||||
self.library.create_snapshot('fake_snap')
|
||||
|
||||
self.library._create_snapshot.assert_called_once_with('fake_snap')
|
||||
|
||||
def test__create_snapshot(self):
|
||||
self.mock_object(self.library, '_get_namespace_from_table',
|
||||
return_value=self.fake_namespace)
|
||||
self.mock_object(self.library, '_clone_namespace')
|
||||
|
||||
self.library._create_snapshot(fake.SNAPSHOT)
|
||||
|
||||
self.library._get_namespace_from_table.assert_called_once_with(
|
||||
fake.VOLUME_NAME)
|
||||
self.library._clone_namespace.assert_called_once_with(
|
||||
fake.NAMESPACE_NAME, fake.SNAPSHOT_NAME)
|
||||
|
||||
def test__clone_namespace_error(self):
|
||||
self.mock_object(self.library, '_get_namespace_attr',
|
||||
return_value=fake.NAMESPACE_METADATA)
|
||||
self.mock_object(self.library.client, 'clone_namespace')
|
||||
self.mock_object(self.library.client, 'get_namespace_by_args',
|
||||
return_value=[])
|
||||
|
||||
self.assertRaises(
|
||||
exception.VolumeBackendAPIException,
|
||||
self.library._clone_namespace,
|
||||
fake.NAMESPACE_NAME,
|
||||
'fake_new_name')
|
||||
|
||||
def test__clone_namespace(self):
|
||||
self.mock_object(self.library, '_get_namespace_attr',
|
||||
return_value=fake.NAMESPACE_METADATA)
|
||||
self.mock_object(self.library.client, 'clone_namespace')
|
||||
fake_namespace_res = {
|
||||
'Vserver': fake.VSERVER_NAME,
|
||||
'Path': fake.NAMESPACE_NAME,
|
||||
'Size': 1024
|
||||
}
|
||||
self.mock_object(self.library.client, 'get_namespace_by_args',
|
||||
return_value=[fake_namespace_res])
|
||||
self.mock_object(self.library, '_add_namespace_to_table')
|
||||
|
||||
self.library._clone_namespace(fake.NAMESPACE_NAME, 'fake_new_name')
|
||||
|
||||
self.library._get_namespace_attr.assert_called_once_with(
|
||||
fake.NAMESPACE_NAME, 'metadata')
|
||||
self.library.client.clone_namespace.assert_called_once_with(
|
||||
fake.POOL_NAME, fake.NAMESPACE_NAME, 'fake_new_name')
|
||||
self.library.client.get_namespace_by_args.assert_called_once()
|
||||
self.library._add_namespace_to_table.assert_called_once()
|
||||
|
||||
def test_ensure_export(self):
|
||||
self.mock_object(self.library, '_get_namespace_attr',
|
||||
return_value='fake_handle')
|
||||
|
||||
res = self.library.ensure_export(mock.Mock(), fake.NAMESPACE_VOLUME)
|
||||
|
||||
self.assertEqual({'provider_location': 'fake_handle'}, res)
|
||||
self.library._get_namespace_attr.assert_called_once_with(
|
||||
fake.NAMESPACE_NAME, 'handle')
|
||||
|
||||
def test_create_export(self):
|
||||
self.mock_object(self.library, '_get_namespace_attr',
|
||||
return_value='fake_handle')
|
||||
|
||||
res = self.library.create_export(mock.Mock(), fake.NAMESPACE_VOLUME)
|
||||
|
||||
self.assertEqual({'provider_location': 'fake_handle'}, res)
|
||||
self.library._get_namespace_attr.assert_called_once_with(
|
||||
fake.NAMESPACE_NAME, 'handle')
|
||||
|
||||
def test__extend_volume(self):
|
||||
self.mock_object(self.library, '_get_namespace_from_table',
|
||||
return_value=self.fake_namespace)
|
||||
self.mock_object(self.library.client, 'namespace_resize')
|
||||
|
||||
self.library._extend_volume(fake.NAMESPACE_VOLUME, fake.SIZE)
|
||||
|
||||
new_bytes = str(int(fake.SIZE) * units.Gi)
|
||||
self.assertEqual(new_bytes, self.fake_namespace.size)
|
||||
self.library._get_namespace_from_table.assert_called_once_with(
|
||||
fake.NAMESPACE_NAME)
|
||||
self.library.client.namespace_resize.assert_called_once_with(
|
||||
fake.PATH_NAMESPACE, new_bytes)
|
||||
|
||||
@ddt.data([{'name': fake.SUBSYSTEM, 'os_type': 'linux'}], [])
|
||||
def test__get_or_create_subsystem(self, subs):
|
||||
self.mock_object(self.library.client, 'get_subsystem_by_host',
|
||||
return_value=subs)
|
||||
self.mock_object(self.library.client, 'create_subsystem')
|
||||
self.mock_object(uuid, 'uuid4', return_value='fake_uuid')
|
||||
|
||||
sub, os = self.library._get_or_create_subsystem(fake.HOST_NQN, 'linux')
|
||||
|
||||
self.library.client.get_subsystem_by_host.assert_called_once_with(
|
||||
fake.HOST_NQN)
|
||||
self.assertEqual('linux', os)
|
||||
if subs:
|
||||
self.assertEqual(fake.SUBSYSTEM, sub)
|
||||
else:
|
||||
self.library.client.create_subsystem.assert_called_once_with(
|
||||
sub, 'linux', fake.HOST_NQN)
|
||||
expected_sub = 'openstack-fake_uuid'
|
||||
self.assertEqual(expected_sub, sub)
|
||||
|
||||
def test__map_namespace(self):
|
||||
self.library.host_type = 'win'
|
||||
self.mock_object(self.library, '_get_or_create_subsystem',
|
||||
return_value=(fake.SUBSYSTEM, 'linux'))
|
||||
self.mock_object(self.library, '_get_namespace_attr',
|
||||
return_value=fake.NAMESPACE_METADATA)
|
||||
self.mock_object(self.library.client, 'map_namespace',
|
||||
return_value=fake.UUID1)
|
||||
|
||||
sub, n_uuid = self.library._map_namespace(
|
||||
fake.NAMESPACE_NAME, fake.HOST_NQN)
|
||||
|
||||
        self.assertEqual(fake.SUBSYSTEM, sub)
        self.assertEqual(fake.UUID1, n_uuid)
        self.library._get_or_create_subsystem.assert_called_once_with(
            fake.HOST_NQN, 'win')
        self.library.client.map_namespace.assert_called_once_with(
            fake.PATH_NAMESPACE, fake.SUBSYSTEM)

    def test_initialize_connection(self):
        self.mock_object(self.library, '_map_namespace',
                         return_value=(fake.SUBSYSTEM, fake.UUID1))
        self.mock_object(self.library.client, 'get_nvme_subsystem_nqn',
                         return_value=fake.TARGET_NQN)
        self.mock_object(self.library.client, 'get_nvme_target_portals',
                         return_value=['fake_ip'])

        res = self.library.initialize_connection(
            fake.NAMESPACE_VOLUME, {'nqn': fake.HOST_NQN})

        expected_conn_info = {
            "driver_volume_type": "nvmeof",
            "data": {
                "target_nqn": fake.TARGET_NQN,
                "host_nqn": fake.HOST_NQN,
                "portals": [('fake_ip', 4420, 'tcp')],
                "vol_uuid": fake.UUID1
            }
        }
        self.assertEqual(expected_conn_info, res)
        self.library._map_namespace.assert_called_once_with(
            fake.NAMESPACE_NAME, fake.HOST_NQN)
        self.library.client.get_nvme_subsystem_nqn.assert_called_once_with(
            fake.SUBSYSTEM)
        self.library.client.get_nvme_target_portals.assert_called_once_with()

    def test_initialize_connection_error_no_host(self):
        self.mock_object(self.library, '_map_namespace',
                         return_value=(fake.SUBSYSTEM, fake.UUID1))
        self.mock_object(self.library.client, 'get_nvme_subsystem_nqn',
                         return_value=fake.TARGET_NQN)
        self.mock_object(self.library.client, 'get_nvme_target_portals',
                         return_value=['fake_ip'])

        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.library.initialize_connection,
            fake.NAMESPACE_VOLUME, {})

    def test_initialize_connection_error_no_target(self):
        self.mock_object(self.library, '_map_namespace',
                         return_value=(fake.SUBSYSTEM, fake.UUID1))
        self.mock_object(self.library.client, 'get_nvme_subsystem_nqn',
                         return_value=None)
        self.mock_object(self.library.client, 'get_nvme_target_portals',
                         return_value=['fake_ip'])

        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.library.initialize_connection,
            fake.NAMESPACE_VOLUME, {'nqn': fake.HOST_NQN})

    def test_initialize_connection_error_no_portals(self):
        self.mock_object(self.library, '_map_namespace',
                         return_value=(fake.SUBSYSTEM, fake.UUID1))
        self.mock_object(self.library.client, 'get_nvme_subsystem_nqn',
                         return_value=fake.TARGET_NQN)
        self.mock_object(self.library.client, 'get_nvme_target_portals',
                         return_value=[])

        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.library.initialize_connection,
            fake.NAMESPACE_VOLUME, {'nqn': fake.HOST_NQN})

    @ddt.data(fake.HOST_NQN, None)
    def test__unmap_namespace(self, host_nqn):
        mock_find = self.mock_object(
            self.library, '_find_mapped_namespace_subsystem',
            return_value=(fake.SUBSYSTEM, 'fake'))
        self.mock_object(self.library.client, 'get_namespace_map',
                         return_value=[{'subsystem': fake.SUBSYSTEM}])
        self.mock_object(self.library.client, 'unmap_namespace')

        self.library._unmap_namespace(fake.PATH_NAMESPACE, host_nqn)

        if host_nqn:
            mock_find.assert_called_once_with(fake.PATH_NAMESPACE,
                                              fake.HOST_NQN)
            self.library.client.get_namespace_map.assert_not_called()
        else:
            self.library._find_mapped_namespace_subsystem.assert_not_called()
            self.library.client.get_namespace_map.assert_called_once_with(
                fake.PATH_NAMESPACE)
        self.library.client.unmap_namespace.assert_called_once_with(
            fake.PATH_NAMESPACE, fake.SUBSYSTEM)

    @ddt.data(None, {'nqn': fake.HOST_NQN})
    def test_terminate_connection(self, connector):
        self.mock_object(self.library, '_get_namespace_attr',
                         return_value=fake.NAMESPACE_METADATA)
        self.mock_object(self.library, '_unmap_namespace')

        self.library.terminate_connection(fake.NAMESPACE_VOLUME, connector)

        self.library._get_namespace_attr.assert_called_once_with(
            fake.NAMESPACE_NAME, 'metadata')
        host = connector['nqn'] if connector else None
        self.library._unmap_namespace.assert_called_once_with(
            fake.PATH_NAMESPACE, host)
@ -39,7 +39,8 @@ NETAPP_UNIFIED_DRIVER_REGISTRY = {
    {
        'iscsi': DATAONTAP_PATH + '.iscsi_cmode.NetAppCmodeISCSIDriver',
        'nfs': DATAONTAP_PATH + '.nfs_cmode.NetAppCmodeNfsDriver',
        'fc': DATAONTAP_PATH + '.fc_cmode.NetAppCmodeFibreChannelDriver'
        'fc': DATAONTAP_PATH + '.fc_cmode.NetAppCmodeFibreChannelDriver',
        'nvme': DATAONTAP_PATH + '.nvme_cmode.NetAppCmodeNVMeDriver'
    }}


@ -651,6 +651,7 @@ REST_SNAPMIRROR_IN_PROGRESS = '13303810'
REST_UPDATE_SNAPMIRROR_FAILED = '13303844'
REST_NO_SUCH_LUN_MAP = '5374922'
REST_NO_SUCH_FILE = '6684674'
REST_NAMESPACE_EOBJECTNOTFOUND = ('72090006', '72090006')


class RestNaServer(object):

@ -2519,3 +2519,276 @@ class RestClient(object):
        self.send_request(
            f'/storage/volumes/{unique_volume["uuid"]}/files/{orig_file_name}',
            'patch', body=body)

    def get_namespace_list(self):
        """Gets the list of namespaces on filer.

        Gets the namespaces from cluster with vserver.
        """

        query = {
            'svm.name': self.vserver,
            'fields': 'svm.name,location.volume.name,space.size,'
                      'location.qtree.name,name,os_type,'
                      'space.guarantee.requested,uuid'
        }

        response = self.send_request(
            '/storage/namespaces/', 'get', query=query)

        namespace_list = []
        for namespace in response.get('records', []):
            namespace_info = {}
            namespace_info['Vserver'] = namespace['svm']['name']
            namespace_info['Volume'] = namespace['location']['volume']['name']
            namespace_info['Size'] = namespace['space']['size']
            namespace_info['Qtree'] = (
                namespace['location'].get('qtree', {}).get('name', ''))
            namespace_info['Path'] = namespace['name']
            namespace_info['OsType'] = namespace['os_type']
            namespace_info['SpaceReserved'] = (
                namespace['space']['guarantee']['requested'])
            namespace_info['UUID'] = namespace['uuid']

            namespace_list.append(namespace_info)

        return namespace_list

    def create_namespace(self, volume_name, namespace_name, size, metadata):
        """Issues API request for creating namespace on volume."""

        path = f'/vol/{volume_name}/{namespace_name}'
        initial_size = size

        body = {
            'name': path,
            'space.size': str(initial_size),
            'os_type': metadata['OsType'],
        }

        try:
            self.send_request('/storage/namespaces', 'post', body=body)
        except netapp_api.NaApiError as ex:
            with excutils.save_and_reraise_exception():
                LOG.error('Error provisioning volume %(namespace_name)s on '
                          '%(volume_name)s. Details: %(ex)s',
                          {
                              'namespace_name': namespace_name,
                              'volume_name': volume_name,
                              'ex': ex,
                          })

    def destroy_namespace(self, path, force=True):
        """Destroys the namespace at the path."""
        query = {
            'name': path,
            'svm': self.vserver
        }

        if force:
            query['allow_delete_while_mapped'] = 'true'

        self.send_request('/storage/namespaces', 'delete', query=query)

    def clone_namespace(self, volume, name, new_name):
        """Clones namespace on vserver."""
        LOG.debug('Cloning namespace - volume: %(volume)s, name: %(name)s, '
                  'new_name: %(new_name)s',
                  {
                      'volume': volume,
                      'name': name,
                      'new_name': new_name,
                  })

        source_path = f'/vol/{volume}/{name}'
        body = {
            'svm': {
                'name': self.vserver
            },
            'name': f'/vol/{volume}/{new_name}',
            'clone': {
                'source': {
                    'name': source_path,
                }
            }
        }
        self.send_request('/storage/namespaces', 'post', body=body)

    def get_namespace_by_args(self, **namespace_info_args):
        """Retrieves namespace with specified args."""

        query = {
            'fields': 'svm.name,location.volume.name,space.size,'
                      'location.qtree.name,name,os_type,'
                      'space.guarantee.requested,uuid,space.block_size'
        }

        if namespace_info_args:
            if 'vserver' in namespace_info_args:
                query['svm.name'] = namespace_info_args['vserver']
            if 'path' in namespace_info_args:
                query['name'] = namespace_info_args['path']
            if 'uuid' in namespace_info_args:
                query['uuid'] = namespace_info_args['uuid']

        response = self.send_request('/storage/namespaces', 'get', query=query)

        namespace_list = []
        for namespace in response.get('records', []):
            namespace_info = {}
            namespace_info['Vserver'] = namespace['svm']['name']
            namespace_info['Volume'] = namespace['location']['volume']['name']
            namespace_info['Size'] = namespace['space']['size']
            namespace_info['Qtree'] = (
                namespace['location'].get('qtree', {}).get('name', ''))
            namespace_info['Path'] = namespace['name']
            namespace_info['OsType'] = namespace['os_type']
            namespace_info['SpaceReserved'] = (
                namespace['space']['guarantee']['requested'])
            namespace_info['UUID'] = namespace['uuid']
            namespace_info['BlockSize'] = namespace['space']['block_size']

            namespace_list.append(namespace_info)

        return namespace_list

    def namespace_resize(self, path, new_size_bytes):
        """Resize the namespace."""
        seg = path.split("/")
        LOG.info('Resizing namespace %s to new size.', seg[-1])

        body = {'space.size': new_size_bytes}
        query = {'name': path}
        self.send_request('/storage/namespaces', 'patch', body=body,
                          query=query)

    def get_namespace_sizes_by_volume(self, volume_name):
        """Gets the list of namespaces and their sizes from a given volume."""

        query = {
            'location.volume.name': volume_name,
            'fields': 'space.size,name'
        }
        response = self.send_request('/storage/namespaces', 'get', query=query)

        namespaces = []
        for namespace_info in response.get('records', []):
            namespaces.append({
                'path': namespace_info.get('name', ''),
                'size': float(namespace_info.get('space', {}).get('size', 0))
            })

        return namespaces

    def get_subsystem_by_host(self, host_nqn):
        """Get subsystem exactly matching the initiator host."""
        query = {
            'svm.name': self.vserver,
            'hosts.nqn': host_nqn,
            'fields': 'name,os_type',
            'name': f'{na_utils.OPENSTACK_PREFIX}*',
        }
        response = self.send_request('/protocols/nvme/subsystems', 'get',
                                     query=query)

        records = response.get('records', [])

        return [{'name': subsystem['name'], 'os_type': subsystem['os_type']}
                for subsystem in records]

    def create_subsystem(self, subsystem_name, os_type, host_nqn):
        """Creates subsystem with specified args."""
        body = {
            'svm.name': self.vserver,
            'name': subsystem_name,
            'os_type': os_type,
            'hosts': [{'nqn': host_nqn}]
        }
        self.send_request('/protocols/nvme/subsystems', 'post', body=body)

    def get_namespace_map(self, path):
        """Gets the namespace map using its path."""
        query = {
            'namespace.name': path,
            'fields': 'subsystem.name,namespace.uuid,svm.name',
        }
        response = self.send_request('/protocols/nvme/subsystem-maps',
                                     'get',
                                     query=query)

        records = response.get('records', [])
        map_list = []
        for map in records:
            map_subsystem = {}
            map_subsystem['subsystem'] = map['subsystem']['name']
            map_subsystem['uuid'] = map['namespace']['uuid']
            map_subsystem['vserver'] = map['svm']['name']

            map_list.append(map_subsystem)

        return map_list

    def map_namespace(self, path, subsystem_name):
        """Maps namespace to the host nqn and returns namespace uuid."""

        body_post = {
            'namespace.name': path,
            'subsystem.name': subsystem_name
        }
        try:
            result = self.send_request('/protocols/nvme/subsystem-maps',
                                       'post',
                                       body=body_post,
                                       query={'return_records': 'true'})
            records = result.get('records')
            namespace_uuid = records[0]['namespace']['uuid']
            return namespace_uuid
        except netapp_api.NaApiError as e:
            code = e.code
            message = e.message
            LOG.warning('Error mapping namespace. Code: %(code)s, Message: '
                        '%(message)s', {'code': code, 'message': message})
            raise

    def get_nvme_subsystem_nqn(self, subsystem):
        """Returns target subsystem nqn."""
        query = {
            'fields': 'target_nqn',
            'name': subsystem,
            'svm.name': self.vserver
        }
        response = self.send_request(
            '/protocols/nvme/subsystems', 'get', query=query)

        records = response.get('records', [])
        if records:
            return records[0]['target_nqn']

        LOG.debug('No %(subsystem)s NVMe subsystem found for vserver '
                  '%(vserver)s',
                  {'subsystem': subsystem, 'vserver': self.vserver})
        return None

    def get_nvme_target_portals(self):
        """Gets the NVMe target portal details."""
        query = {
            'services': 'data_nvme_tcp',
            'fields': 'ip.address',
            'enabled': 'true',
        }

        response = self.send_request('/network/ip/interfaces', 'get',
                                     query=query)

        interfaces = response.get('records', [])
        return [record['ip']['address'] for record in interfaces]

    def unmap_namespace(self, path, subsystem):
        """Unmaps a namespace from given subsystem."""

        query = {
            'subsystem.name': subsystem,
            'namespace.name': path
        }
        self.send_request('/protocols/nvme/subsystem-maps', 'delete',
                          query=query)

cinder/volume/drivers/netapp/dataontap/nvme_cmode.py (new file, 109 lines)
@ -0,0 +1,109 @@
# Copyright (c) 2023 NetApp, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp Data ONTAP NVMe storage systems.
"""

from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import nvme_library
from cinder.volume.drivers.netapp import options as na_opts


@interface.volumedriver
class NetAppCmodeNVMeDriver(driver.BaseVD):
    """NetApp C-mode NVMe volume driver.

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver

    """

    VERSION = "1.0.0"

    DRIVER_NAME = 'NetApp_NVMe_Cluster_direct'

    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "NetApp_CI"

    def __init__(self, *args, **kwargs):
        super(NetAppCmodeNVMeDriver, self).__init__(*args, **kwargs)
        self.library = nvme_library.NetAppNVMeStorageLibrary(
            self.DRIVER_NAME, 'NVMe', **kwargs)

    @staticmethod
    def get_driver_options():
        return na_opts.netapp_cluster_opts

    def do_setup(self, context):
        self.library.do_setup(context)

    def check_for_setup_error(self):
        self.library.check_for_setup_error()

    def create_volume(self, volume):
        return self.library.create_volume(volume)

    def create_volume_from_snapshot(self, volume, snapshot):
        return self.library.create_volume_from_snapshot(volume, snapshot)

    def create_cloned_volume(self, volume, src_vref):
        return self.library.create_cloned_volume(volume, src_vref)

    def delete_volume(self, volume):
        self.library.delete_volume(volume)

    def create_snapshot(self, snapshot):
        self.library.create_snapshot(snapshot)

    def delete_snapshot(self, snapshot):
        self.library.delete_snapshot(snapshot)

    def get_volume_stats(self, refresh=False):
        return self.library.get_volume_stats(refresh,
                                              self.get_filter_function(),
                                              self.get_goodness_function())

    def get_default_filter_function(self):
        return self.library.get_default_filter_function()

    def get_default_goodness_function(self):
        return self.library.get_default_goodness_function()

    def extend_volume(self, volume, new_size):
        self.library.extend_volume(volume, new_size)

    def ensure_export(self, context, volume):
        return self.library.ensure_export(context, volume)

    def create_export(self, context, volume, connector):
        return self.library.create_export(context, volume)

    def remove_export(self, context, volume):
        self.library.remove_export(context, volume)

    def initialize_connection(self, volume, connector):
        conn_info = self.library.initialize_connection(volume, connector)
        return conn_info

    def terminate_connection(self, volume, connector, **kwargs):
        conn_info = self.library.terminate_connection(volume, connector,
                                                      **kwargs)
        return conn_info

    def get_pool(self, volume):
        return self.library.get_pool(volume)
cinder/volume/drivers/netapp/dataontap/nvme_library.py (new file, 765 lines)
@ -0,0 +1,765 @@
# Copyright (c) 2023 NetApp, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver library for NetApp C-mode NVMe storage systems.
"""

import sys
import uuid

from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six

from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import volume_utils

LOG = logging.getLogger(__name__)


class NetAppNamespace(object):
    """Represents a namespace on NetApp storage."""

    def __init__(self, handle, name, size, metadata_dict):
        self.handle = handle
        self.name = name
        self.size = size
        self.metadata = metadata_dict or {}

    def get_metadata_property(self, prop):
        """Get the metadata property of a namespace."""
        if prop in self.metadata:
            return self.metadata[prop]
        name = self.name
        LOG.debug("No metadata property %(prop)s defined for the namespace "
                  "%(name)s", {'prop': prop, 'name': name})

    def __str__(self, *args, **kwargs):
        return ('NetApp namespace [handle:%s, name:%s, size:%s, metadata:%s]'
                % (self.handle, self.name, self.size, self.metadata))


@six.add_metaclass(volume_utils.TraceWrapperMetaclass)
class NetAppNVMeStorageLibrary(object):
    """NetApp NVMe storage library for Data ONTAP."""

    # do not increment this as it may be used in volume type definitions.
    VERSION = "1.0.0"
    REQUIRED_FLAGS = ['netapp_login', 'netapp_password',
                      'netapp_server_hostname']
    ALLOWED_NAMESPACE_OS_TYPES = ['aix', 'linux', 'vmware', 'windows']
    ALLOWED_SUBSYSTEM_HOST_TYPES = ['aix', 'linux', 'vmware', 'windows']
    DEFAULT_NAMESPACE_OS = 'linux'
    DEFAULT_HOST_TYPE = 'linux'
    DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70'
    DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization'
    REQUIRED_CMODE_FLAGS = ['netapp_vserver']
    NVME_PORT = 4420
    NVME_TRANSPORT = "tcp"

    def __init__(self, driver_name, driver_protocol, **kwargs):

        na_utils.validate_instantiation(**kwargs)

        self.driver_name = driver_name
        self.driver_protocol = driver_protocol
        self.rest_client = None
        self._stats = {}
        self.namespace_table = {}
        self.namespace_ostype = None
        self.host_type = None
        self.app_version = kwargs.get("app_version", "unknown")
        self.host = kwargs.get('host')
        self.backend_name = self.host.split('@')[1]

        self.configuration = kwargs['configuration']
        self.configuration.append_config_values(na_opts.netapp_connection_opts)
        self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
        self.configuration.append_config_values(na_opts.netapp_transport_opts)
        self.configuration.append_config_values(
            na_opts.netapp_provisioning_opts)
        self.configuration.append_config_values(na_opts.netapp_san_opts)
        self.configuration.append_config_values(na_opts.netapp_cluster_opts)

        self.max_over_subscription_ratio = (
            volume_utils.get_max_over_subscription_ratio(
                self.configuration.max_over_subscription_ratio,
                supports_auto=True))
        self.reserved_percentage = self.configuration.reserved_percentage
        self.loopingcalls = loopingcalls.LoopingCalls()

    def do_setup(self, context):
        na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
        self.namespace_ostype = (self.configuration.netapp_namespace_ostype
                                 or self.DEFAULT_NAMESPACE_OS)
        self.host_type = (self.configuration.netapp_host_type
                          or self.DEFAULT_HOST_TYPE)

        na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)

        # NOTE(felipe_rodrigues): NVMe driver is only available with
        # REST client.
        self.client = dot_utils.get_client_for_backend(
            self.backend_name, force_rest=True)
        self.vserver = self.client.vserver

        # Storage service catalog.
        self.ssc_library = capabilities.CapabilitiesLibrary(
            self.driver_protocol, self.vserver, self.client,
            self.configuration)

        self.ssc_library.check_api_permissions()

        self.using_cluster_credentials = (
            self.ssc_library.cluster_user_supported())

        # Performance monitoring library.
        self.perf_library = perf_cmode.PerformanceCmodeLibrary(
            self.client)

    def _update_ssc(self):
        """Refresh the storage service catalog with the latest set of pools."""

        self.ssc_library.update_ssc(self._get_flexvol_to_pool_map())

    def _get_flexvol_to_pool_map(self):
        """Get the flexvols that match the pool name search pattern.

        The map is of the format suitable for seeding the storage service
        catalog: {<flexvol_name> : {'pool_name': <flexvol_name>}}
        """

        pool_regex = na_utils.get_pool_name_filter_regex(self.configuration)

        pools = {}
        flexvol_names = self.client.list_flexvols()

        for flexvol_name in flexvol_names:

            msg_args = {
                'flexvol': flexvol_name,
                'vol_pattern': pool_regex.pattern,
            }

            if pool_regex.match(flexvol_name):
                msg = "Volume '%(flexvol)s' matches %(vol_pattern)s"
                LOG.debug(msg, msg_args)
                pools[flexvol_name] = {'pool_name': flexvol_name}
            else:
                msg = "Volume '%(flexvol)s' does not match %(vol_pattern)s"
                LOG.debug(msg, msg_args)

        return pools

    def check_for_setup_error(self):
        """Check that the driver is working and can communicate.

        Discovers the namespaces on the NetApp server.
        """
        if not self._get_flexvol_to_pool_map():
            msg = _('No pools are available for provisioning volumes. '
                    'Ensure that the configuration option '
                    'netapp_pool_name_search_pattern is set correctly.')
            raise na_utils.NetAppDriverException(msg)
        self._add_looping_tasks()

        if self.namespace_ostype not in self.ALLOWED_NAMESPACE_OS_TYPES:
            msg = _("Invalid value for NetApp configuration"
                    " option netapp_namespace_ostype.")
            LOG.error(msg)
            raise na_utils.NetAppDriverException(msg)
        if self.host_type not in self.ALLOWED_SUBSYSTEM_HOST_TYPES:
            msg = _("Invalid value for NetApp configuration"
                    " option netapp_host_type.")
            LOG.error(msg)
            raise na_utils.NetAppDriverException(msg)

        namespace_list = self.client.get_namespace_list()
        self._extract_and_populate_namespaces(namespace_list)
        LOG.debug("Success getting list of namespaces from server.")

        self.loopingcalls.start_tasks()

    def _add_looping_tasks(self):
        """Add tasks that need to be executed at a fixed interval.

        Inheriting class overrides and then explicitly calls this method.
        """
        # Note(cknight): Run the update once in the current thread to prevent a
        # race with the first invocation of _update_volume_stats.
        self._update_ssc()

        # Add the task that updates the slow-changing storage service catalog.
        self.loopingcalls.add_task(self._update_ssc,
                                   loopingcalls.ONE_HOUR,
                                   loopingcalls.ONE_HOUR)

        # Add the task that logs EMS messages.
        self.loopingcalls.add_task(
            self._handle_ems_logging,
            loopingcalls.ONE_HOUR)

    def _handle_ems_logging(self):
        """Log autosupport messages."""

        base_ems_message = dot_utils.build_ems_log_message_0(
            self.driver_name, self.app_version)
        self.client.send_ems_log_message(base_ems_message)

        pool_ems_message = dot_utils.build_ems_log_message_1(
            self.driver_name, self.app_version, self.vserver,
            self.ssc_library.get_ssc_flexvol_names(), [])
        self.client.send_ems_log_message(pool_ems_message)

    def get_pool(self, volume):
        """Return pool name where volume resides.

        :param volume: The volume hosted by the driver.
        :return: Name of the pool where given volume is hosted.
        """
        name = volume['name']
        metadata = self._get_namespace_attr(name, 'metadata') or dict()
        return metadata.get('Volume', None)

    def create_volume(self, volume):
        """Driver entry point for creating a new volume (ONTAP namespace)."""

        LOG.debug('create_volume on %s', volume['host'])

        # get Data ONTAP volume name as pool name.
        pool_name = volume_utils.extract_host(volume['host'], level='pool')
        if pool_name is None:
            msg = _("Pool is not available in the volume host field.")
            raise exception.InvalidHost(reason=msg)

        namespace = volume.name
        size = int(volume['size']) * units.Gi
        metadata = {'OsType': self.namespace_ostype,
                    'Path': '/vol/%s/%s' % (pool_name, namespace)}

        try:
            self.client.create_namespace(pool_name, namespace, size, metadata)
        except Exception:
            LOG.exception("Exception creating namespace %(name)s in pool "
                          "%(pool)s.", {'name': namespace, 'pool': pool_name})
            msg = _("Volume %s could not be created.")
            raise exception.VolumeBackendAPIException(data=msg % namespace)
        LOG.debug('Created namespace with name %(name)s.', {'name': namespace})

        metadata['Volume'] = pool_name
        metadata['Qtree'] = None
        handle = self._create_namespace_handle(metadata)
        self._add_namespace_to_table(
            NetAppNamespace(handle, namespace, size, metadata))

        return

    def delete_volume(self, volume):
        """Driver entry point for destroying existing volumes."""
        self._delete_namespace(volume['name'])

    def _delete_namespace(self, namespace_name):
        """Helper method to delete namespace backing a volume or snapshot."""

        metadata = self._get_namespace_attr(namespace_name, 'metadata')
        if metadata:
            try:
                self.client.destroy_namespace(metadata['Path'])
            except netapp_api.NaApiError as e:
                if e.code in netapp_api.REST_NAMESPACE_EOBJECTNOTFOUND:
                    LOG.warning("Failure deleting namespace %(name)s. "
                                "%(message)s",
                                {'name': namespace_name, 'message': e})
                else:
                    error_message = (_('A NetApp Api Error occurred: %s') % e)
                    raise na_utils.NetAppDriverException(error_message)
            self.namespace_table.pop(namespace_name)
        else:
            LOG.warning("No entry in namespace table for volume/snapshot"
                        " %(name)s.", {'name': namespace_name})

    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        handle = self._get_namespace_attr(volume['name'], 'handle')
        return {'provider_location': handle}

    def create_export(self, context, volume):
        """Driver entry point to get the export info for a new volume."""
        handle = self._get_namespace_attr(volume['name'], 'handle')
        return {'provider_location': handle}

    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume.

        Since exporting is idempotent in this driver, we have nothing
        to do for unexporting.
        """

        pass

    def create_snapshot(self, snapshot):
        """Driver entry point for creating a snapshot.

        This driver implements snapshots by using efficient single-file
        (namespace) cloning.
        """
        self._create_snapshot(snapshot)

    def _create_snapshot(self, snapshot):
        vol_name = snapshot['volume_name']
        snapshot_name = snapshot['name']
        namespace = self._get_namespace_from_table(vol_name)
        self._clone_namespace(namespace.name, snapshot_name)

    def _clone_namespace(self, name, new_name):
        """Clone namespace with the given handle to the new name."""
        metadata = self._get_namespace_attr(name, 'metadata')
        volume = metadata['Volume']

        self.client.clone_namespace(volume, name, new_name)

        LOG.debug("Cloned namespace with new name %s", new_name)
        namespace = self.client.get_namespace_by_args(
            vserver=self.vserver, path=f'/vol/{volume}/{new_name}')
        if len(namespace) == 0:
            msg = _("No cloned namespace named %s found on the filer.")
            raise exception.VolumeBackendAPIException(data=msg % new_name)

        cloned_namespace = namespace[0]
        self._add_namespace_to_table(
            NetAppNamespace(
                f"{cloned_namespace['Vserver']}:{cloned_namespace['Path']}",
                new_name,
                cloned_namespace['Size'],
                cloned_namespace))

    def delete_snapshot(self, snapshot):
        """Driver entry point for deleting a snapshot."""
        self._delete_namespace(snapshot['name'])
        LOG.debug("Snapshot %s deletion successful.", snapshot['name'])

    def create_volume_from_snapshot(self, volume, snapshot):
        source = {'name': snapshot['name'], 'size': snapshot['volume_size']}
        self._clone_source_to_destination(source, volume)

    def create_cloned_volume(self, volume, src_vref):
        src_namespace = self._get_namespace_from_table(src_vref['name'])
        source = {'name': src_namespace.name, 'size': src_vref['size']}
        self._clone_source_to_destination(source, volume)

    def _clone_source_to_destination(self, source, destination_volume):
        source_size = source['size']
        destination_size = destination_volume['size']

        source_name = source['name']
        destination_name = destination_volume['name']

        try:
            self._clone_namespace(source_name, destination_name)

            if destination_size != source_size:

                try:
                    self._extend_volume(destination_volume, destination_size)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.error("Resizing %s failed. Cleaning volume.",
                                  destination_volume['id'])
                        self.delete_volume(destination_volume)

        except Exception:
            LOG.exception("Exception cloning volume %(name)s from source "
                          "volume %(source)s.",
                          {'name': destination_name, 'source': source_name})

            msg = _("Volume %s could not be created from source volume.")
            raise exception.VolumeBackendAPIException(
                data=msg % destination_name)

    def _create_namespace_handle(self, metadata):
        """Returns namespace handle based on filer type."""
        return '%s:%s' % (self.vserver, metadata['Path'])

    def _extract_namespace_info(self, namespace):
        """Extracts the namespace from API and populates the table."""

        path = namespace['Path']
        (_rest, _splitter, name) = path.rpartition('/')
        handle = self._create_namespace_handle(namespace)
        size = namespace['Size']
        return NetAppNamespace(handle, name, size, namespace)

    def _extract_and_populate_namespaces(self, api_namespaces):
        """Extracts the namespaces from API and populates the table."""

        for namespace in api_namespaces:
            discovered_namespace = self._extract_namespace_info(namespace)
            self._add_namespace_to_table(discovered_namespace)

    def _add_namespace_to_table(self, namespace):
        """Adds namespace to cache table."""
        if not isinstance(namespace, NetAppNamespace):
            msg = _("Object is not a NetApp namespace.")
            raise exception.VolumeBackendAPIException(data=msg)
        self.namespace_table[namespace.name] = namespace

    def _get_namespace_from_table(self, name):
        """Gets namespace from cache table.

        Refreshes cache if namespace not found in cache.
        """
        namespace = self.namespace_table.get(name)
        if namespace is None:
            namespace_list = self.client.get_namespace_list()
            self._extract_and_populate_namespaces(namespace_list)
            namespace = self.namespace_table.get(name)
            if namespace is None:
                raise exception.VolumeNotFound(volume_id=name)
        return namespace

    def _get_namespace_attr(self, name, attr):
        """Get the namespace attribute if found else None."""
        try:
            attr = getattr(self._get_namespace_from_table(name), attr)
            return attr
        except exception.VolumeNotFound as e:
            LOG.error("Message: %s", e.msg)
        except Exception as e:
            LOG.error("Error getting namespace attribute. Exception: %s", e)
        return None

    def get_volume_stats(self, refresh=False, filter_function=None,
                         goodness_function=None):
        """Get volume stats.

        If 'refresh' is True, update the stats first.
        """

        if refresh:
            self._update_volume_stats(filter_function=filter_function,
                                      goodness_function=goodness_function)
        return self._stats

    def _update_volume_stats(self, filter_function=None,
                             goodness_function=None):
        """Retrieve backend stats."""

        LOG.debug('Updating volume stats')
        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or self.driver_name
        data['vendor_name'] = 'NetApp'
        data['driver_version'] = self.VERSION
        data['storage_protocol'] = self.driver_protocol
        data['pools'] = self._get_pool_stats(
            filter_function=filter_function,
            goodness_function=goodness_function)
        data['sparse_copy_volume'] = True
        data['replication_enabled'] = False

        self._stats = data

    def _get_pool_stats(self, filter_function=None, goodness_function=None):
        """Retrieve pool (Data ONTAP flexvol) stats.

        Pool statistics are assembled from static driver capabilities, the
        Storage Service Catalog of flexvol attributes, and real-time capacity
        and controller utilization metrics. The pool name is the flexvol name.
        """

        pools = []

        ssc = self.ssc_library.get_ssc()
        if not ssc:
            return pools

        # Utilization and performance metrics require cluster-scoped
        # credentials
        if self.using_cluster_credentials:
            # Get up-to-date node utilization metrics just once
            self.perf_library.update_performance_cache(ssc)

            # Get up-to-date aggregate capacities just once
            aggregates = self.ssc_library.get_ssc_aggregates()
            aggr_capacities = self.client.get_aggregate_capacities(
                aggregates)
        else:
            aggr_capacities = {}

        for ssc_vol_name, ssc_vol_info in ssc.items():

            pool = dict()

            # Add storage service catalog data
            pool.update(ssc_vol_info)

            # Add driver capabilities and config info
            pool['QoS_support'] = False
            pool['multiattach'] = False
            pool['online_extend_support'] = False
            pool['consistencygroup_support'] = False
            pool['consistent_group_snapshot_enabled'] = False
            pool['reserved_percentage'] = self.reserved_percentage
            pool['max_over_subscription_ratio'] = (
                self.max_over_subscription_ratio)

            # Add up-to-date capacity info
            capacity = self.client.get_flexvol_capacity(
                flexvol_name=ssc_vol_name)

            size_total_gb = capacity['size-total'] / units.Gi
            pool['total_capacity_gb'] = na_utils.round_down(size_total_gb)

            size_available_gb = capacity['size-available'] / units.Gi
            pool['free_capacity_gb'] = na_utils.round_down(size_available_gb)

            if self.configuration.netapp_driver_reports_provisioned_capacity:
                namespaces = self.client.get_namespace_sizes_by_volume(
                    ssc_vol_name)
                provisioned_cap = 0
                for namespace in namespaces:
                    namespace_name = namespace['path'].split('/')[-1]
                    # Filtering namespaces that match the volume name
                    # template to exclude snapshots.
                    if volume_utils.extract_id_from_volume_name(
                            namespace_name):
                        provisioned_cap = provisioned_cap + namespace['size']
                pool['provisioned_capacity_gb'] = na_utils.round_down(
                    float(provisioned_cap) / units.Gi)

            if self.using_cluster_credentials:
                dedupe_used = self.client.get_flexvol_dedupe_used_percent(
                    ssc_vol_name)
            else:
                dedupe_used = 0.0
            pool['netapp_dedupe_used_percent'] = na_utils.round_down(
                dedupe_used)

            aggregate_name = ssc_vol_info.get('netapp_aggregate')
            aggr_capacity = aggr_capacities.get(aggregate_name, {})
            pool['netapp_aggregate_used_percent'] = aggr_capacity.get(
                'percent-used', 0)

            # Add utilization data
            utilization = self.perf_library.get_node_utilization_for_pool(
                ssc_vol_name)
            pool['utilization'] = na_utils.round_down(utilization)
            pool['filter_function'] = filter_function
            pool['goodness_function'] = goodness_function

            pools.append(pool)

        return pools

    def get_default_filter_function(self):
        """Get the default filter_function string."""
        return self.DEFAULT_FILTER_FUNCTION

    def get_default_goodness_function(self):
        """Get the default goodness_function string."""
        return self.DEFAULT_GOODNESS_FUNCTION

    def extend_volume(self, volume, new_size):
        """Driver entry point to increase the size of a volume."""
        self._extend_volume(volume, new_size)

    def _extend_volume(self, volume, new_size):
        """Extend an existing volume to the new size."""
        name = volume['name']
        namespace = self._get_namespace_from_table(name)
        path = namespace.metadata['Path']
        curr_size_bytes = str(namespace.size)
        new_size_bytes = str(int(new_size) * units.Gi)
        # Reused by clone scenarios.
        # Hence comparing the stored size.
        if curr_size_bytes == new_size_bytes:
            LOG.info("No need to extend volume %s"
                     " as it is already the requested new size.", name)
            return

        self.client.namespace_resize(path, new_size_bytes)

        self.namespace_table[name].size = new_size_bytes

    def _get_or_create_subsystem(self, host_nqn, host_os_type):
        """Checks for a subsystem for a host.

        Creates subsystem if not already present with given host os type and
        adds the host.
        """
        # Backend supports different subsystems with the same hosts, so
        # instead of reusing non OpenStack subsystem, we make sure we only use
        # our own, thus being compatible with custom subsystem.
        subsystems = self.client.get_subsystem_by_host(
            host_nqn)
        if subsystems:
            subsystem_name = subsystems[0]['name']
            host_os_type = subsystems[0]['os_type']
        else:
            subsystem_name = na_utils.OPENSTACK_PREFIX + str(uuid.uuid4())
            self.client.create_subsystem(subsystem_name, host_os_type,
                                         host_nqn)

        return subsystem_name, host_os_type

    def _find_mapped_namespace_subsystem(self, path, host_nqn):
        """Find a subsystem for a namespace mapped to the given host."""
        subsystems = [subsystem['name'] for subsystem in
                      self.client.get_subsystem_by_host(host_nqn)]

        # Map subsystem name to namespace-id for the requested host.
        namespace_map = {v['subsystem']: v['uuid']
                         for v in self.client.get_namespace_map(path)
                         if v['subsystem'] in subsystems}

        subsystem_name = n_uuid = None
        # Give preference to OpenStack subsystems, just use the last one if not
        # present to allow unmapping old mappings that used a custom subsystem.
        for subsystem_name, n_uuid in namespace_map.items():
            if subsystem_name.startswith(na_utils.OPENSTACK_PREFIX):
                break

        return subsystem_name, n_uuid

    def _map_namespace(self, name, host_nqn):
        """Maps namespace to the host nqn and returns its ID assigned."""

        subsystem_name, subsystem_host_os = self._get_or_create_subsystem(
            host_nqn, self.host_type)
        if subsystem_host_os != self.host_type:
            LOG.warning("Namespace misalignment may occur for current"
                        " subsystem %(sub_name)s with host OS type"
                        " %(sub_os)s. Please configure subsystem manually"
                        " according to the type of the host OS.",
                        {'sub_name': subsystem_name,
                         'sub_os': subsystem_host_os})

        metadata = self._get_namespace_attr(name, 'metadata')
        path = metadata['Path']
        try:
            ns_uuid = self.client.map_namespace(
                path, subsystem_name)
            return subsystem_name, ns_uuid
        except netapp_api.NaApiError:
            exc_info = sys.exc_info()
            (subsystem_name, ns_uuid) = self._find_mapped_namespace_subsystem(
                path, host_nqn)
            if ns_uuid is not None and subsystem_name:
                return subsystem_name, ns_uuid
            else:
                six.reraise(*exc_info)

    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.

        Assign any created volume to a compute node/host so that it can be
        used from that host. Example return values:

        .. code-block:: default

            {
                'driver_volume_type': 'nvmeof',
                'data': {
                    'target_nqn': 'nqn.1992-01.example.com:subsystem',
                    'host_nqn': 'nqn.1992-01.example.com:string',
                    'portals': [
                        ('10.10.10.10', '4420', 'tcp')
                    ],
                    'uuid': 'a1129e6f-8497-4c0c-be01-3eab1ba684ed'
                }
            }

        """
        host_nqn = connector.get("nqn")
        if not host_nqn:
            raise exception.VolumeBackendAPIException(
                data=_("Initialize connection error: no host nqn available!"))

        name = volume['name']
        subsystem, namespace_uuid = self._map_namespace(name, host_nqn)

        LOG.debug("Mapped namespace %(name)s to the host NQN %(host_nqn)s",
                  {'name': name, 'host_nqn': host_nqn})

        target_nqn = self.client.get_nvme_subsystem_nqn(subsystem)
        if not target_nqn:
            msg = _('Failed to get subsystem %(subsystem)s target NQN for the '
                    'namespace %(name)s')
            msg_args = {'subsystem': subsystem, 'name': name}
            raise exception.VolumeBackendAPIException(data=msg % msg_args)

        target_portals = self.client.get_nvme_target_portals()
        if not target_portals:
            msg = _('Failed to get target portals for the namespace %s')
            raise exception.VolumeBackendAPIException(
                data=msg % name)

        portal = (target_portals[0], self.NVME_PORT, self.NVME_TRANSPORT)
        data = {
            "target_nqn": str(target_nqn),
            "host_nqn": host_nqn,
            "portals": [portal],
            "vol_uuid": namespace_uuid
        }
        conn_info = {"driver_volume_type": "nvmeof", "data": data}
        LOG.debug("Initialize connection info: %s", conn_info)

        return conn_info

    def _unmap_namespace(self, path, host_nqn):
        """Unmaps a namespace from given host."""

        namespace_unmap_list = []
        if host_nqn:
            (subsystem, _) = self._find_mapped_namespace_subsystem(
                path, host_nqn)
            namespace_unmap_list.append((path, subsystem))
        else:
            namespace_maps = self.client.get_namespace_map(path)
            namespace_unmap_list = [
                (path, m['subsystem']) for m in namespace_maps]

        for _path, _subsystem in namespace_unmap_list:
            self.client.unmap_namespace(_path, _subsystem)

    def terminate_connection(self, volume, connector, **kwargs):
        """Driver entry point to unattach a volume from an instance.

        Unmask the namespace on the storage system so the given initiator can
        no longer access it.
        """

        name = volume['name']
        host_nqn = None
        if connector is None:
            LOG.debug('Unmapping namespace %(name)s from all hosts.',
                      {'name': name})
        else:
            host_nqn = connector.get("nqn")
            LOG.debug("Unmapping namespace %(name)s from the host "
                      "%(host_nqn)s", {'name': name, 'host_nqn': host_nqn})

        metadata = self._get_namespace_attr(name, 'metadata')
        path = metadata['Path']
        self._unmap_namespace(path, host_nqn)
@ -62,11 +62,11 @@ def get_backend_configuration(backend_name):
    return config


def get_client_for_backend(backend_name, vserver_name=None):
def get_client_for_backend(backend_name, vserver_name=None, force_rest=False):
    """Get a cDOT API client for a specific backend."""

    config = get_backend_configuration(backend_name)
    if config.netapp_use_legacy_client:
    if config.netapp_use_legacy_client and not force_rest:
        client = client_cmode.Client(
            transport_type=config.netapp_transport_type,
            username=config.netapp_login,

@ -39,7 +39,7 @@ netapp_proxy_opts = [
                     'the only valid value is ontap_cluster for using '
                     'clustered Data ONTAP.')),
    cfg.StrOpt('netapp_storage_protocol',
               choices=['iscsi', 'fc', 'nfs'],
               choices=['iscsi', 'fc', 'nfs', 'nvme'],
               help=('The storage protocol to be used on the data path with '
                     'the storage system.')), ]

@ -158,6 +158,10 @@ netapp_san_opts = [
               help=('This option defines the type of operating system that'
                     ' will access a LUN exported from Data ONTAP; it is'
                     ' assigned to the LUN at the time it is created.')),
    cfg.StrOpt('netapp_namespace_ostype',
               help=('This option defines the type of operating system that'
                     ' will access a namespace exported from Data ONTAP; it is'
                     ' assigned to the namespace at the time it is created.')),
    cfg.StrOpt('netapp_host_type',
               help=('This option defines the type of operating system for'
                     ' all initiators that can access a LUN. This information'

@ -6,7 +6,7 @@ The NetApp unified driver is a Block Storage driver that supports
multiple storage families and protocols. Currently, the only storage
family supported by this driver is the clustered Data ONTAP. The
storage protocol refers to the protocol used to initiate data storage and
access operations on those storage systems like iSCSI and NFS. The NetApp
access operations on those storage systems like NVMe, iSCSI and NFS. The NetApp
unified driver can be configured to provision and manage OpenStack volumes
on a given storage family using a specified storage protocol.

@ -44,7 +44,7 @@ NetApp clustered Data ONTAP storage family
The NetApp clustered Data ONTAP storage family represents a
configuration group which provides Compute instances access to
clustered Data ONTAP storage systems. At present it can be configured in
Block Storage to work with iSCSI and NFS storage protocols.
Block Storage to work with NVMe, iSCSI and NFS storage protocols.

NetApp iSCSI configuration for clustered Data ONTAP
---------------------------------------------------
@ -107,6 +107,66 @@ setting the ``volume_driver``, ``netapp_storage_family`` and
operational scenarios, visit the `NetApp OpenStack website
<http://netapp.io/openstack/>`_.

NetApp NVMe/TCP configuration for clustered Data ONTAP
------------------------------------------------------

The NetApp NVMe/TCP configuration for clustered Data ONTAP is an interface
from OpenStack to clustered Data ONTAP storage systems. It provisions
and manages the SAN block storage entity, which is a NetApp namespace that
can be accessed using the NVMe/TCP protocol.

The NVMe/TCP configuration for clustered Data ONTAP is a direct interface
from Block Storage to the clustered Data ONTAP instance and as
such does not require additional management software to achieve the
desired functionality. It uses NetApp APIs to interact with the
clustered Data ONTAP instance.

**Configuration options**

Configure the volume driver, storage family, and storage protocol to the
NetApp unified driver, clustered Data ONTAP, and NVMe respectively by
setting the ``volume_driver``, ``netapp_storage_family`` and
``netapp_storage_protocol`` options in the ``cinder.conf`` file as follows:

.. code-block:: ini

   volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
   netapp_storage_family = ontap_cluster
   netapp_storage_protocol = nvme
   netapp_vserver = openstack-vserver
   netapp_server_hostname = myhostname
   netapp_server_port = port
   netapp_login = username
   netapp_password = password

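In a multi-backend deployment these options usually live in their own backend
section that is referenced from ``enabled_backends``. A minimal sketch,
assuming an illustrative section name of ``ontap-nvme`` (the section name and
``volume_backend_name`` value are examples, not part of this change):

.. code-block:: ini

   [DEFAULT]
   enabled_backends = ontap-nvme

   [ontap-nvme]
   volume_backend_name = ontap-nvme
   volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
   netapp_storage_family = ontap_cluster
   netapp_storage_protocol = nvme
   netapp_vserver = openstack-vserver
   netapp_server_hostname = myhostname
   netapp_login = username
   netapp_password = password
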
.. note::

   To use the NVMe/TCP protocol, you must override the default value of
   ``netapp_storage_protocol`` with ``nvme``.
   Note that this is not the same value that is reported by the driver
   to the scheduler as ``storage_protocol``, which is always
   ``NVMe`` (case sensitive).

.. note::

   If you specify an account in the ``netapp_login`` that only has
   virtual storage server (Vserver) administration privileges (rather
   than cluster-wide administration privileges), some advanced features
   of the NetApp unified driver will not work and you may see warnings
   in the Block Storage logs.

.. note::

   The driver only supports the minimal Cinder driver features: create/delete
   volume and snapshots, extend volume, attach/detach volume, create volume
   from volume and create volume from image/snapshot.

.. tip::

   For more information on these options and other deployment and
   operational scenarios, visit the `NetApp OpenStack website
   <http://netapp.io/openstack/>`_.

NetApp NFS configuration for clustered Data ONTAP
-------------------------------------------------

@ -17,7 +17,7 @@
   * - ``netapp_password`` = ``None``
     - (String) Password for the administrative user account specified in the netapp_login option.
   * - ``netapp_pool_name_search_pattern`` = ``(.+)``
     - (String) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC.
     - (String) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use NVMe, iSCSI or FC.
   * - ``netapp_replication_aggregate_map`` = ``None``
     - (Unknown) Multi opt of dictionaries to represent the aggregate mapping between source and destination back ends when using whole back end replication. For every source aggregate associated with a cinder pool (NetApp FlexVol), you would need to specify the destination aggregate on the replication target device. A replication target device is configured with the configuration option replication_device. Specify this option as many times as you have replication devices. Each entry takes the standard dict config form: netapp_replication_aggregate_map = backend_id:<name_of_replication_device_section>,src_aggr_name1:dest_aggr_name1,src_aggr_name2:dest_aggr_name2,...
   * - ``netapp_server_hostname`` = ``None``

@ -151,7 +151,7 @@ title=NEC Storage M Series Driver (iSCSI, FC)
title=NEC Storage V Series Driver (iSCSI, FC)

[driver.netapp_ontap]
title=NetApp Data ONTAP Driver (iSCSI, NFS, FC)
title=NetApp Data ONTAP Driver (iSCSI, NFS, FC, NVMe/TCP)

[driver.netapp_solidfire]
title=NetApp Solidfire Driver (iSCSI)

@ -0,0 +1,4 @@
---
features:
  - |
    Added NVMe/TCP volume driver for NetApp ONTAP Storage Cluster.