Add KIOXIA KumoScale NVMeOF driver
Supported protocol: NVMeOF. Supported features: volume create/delete, volume attach/detach, snapshot create/delete, create volume from snapshot, get volume stats, copy image to volume, copy volume to image, clone volume, extend volume. ThirdPartySystems: KIOXIA CI. Implements: blueprint kumoscale-driver. Change-Id: Ibaae35b02ccc254a8a0202ec1635f62ab18486ea
This commit is contained in:
parent
5d3fe0a7f4
commit
5b3304cfcc
@ -125,6 +125,8 @@ from cinder.volume.drivers.inspur.instorage import instorage_iscsi as \
|
||||
cinder_volume_drivers_inspur_instorage_instorageiscsi
|
||||
from cinder.volume.drivers.kaminario import kaminario_common as \
|
||||
cinder_volume_drivers_kaminario_kaminariocommon
|
||||
from cinder.volume.drivers.kioxia import kumoscale as \
|
||||
cinder_volume_drivers_kioxia_kumoscale
|
||||
from cinder.volume.drivers.lenovo import lenovo_common as \
|
||||
cinder_volume_drivers_lenovo_lenovocommon
|
||||
from cinder.volume.drivers import linstordrv as \
|
||||
@ -268,6 +270,7 @@ def list_opts():
|
||||
instorage_mcs_opts,
|
||||
cinder_volume_drivers_inspur_instorage_instorageiscsi.
|
||||
instorage_mcs_iscsi_opts,
|
||||
cinder_volume_drivers_kioxia_kumoscale.KUMOSCALE_OPTS,
|
||||
cinder_volume_drivers_open_e_options.jdss_connection_opts,
|
||||
cinder_volume_drivers_open_e_options.jdss_iscsi_opts,
|
||||
cinder_volume_drivers_open_e_options.jdss_volume_opts,
|
||||
|
768
cinder/tests/unit/volume/drivers/test_kioxia.py
Normal file
768
cinder/tests/unit/volume/drivers/test_kioxia.py
Normal file
@ -0,0 +1,768 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import unittest
|
||||
from unittest import mock
|
||||
|
||||
from oslo_utils.secretutils import md5
|
||||
|
||||
from cinder import exception
|
||||
from cinder.tests.unit import test
|
||||
from cinder.volume import configuration as conf
|
||||
from cinder.volume.drivers.kioxia import entities
|
||||
from cinder.volume.drivers.kioxia import kumoscale as kioxia
|
||||
from cinder.volume.drivers.kioxia import rest_client
|
||||
|
||||
# Identifiers and sizes used to stub volumes, snapshots and connectors.
VOL_BACKEND_NAME = 'kioxia_kumoscale_1'
VOL_NAME = 'volume-c2fd04e3-320e-44eb-b-2'
VOL_UUID = 'c20aba21-6ef6-446b-b374-45733b4883ba'
VOL_SIZE = 10
VOL_PROTOCOL = 'NVMeoF'
SNAP_UUID = 'c9ef9d49-0d26-44cb-b609-0b8bd2d3db77'
CONN_UUID = '34206309-3733-4cc6-a7d5-9d4dbbe377da'
CONN_HOST_NAME = 'devstack'
CONN_NQN = ('nqn.2014-08.org.nvmexpress:uuid:'
            'beaae2de-3a97-4be1-a739-6ac4bc5bf138')

# Canned provisioner replies shared by every test case below.
success_prov_response = entities.ProvisionerResponse(None, None, "Success",
                                                     "Success")
fail_prov_response = entities.ProvisionerResponse(None, None, "Failure",
                                                  "Failure")
prov_backend1 = entities.Backend(None, None, None, None, 'dummy-pid-1')
prov_backend2 = entities.Backend(None, None, None, None, 'dummy-pid-2')
prov_location1 = entities.Location(VOL_UUID, prov_backend1)
prov_location2 = entities.Location(VOL_UUID, prov_backend2)
# A replicated volume spanning both stub backends.
prov_volume = entities.VolumeProv(VOL_UUID, None, None, None,
                                  None, None, None, None, None, None,
                                  None, True, None,
                                  [prov_location1, prov_location2])
prov_volumes_response = entities.ProvisionerResponse([prov_volume])
no_entities_prov_response = entities.ProvisionerResponse([], None, "Success")
|
||||
|
||||
|
||||
class KioxiaVolumeTestCase(test.TestCase):
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_info')
@mock.patch.object(kioxia.KumoScaleBaseVolumeDriver, '_get_kumoscale')
def setUp(self, mock_kumoscale, mock_get_info):
    """Build a driver instance wired to a mocked KumoScale provisioner."""
    mock_get_info.return_value = success_prov_response
    mock_kumoscale.return_value = rest_client.KioxiaProvisioner(
        ['1.2.3.4'], 'cert', 'token')
    super(KioxiaVolumeTestCase, self).setUp()
    # Configuration mock mirrors the driver's expected options.
    self.cfg = mock.Mock(spec=conf.Configuration)
    self.cfg.volume_backend_name = VOL_BACKEND_NAME
    self.cfg.url = 'dummyURL'
    self.cfg.token = 'dummy.dummy.Rf-dummy-dummy-lE'
    self.cfg.cafile = 'dummy'
    self.cfg.num_replicas = 1
    self.cfg.block_size = 512
    self.cfg.max_iops_per_gb = 1000
    self.cfg.desired_iops_per_gb = 1000
    self.cfg.max_bw_per_gb = 1000
    self.cfg.desired_bw_per_gb = 1000
    self.cfg.same_rack_allowed = False
    self.cfg.max_replica_down_time = 5
    self.cfg.span_allowed = True
    self.cfg.vol_reserved_space_percentage = 20
    self.cfg.provisioning_type = 'THIN'
    self.driver = kioxia.KumoScaleBaseVolumeDriver(configuration=self.cfg)
    self.driver.configuration.get = lambda *args, **kwargs: {}
    self.driver.num_replicas = 2
    # Baseline stats each get_volume_stats test compares against.
    self.expected_stats = {
        'volume_backend_name': VOL_BACKEND_NAME,
        'vendor_name': 'KIOXIA',
        'driver_version': self.driver.VERSION,
        'storage_protocol': 'NVMeOF',
        'consistencygroup_support': False,
        'thin_provisioning_support': True,
        'multiattach': False,
        'total_capacity_gb': 1000,
        'free_capacity_gb': 600,
    }
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_info')
def test_get_kumoscale(self, mock_get_info):
    """_get_kumoscale splits the URL into address, port and token."""
    mock_get_info.return_value = success_prov_response
    ks = self.driver._get_kumoscale('https://1.2.3.4:8090', 'token', 'cert')
    self.assertEqual(ks.mgmt_ips, ['1.2.3.4'])
    self.assertEqual(ks.port, '8090')
    self.assertEqual(ks.token, 'token')
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume')
def test_volume_create_success(self, mock_create_volume):
    """create_volume forwards alias, size, uuid and protocol; returns None."""
    vol = _stub_volume()
    mock_create_volume.return_value = success_prov_response
    ret = self.driver.create_volume(vol)
    (entity,), _kwargs = mock_create_volume.call_args
    # Alias is truncated to the backend's 27-character limit.
    self.assertEqual(entity.alias, vol['name'][:27])
    self.assertEqual(entity.capacity, vol['size'])
    self.assertEqual(entity.uuid, vol['id'])
    self.assertEqual(entity.protocol, VOL_PROTOCOL)
    self.assertIsNone(ret)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume')
def test_volume_create_failure(self, mock_create_volume):
    """A failed provisioner reply raises VolumeBackendAPIException."""
    vol = _stub_volume()
    mock_create_volume.return_value = fail_prov_response
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.create_volume, vol)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume')
def test_volume_create_exception(self, mock_create_volume):
    """A provisioner exception is wrapped in VolumeBackendAPIException."""
    vol = _stub_volume()
    mock_create_volume.side_effect = Exception()
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.create_volume, vol)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume')
def test_delete_volume_success(self, mock_delete_volume):
    """delete_volume passes the volume id through and returns None."""
    vol = _stub_volume()
    mock_delete_volume.return_value = success_prov_response
    ret = self.driver.delete_volume(vol)
    mock_delete_volume.assert_any_call(vol['id'])
    self.assertIsNone(ret)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume')
def test_delete_volume_failure(self, mock_delete_volume):
    """A failed provisioner reply raises VolumeBackendAPIException."""
    vol = _stub_volume()
    mock_delete_volume.return_value = fail_prov_response
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.delete_volume, vol)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume')
def test_delete_volume_exception(self, mock_delete_volume):
    """A provisioner exception is wrapped in VolumeBackendAPIException."""
    vol = _stub_volume()
    mock_delete_volume.side_effect = Exception()
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.delete_volume, vol)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection(self, mock_host_probe,
                               mock_publish,
                               mock_get_volumes_by_uuid,
                               mock_get_targets,
                               mock_get_backend_by_id):
    """Happy path: returned data holds one replica with its portal."""
    vol = _stub_volume()
    connector = _stub_connector()
    target = TargetEntity('target.nqn', prov_backend1)
    portal = PortalEntity('1.2.3.4', 4420, 'TCP')
    portal_backend = BackendEntity([portal])
    mock_publish.return_value = success_prov_response
    mock_host_probe.return_value = success_prov_response
    mock_get_volumes_by_uuid.return_value = prov_volumes_response
    mock_get_targets.return_value = entities.ProvisionerResponse([target])
    mock_get_backend_by_id.return_value = \
        entities.ProvisionerResponse([portal_backend])
    ret = self.driver.initialize_connection(vol, connector)
    # Each provisioner step must have been invoked with connector data.
    mock_host_probe.assert_any_call(connector['nqn'], connector['uuid'],
                                    connector['host'], 'Agent',
                                    'cinder-driver-0.1', 30)
    mock_publish.assert_any_call(connector['uuid'], vol['id'])
    mock_get_volumes_by_uuid.assert_any_call(vol['id'])
    mock_get_targets.assert_any_call(connector['uuid'], vol['id'])
    mock_get_backend_by_id.assert_any_call('dummy-pid-1')
    replica = {'portals': [('1.2.3.4', '4420', 'TCP')],
               'target_nqn': 'target.nqn',
               'vol_uuid': vol['id']}
    self.assertDictEqual(ret, {
        'driver_volume_type': 'nvmeof',
        'data': {
            'vol_uuid': vol['id'],
            'alias': vol['name'],
            'writable': True,
            'volume_replicas': [replica],
        },
    })
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_host_probe_failure(self, mock_host_probe,
                                                  mock_publish,
                                                  mock_get_volumes_by_uuid,
                                                  mock_get_targets,
                                                  mock_get_backend_by_id):
    """A failed host_probe surfaces as VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    target = TargetEntity('target.nqn', prov_backend1)
    portal = PortalEntity('1.2.3.4', 4420, 'TCP')
    portal_backend = BackendEntity([portal])
    mock_publish.return_value = success_prov_response
    mock_host_probe.return_value = fail_prov_response
    mock_get_volumes_by_uuid.return_value = prov_volumes_response
    mock_get_targets.return_value = entities.ProvisionerResponse([target])
    mock_get_backend_by_id.return_value = \
        entities.ProvisionerResponse([portal_backend])
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.initialize_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_host_probe_exception(
        self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid,
        mock_get_targets, mock_get_backend_by_id):
    """A host_probe exception is wrapped in VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    target = TargetEntity('target.nqn', prov_backend1)
    portal = PortalEntity('1.2.3.4', 4420, 'TCP')
    portal_backend = BackendEntity([portal])
    mock_publish.return_value = success_prov_response
    mock_host_probe.side_effect = Exception()
    mock_get_volumes_by_uuid.return_value = prov_volumes_response
    mock_get_targets.return_value = entities.ProvisionerResponse([target])
    mock_get_backend_by_id.return_value = \
        entities.ProvisionerResponse([portal_backend])
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.initialize_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_publish_failure(self, mock_host_probe,
                                               mock_publish,
                                               mock_get_volumes_by_uuid,
                                               mock_get_targets,
                                               mock_get_backend_by_id):
    """A failed publish surfaces as VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    target = TargetEntity('target.nqn', prov_backend1)
    portal = PortalEntity('1.2.3.4', 4420, 'TCP')
    portal_backend = BackendEntity([portal])
    mock_publish.return_value = fail_prov_response
    mock_host_probe.return_value = success_prov_response
    mock_get_volumes_by_uuid.return_value = prov_volumes_response
    mock_get_targets.return_value = entities.ProvisionerResponse([target])
    mock_get_backend_by_id.return_value = \
        entities.ProvisionerResponse([portal_backend])
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.initialize_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_publish_exception(self, mock_host_probe,
                                                 mock_publish,
                                                 mock_get_volumes_by_uuid,
                                                 mock_get_targets,
                                                 mock_get_backend_by_id):
    """A publish exception is wrapped in VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    target = TargetEntity('target.nqn', prov_backend1)
    portal = PortalEntity('1.2.3.4', 4420, 'TCP')
    portal_backend = BackendEntity([portal])
    mock_publish.side_effect = Exception()
    mock_host_probe.return_value = success_prov_response
    mock_get_volumes_by_uuid.return_value = prov_volumes_response
    mock_get_targets.return_value = entities.ProvisionerResponse([target])
    mock_get_backend_by_id.return_value = \
        entities.ProvisionerResponse([portal_backend])
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.initialize_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_volumes_failure(self, mock_host_probe,
                                               mock_publish,
                                               mock_get_volumes_by_uuid,
                                               mock_get_targets,
                                               mock_get_backend_by_id):
    """A failed volume lookup surfaces as VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    target = TargetEntity('target.nqn', prov_backend1)
    portal = PortalEntity('1.2.3.4', 4420, 'TCP')
    portal_backend = BackendEntity([portal])
    mock_publish.return_value = success_prov_response
    mock_host_probe.return_value = success_prov_response
    mock_get_volumes_by_uuid.return_value = fail_prov_response
    mock_get_targets.return_value = entities.ProvisionerResponse([target])
    mock_get_backend_by_id.return_value = \
        entities.ProvisionerResponse([portal_backend])
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.initialize_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_no_volumes(self, mock_host_probe,
                                          mock_publish,
                                          mock_get_volumes_by_uuid,
                                          mock_get_targets,
                                          mock_get_backend_by_id):
    """An empty volume lookup surfaces as VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    target = TargetEntity('target.nqn', prov_backend1)
    portal = PortalEntity('1.2.3.4', 4420, 'TCP')
    portal_backend = BackendEntity([portal])
    mock_publish.return_value = success_prov_response
    mock_host_probe.return_value = success_prov_response
    mock_get_volumes_by_uuid.return_value = no_entities_prov_response
    mock_get_targets.return_value = entities.ProvisionerResponse([target])
    mock_get_backend_by_id.return_value = \
        entities.ProvisionerResponse([portal_backend])
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.initialize_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_volumes_exception(self, mock_host_probe,
                                                 mock_publish,
                                                 mock_get_volumes_by_uuid,
                                                 mock_get_targets,
                                                 mock_get_backend_by_id):
    """A volume-lookup exception is wrapped in VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    target = TargetEntity('target.nqn', prov_backend1)
    portal = PortalEntity('1.2.3.4', 4420, 'TCP')
    portal_backend = BackendEntity([portal])
    mock_publish.return_value = success_prov_response
    mock_host_probe.return_value = success_prov_response
    mock_get_volumes_by_uuid.side_effect = Exception()
    mock_get_targets.return_value = entities.ProvisionerResponse([target])
    mock_get_backend_by_id.return_value = \
        entities.ProvisionerResponse([portal_backend])
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.initialize_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_targets_failure(self, mock_host_probe,
                                               mock_publish,
                                               mock_get_volumes_by_uuid,
                                               mock_get_targets,
                                               mock_get_backend_by_id):
    """A failed targets query surfaces as VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    portal = PortalEntity('1.2.3.4', 4420, 'TCP')
    portal_backend = BackendEntity([portal])
    mock_publish.return_value = success_prov_response
    mock_host_probe.return_value = success_prov_response
    mock_get_volumes_by_uuid.return_value = prov_volumes_response
    mock_get_targets.return_value = fail_prov_response
    mock_get_backend_by_id.return_value = \
        entities.ProvisionerResponse([portal_backend])
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.initialize_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_no_targets(self, mock_host_probe,
                                          mock_publish,
                                          mock_get_volumes_by_uuid,
                                          mock_get_targets,
                                          mock_get_backend_by_id):
    """An empty targets query surfaces as VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    portal = PortalEntity('1.2.3.4', 4420, 'TCP')
    portal_backend = BackendEntity([portal])
    mock_publish.return_value = success_prov_response
    mock_host_probe.return_value = success_prov_response
    mock_get_volumes_by_uuid.return_value = prov_volumes_response
    mock_get_targets.return_value = no_entities_prov_response
    mock_get_backend_by_id.return_value = \
        entities.ProvisionerResponse([portal_backend])
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.initialize_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_targets_exception(self, mock_host_probe,
                                                 mock_publish,
                                                 mock_get_volumes_by_uuid,
                                                 mock_get_targets,
                                                 mock_get_backend_by_id):
    """A targets-query exception is wrapped in VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    portal = PortalEntity('1.2.3.4', 4420, 'TCP')
    portal_backend = BackendEntity([portal])
    mock_publish.return_value = success_prov_response
    mock_host_probe.return_value = success_prov_response
    mock_get_volumes_by_uuid.return_value = prov_volumes_response
    mock_get_targets.side_effect = Exception()
    mock_get_backend_by_id.return_value = \
        entities.ProvisionerResponse([portal_backend])
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.initialize_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_backend_failure(self, mock_host_probe,
                                               mock_publish,
                                               mock_get_volumes_by_uuid,
                                               mock_get_targets,
                                               mock_get_backend_by_id):
    """A failed backend lookup surfaces as VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    target = TargetEntity('target.nqn', prov_backend1)
    mock_publish.return_value = success_prov_response
    mock_host_probe.return_value = success_prov_response
    mock_get_volumes_by_uuid.return_value = prov_volumes_response
    mock_get_targets.return_value = entities.ProvisionerResponse([target])
    mock_get_backend_by_id.return_value = fail_prov_response
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.initialize_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_no_backend(self, mock_host_probe,
                                          mock_publish,
                                          mock_get_volumes_by_uuid,
                                          mock_get_targets,
                                          mock_get_backend_by_id):
    """An empty backend lookup surfaces as VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    target = TargetEntity('target.nqn', prov_backend1)
    mock_publish.return_value = success_prov_response
    mock_host_probe.return_value = success_prov_response
    mock_get_volumes_by_uuid.return_value = prov_volumes_response
    mock_get_targets.return_value = entities.ProvisionerResponse([target])
    mock_get_backend_by_id.return_value = no_entities_prov_response
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.initialize_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_backend_exception(self, mock_host_probe,
                                                 mock_publish,
                                                 mock_get_volumes_by_uuid,
                                                 mock_get_targets,
                                                 mock_get_backend_by_id):
    """A backend-lookup exception is wrapped in VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    target = TargetEntity('target.nqn', prov_backend1)
    mock_publish.return_value = success_prov_response
    mock_host_probe.return_value = success_prov_response
    mock_get_volumes_by_uuid.return_value = prov_volumes_response
    mock_get_targets.return_value = entities.ProvisionerResponse([target])
    mock_get_backend_by_id.side_effect = Exception()
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.initialize_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish')
def test_terminate_connection(self, mock_unpublish):
    """terminate_connection unpublishes the volume and returns None."""
    vol = _stub_volume()
    connector = _stub_connector()
    mock_unpublish.return_value = success_prov_response
    ret = self.driver.terminate_connection(vol, connector)
    mock_unpublish.assert_any_call(connector['uuid'], vol['id'])
    self.assertIsNone(ret)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish')
def test_terminate_connection_unpublish_failure(self, mock_unpublish):
    """A failed unpublish surfaces as VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    mock_unpublish.return_value = fail_prov_response
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.terminate_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish')
def test_terminate_connection_unpublish_exception(self, mock_unpublish):
    """An unpublish exception is wrapped in VolumeBackendAPIException."""
    vol = _stub_volume()
    connector = _stub_connector()
    mock_unpublish.side_effect = Exception()
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.terminate_connection, vol, connector)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants')
def test_get_volume_stats(self, mock_get_tenants):
    """Stats reflect the capacity reported by the tenant entity."""
    mock_get_tenants.return_value = entities.ProvisionerResponse(
        [TenantEntity(1000, 400)])
    ret = self.driver.get_volume_stats(True)
    mock_get_tenants.assert_any_call()
    self.assertDictEqual(ret, self.expected_stats)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants')
def test_get_volume_stats_tenants_failure(self, mock_get_tenants):
    """A failed tenants query degrades capacity fields to 'unknown'."""
    mock_get_tenants.return_value = fail_prov_response
    self.expected_stats['total_capacity_gb'] = 'unknown'
    self.expected_stats['free_capacity_gb'] = 'unknown'
    self.assertDictEqual(self.driver.get_volume_stats(True),
                         self.expected_stats)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants')
def test_get_volume_stats_no_tenants(self, mock_get_tenants):
    """An empty tenants reply degrades capacity fields to 'unknown'."""
    mock_get_tenants.return_value = no_entities_prov_response
    self.expected_stats['total_capacity_gb'] = 'unknown'
    self.expected_stats['free_capacity_gb'] = 'unknown'
    self.assertDictEqual(self.driver.get_volume_stats(True),
                         self.expected_stats)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants')
def test_get_volume_stats_tenants_exception(self, mock_get_tenants):
    """A tenants-query exception degrades capacity fields to 'unknown'."""
    mock_get_tenants.side_effect = Exception()
    self.expected_stats['total_capacity_gb'] = 'unknown'
    self.expected_stats['free_capacity_gb'] = 'unknown'
    self.assertDictEqual(self.driver.get_volume_stats(True),
                         self.expected_stats)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot')
def test_create_snapshot_success(self, mock_create_snapshot):
    """create_snapshot forwards alias, volume id and snapshot id."""
    snap = _stub_snapshot()
    mock_create_snapshot.return_value = success_prov_response
    ret = self.driver.create_snapshot(snap)
    (entity,), _kwargs = mock_create_snapshot.call_args
    self.assertEqual(entity.alias, snap['name'])
    self.assertEqual(entity.volumeID, snap['volume_id'])
    self.assertEqual(entity.snapshotID, snap['id'])
    self.assertIsNone(ret)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot')
def test_create_snapshot_failure(self, mock_create_snapshot):
    """A failed provisioner reply raises VolumeBackendAPIException."""
    snap = _stub_snapshot()
    mock_create_snapshot.return_value = fail_prov_response
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.create_snapshot, snap)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot')
def test_create_snapshot_exception(self, mock_create_snapshot):
    """A provisioner exception is wrapped in VolumeBackendAPIException."""
    snap = _stub_snapshot()
    mock_create_snapshot.side_effect = Exception()
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.create_snapshot, snap)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot')
def test_delete_snapshot_success(self, mock_delete_snapshot):
    """delete_snapshot passes the snapshot id through and returns None."""
    snap = _stub_snapshot()
    mock_delete_snapshot.return_value = success_prov_response
    ret = self.driver.delete_snapshot(snap)
    mock_delete_snapshot.assert_any_call(snap['id'])
    self.assertIsNone(ret)
|
||||
|
||||
@mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot')
def test_delete_snapshot_failure(self, mock_delete_snapshot):
    """A failed provisioner reply raises VolumeBackendAPIException."""
    snap = _stub_snapshot()
    mock_delete_snapshot.return_value = fail_prov_response
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.delete_snapshot, snap)
|
||||
|
||||
    @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot')
    def test_delete_snapshot_exception(self, mock_delete_snapshot):
        """A provisioner-side exception is wrapped in a backend exception."""
        testsnap = _stub_snapshot()
        mock_delete_snapshot.side_effect = Exception()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_snapshot, testsnap)
|
||||
|
||||
    @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume')
    def test_create_volume_from_snapshot_success(self,
                                                 mock_create_snapshot_volume):
        """The SnapshotVolumeCreate entity carries the expected fields."""
        testsnap = _stub_snapshot()
        testvol = _stub_volume()
        mock_create_snapshot_volume.return_value = success_prov_response
        result = self.driver.create_volume_from_snapshot(testvol, testsnap)
        # Inspect the entity object the driver handed to the REST client.
        args, kwargs = mock_create_snapshot_volume.call_args
        mock_call = args[0]
        self.assertEqual(mock_call.alias, testvol['name'])
        # NOTE(review): this passes because the stub snapshot's volume_id and
        # the stub volume's id both default to VOL_UUID.
        self.assertEqual(mock_call.volumeID, testsnap['volume_id'])
        self.assertEqual(mock_call.snapshotID, testsnap['id'])
        self.assertEqual(mock_call.protocol, VOL_PROTOCOL)
        self.assertIsNone(result)
|
||||
|
||||
    @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume')
    def test_create_volume_from_snapshot_failure(self,
                                                 mock_create_snapshot_volume):
        """A non-Success provisioner response raises a backend exception."""
        testsnap = _stub_snapshot()
        testvol = _stub_volume()
        mock_create_snapshot_volume.return_value = fail_prov_response
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot, testvol,
                          testsnap)
|
||||
|
||||
    @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume')
    def test_create_volume_from_snapshot_exception(
            self, mock_create_snapshot_volume):
        """A provisioner-side exception is wrapped in a backend exception."""
        testsnap = _stub_snapshot()
        testvol = _stub_volume()
        mock_create_snapshot_volume.side_effect = Exception()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot, testvol,
                          testsnap)
|
||||
|
||||
    @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume')
    def test_extend_volume_success(self, mock_expand_volume):
        """extend_volume forwards (new_size, volume id) and returns None."""
        testvol = _stub_volume()
        mock_expand_volume.return_value = success_prov_response
        new_size = VOL_SIZE + 2
        result = self.driver.extend_volume(testvol, new_size)
        mock_expand_volume.assert_any_call(new_size, testvol['id'])
        self.assertIsNone(result)
|
||||
|
||||
    @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume')
    def test_extend_volume_failure(self, mock_expand_volume):
        """A non-Success provisioner response raises a backend exception."""
        testvol = _stub_volume()
        mock_expand_volume.return_value = fail_prov_response
        new_size = VOL_SIZE + 2
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume, testvol, new_size)
|
||||
|
||||
    @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume')
    def test_extend_volume_exception(self, mock_expand_volume):
        """A provisioner-side exception is wrapped in a backend exception."""
        testvol = _stub_volume()
        mock_expand_volume.side_effect = Exception()
        new_size = VOL_SIZE + 2
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume, testvol, new_size)
|
||||
|
||||
    @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume')
    def test_create_cloned_volume_success(self, mock_clone_volume):
        """The CloneEntity passed to the client carries the volume fields."""
        testvol = _stub_volume()
        mock_clone_volume.return_value = success_prov_response
        # Clone the volume onto itself; source and destination share the stub.
        result = self.driver.create_cloned_volume(testvol, testvol)
        args, kwargs = mock_clone_volume.call_args
        mock_call = args[0]
        self.assertEqual(mock_call.alias, testvol['name'])
        self.assertEqual(mock_call.capacity, testvol['size'])
        self.assertEqual(mock_call.volumeId, testvol['id'])
        self.assertEqual(mock_call.sourceVolumeId, testvol['id'])
        self.assertIsNone(result)
|
||||
|
||||
    @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume')
    def test_create_cloned_volume_failure(self, mock_clone_volume):
        """A non-Success provisioner response raises a backend exception."""
        testvol = _stub_volume()
        mock_clone_volume.return_value = fail_prov_response
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume, testvol, testvol)
|
||||
|
||||
    @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume')
    def test_create_cloned_volume_exception(self, mock_clone_volume):
        """A provisioner-side exception is wrapped in a backend exception."""
        testvol = _stub_volume()
        mock_clone_volume.side_effect = Exception()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume, testvol, testvol)
|
||||
|
||||
    def test_convert_host_name(self):
        """Host names are converted to their hex MD5 digest."""
        name = 'ks-node3-000c2960a794-000c2960a797'
        result = self.driver._convert_host_name(name)
        expected = md5(name.encode('utf-8'), usedforsecurity=False).hexdigest()
        self.assertEqual(result, expected)
|
||||
|
||||
    def test_create_export(self):
        """create_export is a no-op for this driver."""
        result = self.driver.create_export(None, None, None)
        self.assertIsNone(result)
|
||||
|
||||
    def test_ensure_export(self):
        """ensure_export is a no-op for this driver."""
        result = self.driver.ensure_export(None, None)
        self.assertIsNone(result)
|
||||
|
||||
    def test_remove_export(self):
        """remove_export is a no-op for this driver."""
        result = self.driver.remove_export(None, None)
        self.assertIsNone(result)
|
||||
|
||||
    def test_check_for_setup_error(self):
        """check_for_setup_error reports no problems for a mocked backend."""
        result = self.driver.check_for_setup_error()
        self.assertIsNone(result)
|
||||
|
||||
|
||||
def _stub_volume(*args, **kwargs):
    """Build a minimal volume dict for driver tests.

    Any of the keys below can be overridden via keyword arguments;
    everything else falls back to the module-level test constants.
    """
    volume = {'id': kwargs.get('id', VOL_UUID),
              'name': kwargs.get('name', VOL_NAME),
              'project_id': "test-project",
              'display_name': kwargs.get('display_name', VOL_NAME),
              'size': kwargs.get('size', VOL_SIZE),
              'provider_location': kwargs.get('provider_location', None),
              'volume_type_id': kwargs.get('volume_type_id', None)}
    return volume
|
||||
|
||||
|
||||
def _stub_connector(*args, **kwargs):
    """Build a minimal connector dict for attach/detach tests."""
    defaults = {'uuid': CONN_UUID,
                'nqn': CONN_NQN,
                'host': CONN_HOST_NAME}
    return {field: kwargs.get(field, fallback)
            for field, fallback in defaults.items()}
|
||||
|
||||
|
||||
def _stub_snapshot(*args, **kwargs):
    """Build a minimal snapshot dict for driver tests.

    Bug fix: the volume_id entry previously looked up kwargs key 'id'
    (a copy-paste slip), so overriding the snapshot id also silently
    overrode the parent volume id. It now honors a 'volume_id' kwarg
    and defaults to VOL_UUID as before.
    """
    snapshot = {'id': kwargs.get('id', SNAP_UUID),
                'name': kwargs.get('name', 'snap2000'),
                'volume_id': kwargs.get('volume_id', VOL_UUID)}
    return snapshot
|
||||
|
||||
|
||||
class TenantEntity:
    """Minimal stand-in for a provisioner tenant object."""

    def __init__(self, capacity, consumed):
        # The tests only ever use the default tenant, whose id is '0'.
        vars(self).update(tenantId='0',
                          capacity=capacity,
                          consumedCapacity=consumed)
|
||||
|
||||
|
||||
class TargetEntity:
    """Minimal stand-in for a provisioner target object."""

    def __init__(self, name, backend):
        vars(self).update(targetName=name, backend=backend)
|
||||
|
||||
|
||||
class BackendEntity:
    """Minimal stand-in for a provisioner backend object."""

    def __init__(self, portals):
        vars(self).update(portals=portals)
|
||||
|
||||
|
||||
class PortalEntity:
    """Minimal stand-in for a provisioner portal (ip/port/transport)."""

    def __init__(self, ip, port, transport):
        vars(self).update(ip=ip, port=port, transport=transport)
|
||||
|
||||
|
||||
# Allow running this test module directly (outside the test runner).
if __name__ == '__main__':
    unittest.main()
|
467
cinder/volume/drivers/kioxia/entities.py
Normal file
467
cinder/volume/drivers/kioxia/entities.py
Normal file
@ -0,0 +1,467 @@
|
||||
# (c) Copyright Kioxia Corporation 2021 All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
|
||||
|
||||
class JsonClass(object):
    """Base class for provisioner REST entities.

    Provides JSON serialization of the instance attribute dict, a
    readable ``key=value`` string form, and a lenient ``__getattr__``
    that returns the string "N/A" for any attribute never set (so
    callers can probe optional response fields without guarding).
    """

    def __init__(self):
        pass

    def to_json(self):
        """Serialize all instance attributes as pretty-printed JSON."""
        return json.dumps(
            self,
            default=lambda o: o.__dict__,
            sort_keys=True,
            indent=4)

    def __str__(self):
        return ', '.join(['{key}={value}'.format(
            key=key, value=self.__dict__.get(key)) for key in self.__dict__])

    def __getattr__(self, item):
        # Only called for attributes that were never assigned.
        return "N/A"

    def set_items(self, json_object):
        """Copy scalar (non-dict) entries of a parsed JSON dict onto self.

        Bug fix: isinstance() was called with the string 'dict' as the
        classinfo argument, which raises TypeError on every invocation;
        it must receive the dict type itself.
        """
        json_keys = json_object.keys()
        for key in json_keys:
            # Nested objects are skipped; only flat values are copied.
            if not isinstance(json_object[key], dict):
                self.__dict__[key] = json_object[key]
|
||||
|
||||
|
||||
class ProvisionerResponse(JsonClass):
    #
    # Provisioner response data
    #

    def __init__(
            self,
            prov_entities,
            res_id=None,
            status=None,
            description=None,
            path=None):
        """Wrap a provisioner REST call result.

        Status defaults to "Success"; description defaults to the status.
        """
        JsonClass.__init__(self)
        self.prov_entities = prov_entities
        self.resID = res_id
        self.status = status if status is not None else "Success"
        self.description = (description if description is not None
                            else self.status)
        self.path = path

    def __str__(self):
        # A single entity is shown in full; multiple entities collapse
        # to their count.
        entity_repr = ""
        if self.prov_entities:
            count = len(self.prov_entities)
            entity_repr = self.prov_entities[0] if count == 1 else count
        fields = (entity_repr, self.resID, self.status, self.description)
        return "(" + ", ".join(str(field) for field in fields) + ")"
|
||||
|
||||
|
||||
class ProvisionerInfo(JsonClass):
    #
    # Provisioner Info data
    #

    def __init__(self, totalFreeSpace, version, syslogsBackend=None):
        """Capacity and version information reported by the provisioner."""
        self.totalFreeSpace = totalFreeSpace
        self.version = version
        self.syslogsBackend = syslogsBackend
|
||||
|
||||
|
||||
class Backend(JsonClass):
    #
    # Backend data
    #

    def __init__(
            self,
            mgmt_ips=None,
            rack=None,
            region=None,
            zone=None,
            persistentID=None,
            inUse=None,
            hostId=None,
            state=None,
            totalCapacity=None,
            availableCapacity=None,
            lastProbTime=None,
            probeInterval=None,
            totalBW=None,
            availableBW=None,
            totalIOPS=None,
            availableIOPS=None):
        """Storage backend description as exchanged with the provisioner.

        Attribute names mirror the provisioner's JSON field names
        (including the 'lastProbTime' spelling), so they must not be
        renamed.
        """
        self.mgmtIPs = mgmt_ips
        self.rack = rack
        self.region = region
        self.zone = zone
        self.persistentID = persistentID
        self.inUse = inUse
        self.state = state
        self.totalCapacity = totalCapacity
        self.availableCapacity = availableCapacity
        self.lastProbTime = lastProbTime
        self.probeInterval = probeInterval
        self.totalBW = totalBW
        self.availableBW = availableBW
        self.totalIOPS = totalIOPS
        self.availableIOPS = availableIOPS
        self.hostId = hostId
|
||||
|
||||
|
||||
class Replica(JsonClass):
    #
    # Replica placement data
    #

    def __init__(self, sameRackAllowed, racks, regions, zones):
        """Placement constraints for volume replicas."""
        self.sameRackAllowed = sameRackAllowed
        self.racks = racks
        self.regions = regions
        self.zones = zones
|
||||
|
||||
|
||||
class Location(JsonClass):
    #
    # Location data
    #

    def __init__(
            self,
            uuid=None,
            backend=None,
            replicaState=None,
            currentStateTime=None):
        """Placement/state of one replica of a volume."""
        self.uuid = uuid
        self.backend = backend
        self.replicaState = replicaState
        self.currentStateTime = currentStateTime
|
||||
|
||||
|
||||
class VolumeProv(JsonClass):
    #
    # Provisioner Volume data
    #

    def __init__(
            self,
            uuid=None,
            alias=None,
            capacity=None,
            numReplicas=None,
            maxIOPS=None,
            desiredIOPS=None,
            maxBW=None,
            desiredBW=None,
            blockSize=None,
            maxReplicaDownTime=None,
            snapshotID=None,
            writable=None,
            reservedSpace=None,
            location=None):
        """Full volume record as reported by the provisioner."""
        self.uuid = uuid
        self.alias = alias
        self.capacity = capacity
        self.numReplicas = numReplicas
        self.maxIOPS = maxIOPS
        self.desiredIOPS = desiredIOPS
        self.maxBW = maxBW
        self.desiredBW = desiredBW
        self.blockSize = blockSize
        self.maxReplicaDownTime = maxReplicaDownTime
        self.snapshotID = snapshotID
        self.writable = writable
        # NOTE: parameter 'reservedSpace' maps to the JSON field
        # 'reservedSpacePercentage'.
        self.reservedSpacePercentage = reservedSpace
        self.location = location
|
||||
|
||||
|
||||
class StorageClass(JsonClass):
    #
    # Provisioner Storage Class
    #

    def __init__(
            self,
            replicas,
            racks=None,
            regions=None,
            zones=None,
            blockSize=None,
            maxIOPSPerGB=None,
            desiredIOPSPerGB=None,
            maxBWPerGB=None,
            desiredBWPerGB=None,
            sameRackAllowed=None,
            maxReplicaDownTime=None,
            hostId=None,
            spanAllowed=None,
            name=None,
            shareSSDBetweenVolumes=None):
        """Storage-class request body for volume creation.

        Only parameters actually supplied become attributes, so the
        serialized JSON omits fields the caller left unset.
        """
        self.numReplicas = replicas
        optional_fields = (
            ('racks', racks),
            ('regions', regions),
            ('zones', zones),
            ('blockSize', blockSize),
            ('maxIOPSPerGB', maxIOPSPerGB),
            ('desiredIOPSPerGB', desiredIOPSPerGB),
            ('maxBWPerGB', maxBWPerGB),
            ('desiredBWPerGB', desiredBWPerGB),
            ('sameRackAllowed', sameRackAllowed),
            ('maxReplicaDownTime', maxReplicaDownTime),
            ('hostId', hostId),
            # The provisioner's JSON field is 'allowSpan'.
            ('allowSpan', spanAllowed),
            ('name', name),
            ('shareSSDBetweenVolumes', shareSSDBetweenVolumes),
        )
        for attr_name, value in optional_fields:
            if value is not None:
                setattr(self, attr_name, value)
|
||||
|
||||
|
||||
class VolumeCreate(JsonClass):
    #
    # Provisioner Volume data for Create operation
    #

    def __init__(
            self,
            alias,
            capacity,
            storage_class,
            prov_type,
            reserved_space=None,
            protocol=None,
            uuid=None):
        """Volume-creation request body.

        Optional fields are only materialized when supplied, so unset
        fields are absent from the serialized JSON.
        """
        self.alias = alias
        self.capacity = capacity
        self.storageClass = storage_class
        self.provisioningType = prov_type
        for attr_name, value in (
                ('reservedSpacePercentage', reserved_space),
                ('protocol', protocol),
                ('uuid', uuid)):
            if value is not None:
                setattr(self, attr_name, value)
|
||||
|
||||
|
||||
class SyslogEntity(JsonClass):
    #
    # Syslog Entity object
    #

    def __init__(
            self,
            name=None,
            url=None,
            state=None,
            useTls=None,
            certFileName=None):
        """Remote syslog target configuration entry."""
        self.name = name
        self.url = url
        self.state = state
        self.useTls = useTls
        self.certFileName = certFileName
|
||||
|
||||
|
||||
class SnapshotCreate(JsonClass):
    #
    # Provisioner Snapshot data for Create operation
    #

    def __init__(
            self,
            alias,
            volumeID,
            reservedSpacePercentage=None,
            snapshotID=None):
        """Snapshot-creation request body.

        Optional fields are only materialized when supplied.
        """
        self.alias = alias
        self.volumeID = volumeID
        for attr_name, value in (
                ('reservedSpacePercentage', reservedSpacePercentage),
                ('snapshotID', snapshotID)):
            if value is not None:
                setattr(self, attr_name, value)
|
||||
|
||||
|
||||
class SnapshotEntity(JsonClass):
    #
    # Provisioner Snapshot Entity data for Show operation
    #

    def __init__(
            self,
            alias=None,
            snapshotID=None,
            reservedSpace=None,
            volumeID=None,
            capacity=None,
            timestamp=None):
        """Snapshot record as returned by the provisioner 'show' API."""
        self.alias = alias
        self.volumeID = volumeID
        self.reservedSpace = reservedSpace
        self.snapshotID = snapshotID
        self.capacity = capacity
        self.timestamp = timestamp
|
||||
|
||||
|
||||
class SnapshotVolumeCreate(JsonClass):
    #
    # Provisioner Snapshot Volume data for Create operation
    #

    def __init__(
            self,
            alias,
            snapshotID,
            writable,
            reservedSpacePercentage=None,
            volumeID=None,
            maxIOPSPerGB=None,
            maxBWPerGB=None,
            protocol=None,
            spanAllowed=None,
            storageClassName=None):
        """Request body for creating a volume from a snapshot.

        Optional fields are only materialized when supplied, so unset
        fields are absent from the serialized JSON.
        """
        self.alias = alias
        self.snapshotID = snapshotID
        self.writable = writable
        optional_fields = (
            ('reservedSpacePercentage', reservedSpacePercentage),
            ('volumeID', volumeID),
            ('maxIOPSPerGB', maxIOPSPerGB),
            ('maxBWPerGB', maxBWPerGB),
            ('protocol', protocol),
            # The provisioner's JSON field is 'allowSpan'.
            ('allowSpan', spanAllowed),
            ('storageClassName', storageClassName),
        )
        for attr_name, value in optional_fields:
            if value is not None:
                setattr(self, attr_name, value)
|
||||
|
||||
|
||||
class ForwardEntity(JsonClass):
    #
    # Provisioner Forward Entity data
    #

    def __init__(
            self,
            loggingType,
            level,
            host,
            appName,
            message,
            parametersList):
        """Log-forwarding record sent to the provisioner."""
        self.loggingType = loggingType
        self.level = level
        self.host = host
        self.appName = appName
        self.message = message
        self.parametersList = parametersList
|
||||
|
||||
|
||||
class LicenseEntity(JsonClass):
    #
    # Provisioner License Entity data
    #

    def __init__(
            self,
            license_type=None,
            expirationDate=None,
            maxBackends=None):
        """License record; parameter 'license_type' maps to JSON field
        'type' (renamed to avoid shadowing the builtin)."""
        self.type = license_type
        self.expirationDate = expirationDate
        self.maxBackends = maxBackends
|
||||
|
||||
|
||||
class HostEntity(JsonClass):
    #
    # Provisioner Host Entity data
    #

    def __init__(
            self,
            nqn=None,
            uuid=None,
            name=None,
            clientType=None,
            version=None,
            state=None,
            lastProbeTime=None,
            duration=None):
        """Initiator host record as exchanged with the provisioner."""
        self.nqn = nqn
        self.uuid = uuid
        self.name = name
        self.clientType = clientType
        self.version = version
        self.state = state
        self.lastProbeTime = lastProbeTime
        self.duration = duration
|
||||
|
||||
|
||||
class TargetEntity(JsonClass):
    #
    # Provisioner Target Entity data for Show operation
    #

    def __init__(self, alias=None):
        """Only the alias is modeled; other fields arrive via set_items."""
        self.alias = alias
|
||||
|
||||
|
||||
class TenantEntity(JsonClass):
    #
    # Provisioner Tenant Entity data for Show operation
    #

    def __init__(self, capacity, iops, bw, uuid=None, name=None):
        """Tenant record; uuid maps to JSON field 'tenantId'.

        Optional fields are only materialized when supplied.
        """
        self.capacity = capacity
        self.totalIOPS = iops
        self.totalBW = bw
        for attr_name, value in (('tenantId', uuid), ('name', name)):
            if value is not None:
                setattr(self, attr_name, value)
|
||||
|
||||
|
||||
class CloneEntity(JsonClass):
    #
    # Provisioner Clone Entity data
    #

    def __init__(self, sourceVolumeId, alias, volumeId=None,
                 reservedSpacePercentage=None,
                 capacity=None):
        """Clone-volume request body.

        Optional fields are only materialized when supplied.
        """
        self.sourceVolumeId = sourceVolumeId
        self.alias = alias
        for attr_name, value in (
                ('volumeId', volumeId),
                ('reservedSpacePercentage', reservedSpacePercentage),
                ('capacity', capacity)):
            if value is not None:
                setattr(self, attr_name, value)
|
490
cinder/volume/drivers/kioxia/kumoscale.py
Normal file
490
cinder/volume/drivers/kioxia/kumoscale.py
Normal file
@ -0,0 +1,490 @@
|
||||
# (c) Copyright Kioxia Corporation 2021 All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Volume driver for KIOXIA KumoScale NVMeOF storage system."""
|
||||
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils.secretutils import md5
|
||||
|
||||
from cinder import exception
|
||||
from cinder.i18n import _
|
||||
from cinder import interface
|
||||
from cinder.volume import driver
|
||||
from cinder.volume.drivers.kioxia import entities
|
||||
from cinder.volume.drivers.kioxia import rest_client
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Driver configuration options, registered both globally (below) and on
# the per-backend configuration object in the driver's __init__.
KUMOSCALE_OPTS = [
    cfg.StrOpt("kioxia_url", help="KumoScale provisioner REST API URL"),
    cfg.StrOpt("kioxia_cafile", help="Cert for provisioner REST API SSL"),
    cfg.StrOpt("kioxia_token", help="KumoScale Provisioner auth token."),
    cfg.IntOpt(
        "kioxia_num_replicas", default=1,
        help="Number of volume replicas."),
    cfg.IntOpt(
        "kioxia_max_iops_per_gb", default=0, help="Upper limit for IOPS/GB."),
    cfg.IntOpt(
        "kioxia_desired_iops_per_gb", default=0, help="Desired IOPS/GB."),
    cfg.IntOpt(
        "kioxia_max_bw_per_gb", default=0,
        help="Upper limit for bandwidth in B/s per GB."),
    cfg.IntOpt(
        "kioxia_desired_bw_per_gb", default=0,
        help="Desired bandwidth in B/s per GB."),
    cfg.BoolOpt(
        "kioxia_same_rack_allowed", default=False,
        help="Can more than one replica be allocated to same rack."),
    cfg.IntOpt(
        "kioxia_block_size", default=4096,
        help="Volume block size in bytes - 512 or 4096 (Default)."),
    cfg.BoolOpt(
        "kioxia_writable", default=False,
        help="Volumes from snapshot writeable or not."),
    cfg.StrOpt(
        "kioxia_provisioning_type", default="THICK",
        choices=[
            ('THICK', 'Thick provisioning'), ('THIN', 'Thin provisioning')],
        help="Thin or thick volume, Default thick."),
    cfg.IntOpt(
        "kioxia_vol_reserved_space_percentage", default=0,
        help="Thin volume reserved capacity allocation percentage."),
    cfg.IntOpt(
        "kioxia_snap_reserved_space_percentage", default=0,
        help="Percentage of the parent volume to be used for log."),
    cfg.IntOpt(
        "kioxia_snap_vol_reserved_space_percentage", default=0,
        help="Writable snapshot percentage of parent volume used for log."),
    cfg.IntOpt(
        "kioxia_max_replica_down_time", default=0,
        help="Replicated volume max downtime for replica in minutes."),
    cfg.BoolOpt(
        "kioxia_span_allowed", default=True,
        help="Allow span - Default True."),
    cfg.BoolOpt(
        "kioxia_snap_vol_span_allowed", default=True,
        help="Allow span in snapshot volume - Default True.")
]

CONF = cfg.CONF
CONF.register_opts(KUMOSCALE_OPTS)
|
||||
|
||||
|
||||
@interface.volumedriver
class KumoScaleBaseVolumeDriver(driver.BaseVD):
    """Performs volume management on KumoScale Provisioner.

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver version.
    """

    VERSION = '1.0.0'
    # Third-party CI wiki name used by the Cinder driver-compliance checks.
    CI_WIKI_NAME = 'KIOXIA_CI'
    # Provisioner REST API versions this driver is known to work with.
    SUPPORTED_REST_API_VERSIONS = ['1.0', '1.1']
|
||||
|
||||
    def __init__(self, *args, **kwargs):
        """Read driver configuration and build the provisioner REST client."""
        super(KumoScaleBaseVolumeDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(KUMOSCALE_OPTS)
        self._backend_name = (
            self.configuration.volume_backend_name or self.__class__.__name__)
        # REST client for the KumoScale provisioner endpoint.
        self.kumoscale = self._get_kumoscale(
            self.configuration.safe_get("kioxia_url"),
            self.configuration.safe_get("kioxia_token"),
            self.configuration.safe_get("kioxia_cafile"))

        # Cache all volume-placement / QoS options as attributes so the
        # per-request entity construction does not re-read configuration.
        self.num_replicas = self.configuration.safe_get("kioxia_num_replicas")
        self.same_rack_allowed = self.configuration.safe_get(
            "kioxia_same_rack_allowed")
        self.max_iops_per_gb = self.configuration.safe_get(
            "kioxia_max_iops_per_gb")
        self.desired_iops_per_gb = self.configuration.safe_get(
            "kioxia_desired_iops_per_gb")
        self.max_bw_per_gb = self.configuration.safe_get(
            "kioxia_max_bw_per_gb")
        self.desired_bw_per_gb = self.configuration.safe_get(
            "kioxia_desired_bw_per_gb")
        self.block_size = self.configuration.safe_get("kioxia_block_size")
        self.writable = self.configuration.safe_get("kioxia_writable")
        self.provisioning_type = self.configuration.safe_get(
            "kioxia_provisioning_type")
        self.vol_reserved_space_percentage = self.configuration.safe_get(
            "kioxia_vol_reserved_space_percentage")
        self.snap_vol_reserved_space_percentage = self.configuration.safe_get(
            "kioxia_snap_vol_reserved_space_percentage")
        self.snap_reserved_space_percentage = self.configuration.safe_get(
            "kioxia_snap_reserved_space_percentage")
        self.max_replica_down_time = self.configuration.safe_get(
            "kioxia_max_replica_down_time")
        self.span_allowed = self.configuration.safe_get("kioxia_span_allowed")
        self.snap_vol_span_allowed = self.configuration.safe_get(
            "kioxia_snap_vol_span_allowed")
|
||||
|
||||
    @staticmethod
    def get_driver_options():
        """Return the config options consumed by this driver (oslo hook)."""
        return KUMOSCALE_OPTS
|
||||
|
||||
    def _get_kumoscale(self, url, token, cert):
        """Returns an initialized rest client

        NOTE(review): assumes kioxia_url has the exact form
        "scheme://host:port" — a URL without an explicit port, or with an
        IPv6 literal, would break this split-based parsing. TODO confirm
        the config contract before generalizing.
        """
        url_strs = url.split(":")
        ip_str = url_strs[1]            # "//host"
        ip_strs = ip_str.split("//")
        ip = ip_strs[1]                 # bare host
        port = url_strs[2]              # port, kept as a string
        kumoscale = rest_client.KioxiaProvisioner([ip], cert, token, port)
        return kumoscale
|
||||
|
||||
    def create_volume(self, volume):
        """Create the volume

        Builds a StorageClass from the cached config options, wraps it in
        a VolumeCreate request, and submits it to the provisioner.

        :raises VolumeBackendAPIException: on REST error or a non-Success
            provisioner status.
        """
        volume_name = volume["name"]
        volume_uuid = volume["id"]
        volume_size = volume["size"]
        # The availability zone, when present, constrains replica placement.
        zone_list = None if 'availability_zone' not in volume else [
            volume['availability_zone']]

        if self.num_replicas > 1 and len(volume_name) > 27:
            volume_name = volume_name[:27]  # workaround for limitation
        storage_class = entities.StorageClass(
            self.num_replicas, None, None, zone_list, self.block_size,
            self.max_iops_per_gb, self.desired_iops_per_gb, self.max_bw_per_gb,
            self.desired_bw_per_gb, self.same_rack_allowed,
            self.max_replica_down_time, None, self.span_allowed)
        ks_volume = entities.VolumeCreate(
            volume_name, volume_size, storage_class, self.provisioning_type,
            self.vol_reserved_space_percentage, 'NVMeoF', volume_uuid)

        try:
            result = self.kumoscale.create_volume(ks_volume)
        except Exception as e:
            msg = (_("Volume %(volname)s creation exception: %(txt)s") %
                   {'volname': volume_name, 'txt': str(e)})
            raise exception.VolumeBackendAPIException(data=msg)

        if result.status != 'Success':
            raise exception.VolumeBackendAPIException(data=result.description)
|
||||
|
||||
def delete_volume(self, volume):
|
||||
"""Delete the volume"""
|
||||
volume_uuid = volume["id"]
|
||||
|
||||
try:
|
||||
result = self.kumoscale.delete_volume(volume_uuid)
|
||||
except Exception as e:
|
||||
msg = (_("Volume %(voluuid)s deletion exception: %(txt)s") %
|
||||
{'voluuid': volume_uuid, 'txt': str(e)})
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
|
||||
if result.status not in ('Success', 'DeviceNotFound', 'NotExists'):
|
||||
raise exception.VolumeBackendAPIException(data=result.description)
|
||||
|
||||
    def create_snapshot(self, snapshot):
        """Create a snapshot of an existing volume on the provisioner.

        :raises VolumeBackendAPIException: on REST error or a non-Success
            provisioner status.
        """
        snapshot_name = snapshot['name']
        snapshot_uuid = snapshot['id']
        volume_uuid = snapshot['volume_id']
        ks_snapshot = entities.SnapshotCreate(
            snapshot_name, volume_uuid,
            self.snap_reserved_space_percentage, snapshot_uuid)

        try:
            result = self.kumoscale.create_snapshot(ks_snapshot)
        except Exception as e:
            msg = (_("Snapshot %(snapname)s creation exception: %(txt)s") %
                   {'snapname': snapshot_name, 'txt': str(e)})
            raise exception.VolumeBackendAPIException(data=msg)

        if result.status != 'Success':
            raise exception.VolumeBackendAPIException(data=result.description)
|
||||
|
||||
    def delete_snapshot(self, snapshot):
        """Delete a snapshot on the provisioner.

        A snapshot that is already gone (DeviceNotFound/NotExists) is
        treated as successfully deleted.
        """
        snapshot_uuid = snapshot['id']

        try:
            result = self.kumoscale.delete_snapshot(snapshot_uuid)
        except Exception as e:
            msg = (_("Snapshot %(snapuuid)s deletion exception: %(txt)s") %
                   {'snapuuid': snapshot_uuid, 'txt': str(e)})
            raise exception.VolumeBackendAPIException(data=msg)

        if result.status not in ('Success', 'DeviceNotFound', 'NotExists'):
            raise exception.VolumeBackendAPIException(data=result.description)
|
||||
|
||||
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a new volume backed by an existing snapshot.

        Reserved log space is only allocated when the new volume is
        writable (kioxia_writable option).
        """
        volume_name = volume["name"]
        volume_uuid = volume["id"]
        snapshot_uuid = snapshot["id"]
        if self.writable:
            reserved_space_percentage = self.snap_vol_reserved_space_percentage
        else:
            reserved_space_percentage = 0

        ks_snapshot_volume = entities.SnapshotVolumeCreate(
            volume_name, snapshot_uuid, self.writable,
            reserved_space_percentage, volume_uuid,
            self.max_iops_per_gb, self.max_bw_per_gb, 'NVMeoF',
            self.snap_vol_span_allowed)

        try:
            result = self.kumoscale.create_snapshot_volume(ks_snapshot_volume)
        except Exception as e:
            msg = (_("Volume %(volname)s from snapshot exception: %(txt)s") %
                   {'volname': volume_name, 'txt': str(e)})
            raise exception.VolumeBackendAPIException(data=msg)

        if result.status != 'Success':
            raise exception.VolumeBackendAPIException(data=result.description)
|
||||
|
||||
def initialize_connection(self, volume, connector, initiator_data=None):
    """Connect the initiator to a volume.

    Registers the connector host with the provisioner (host_probe),
    publishes the volume to it, then builds the NVMeoF connection
    properties: one replica entry per target, each with its backend's
    portal list.

    :param volume: volume to attach (``id`` and ``name`` are used).
    :param connector: connector dict; reads ``uuid``, ``nqn`` and
        ``host``.
    :param initiator_data: unused.
    :returns: dict with ``driver_volume_type`` 'nvmeof' and the
        connection ``data``.
    :raises VolumeBackendAPIException: on any transport failure or
        unexpected provisioner status.
    """
    host_uuid = connector['uuid']
    ks_volume = None
    targets = []
    volume_replicas = []
    volume_uuid = volume['id']
    volume_name = volume['name']

    # Register (or refresh) the host record on the provisioner so the
    # publish below has a host to publish to.
    try:
        result = self.kumoscale.host_probe(
            connector['nqn'], connector['uuid'],
            KumoScaleBaseVolumeDriver._convert_host_name(
                connector['host']),
            'Agent', 'cinder-driver-0.1', 30)
    except Exception as e:
        msg = (_("Host %(uuid)s host_probe exception: %(txt)s") %
               {'uuid': connector['uuid'], 'txt': str(e)})
        raise exception.VolumeBackendAPIException(data=msg)

    if result.status != 'Success':
        msg = (_("host_probe for %(uuid)s failed with %(txt)s") %
               {'uuid': connector['uuid'], 'txt': result.description})
        raise exception.VolumeBackendAPIException(data=msg)

    # Publish the volume to the host; re-attaching an already
    # published volume is not an error.
    try:
        result = self.kumoscale.publish(host_uuid, volume_uuid)
    except Exception as e:
        msg = (_("Volume %(voluuid)s publish exception: %(txt)s") %
               {'voluuid': volume_uuid, 'txt': str(e)})
        raise exception.VolumeBackendAPIException(data=msg)

    if result.status != "Success" and result.status != 'AlreadyPublished':
        raise exception.VolumeBackendAPIException(data=result.description)

    # Fetch the provisioner-side volume entity (needed for its
    # replica locations and writable flag).
    try:
        result = self.kumoscale.get_volumes_by_uuid(volume_uuid)
    except Exception as e:
        msg = (_("Volume %(voluuid)s fetch exception: %(txt)s") %
               {'voluuid': volume_uuid, 'txt': str(e)})
        raise exception.VolumeBackendAPIException(data=msg)

    if result.status == "Success":
        if len(result.prov_entities) == 0:
            raise exception.VolumeBackendAPIException(
                data=_("Volume %s not found") % volume_uuid)
        else:
            ks_volume = result.prov_entities[0]
    else:
        msg = (_("get_volumes_by_uuid for %(uuid)s failed with %(txt)s") %
               {'uuid': volume_uuid, 'txt': result.description})
        raise exception.VolumeBackendAPIException(data=msg)

    # Targets through which this host can reach the volume.
    try:
        result = self.kumoscale.get_targets(host_uuid, ks_volume.uuid)
    except Exception as e:
        msg = (_("Volume %(voluuid)s get targets exception: %(txt)s") %
               {'voluuid': volume_uuid, 'txt': str(e)})
        raise exception.VolumeBackendAPIException(data=msg)

    # NOTE(review): a non-Success status here is not raised
    # immediately; it is only caught by the status check after the
    # loop below (which never runs when targets stays empty).
    if result.status == "Success":
        if len(result.prov_entities) == 0:
            raise exception.VolumeBackendAPIException(
                data=_("Volume %s targets not found") % ks_volume.uuid)
        else:
            targets = result.prov_entities

    ks_volume_replicas = ks_volume.location
    for i in range(len(targets)):
        persistent_id = str(targets[i].backend.persistentID)

        # Resolve the backend that hosts this target to get its
        # portal (ip, port, transport) list.
        try:
            result = self.kumoscale.get_backend_by_id(persistent_id)
        except Exception as e:
            msg = (_("Backend %(backpid)s exception: %(txt)s") %
                   {'backpid': persistent_id, 'txt': str(e)})
            raise exception.VolumeBackendAPIException(data=msg)

        if result.status == "Success":
            if len(result.prov_entities) == 0:
                raise exception.VolumeBackendAPIException(
                    data=_("Backend %s not found") % persistent_id)
            else:
                backend = result.prov_entities[0]
        else:
            msg = (_("get_backend_by_id for %(pid)s failed with %(txt)s") %
                   {'pid': persistent_id, 'txt': result.description})
            raise exception.VolumeBackendAPIException(data=msg)

        # Stringify every portal triple for the connection properties.
        str_portals = []
        for p in range(len(backend.portals)):
            portal = backend.portals[p]
            portal_ip = str(portal.ip)
            portal_port = str(portal.port)
            portal_transport = str(portal.transport)
            str_portals.append(
                (portal_ip, portal_port, portal_transport))

        # Find the replica living on the same backend as this target.
        # NOTE(review): if no replica matches, the loop falls through
        # with ks_replica still bound to the LAST replica examined
        # (or unbound if the location list is empty) — confirm the
        # provisioner guarantees a match for every target.
        for j in range(len(ks_volume_replicas)):
            ks_replica = ks_volume_replicas[j]
            if str(ks_replica.backend.persistentID) == persistent_id:
                break

        replica = dict()
        replica['vol_uuid'] = ks_replica.uuid
        replica['target_nqn'] = str(targets[i].targetName)
        replica['portals'] = str_portals

        volume_replicas.append(replica)

    # workaround for limitation: multi-replica aliases are truncated
    # to 27 characters.
    if len(volume_replicas) > 1:  # workaround for limitation
        volume_name = volume_name[:27]

    data = {
        'vol_uuid': volume_uuid,
        'alias': volume_name,
        'writable': ks_volume.writable,
        'volume_replicas': volume_replicas
    }

    # NOTE(review): `result` here is whichever API call ran last
    # (get_targets when there were no targets, otherwise the final
    # get_backend_by_id) — this re-checks that call's status.
    if result.status != 'Success':
        raise exception.VolumeBackendAPIException(data=result.description)

    return {
        'driver_volume_type': 'nvmeof',
        'data': data
    }
|
||||
|
||||
@staticmethod
|
||||
def _convert_host_name(name):
|
||||
if name is None:
|
||||
return ""
|
||||
if len(name) > 32:
|
||||
name = md5(name.encode('utf-8'), usedforsecurity=False).hexdigest()
|
||||
else:
|
||||
name = name.replace('.', '-').lower()
|
||||
return name
|
||||
|
||||
def terminate_connection(self, volume, connector, **kwargs):
    """Terminate connection.

    Unpublishes the volume from the connector host; a missing
    connector unpublishes from all hosts (``host_uuid=None``).

    :raises VolumeBackendAPIException: on transport failure or an
        unexpected provisioner status.
    """
    volume_uuid = volume['id']
    host_uuid = connector['uuid'] if connector else None

    try:
        result = self.kumoscale.unpublish(host_uuid, volume_uuid)
    except Exception as e:
        msg = (_("Volume %(voluuid)s unpublish exception: %(txt)s") %
               {'voluuid': volume_uuid, 'txt': str(e)})
        raise exception.VolumeBackendAPIException(data=msg)

    # A volume that was never published is fine — detach stays
    # idempotent.
    if result.status not in ('Success', 'VolumeNotPublished'):
        raise exception.VolumeBackendAPIException(data=result.description)
|
||||
|
||||
def _update_volume_stats(self):
    """Refresh ``self._stats`` from the provisioner.

    Capacity figures come from the default tenant (tenantId "0");
    when the tenant query fails the capacities stay 'unknown' and the
    error is only logged, so stats reporting never raises.
    """
    stats = {
        'volume_backend_name': self._backend_name,
        'vendor_name': 'KIOXIA',
        'driver_version': self.VERSION,
        'storage_protocol': 'NVMeOF',
        'total_capacity_gb': 'unknown',
        'free_capacity_gb': 'unknown',
        'consistencygroup_support': False,
        'thin_provisioning_support': True,
        'multiattach': False,
    }

    result = None
    tenants = []
    try:
        result = self.kumoscale.get_tenants()
    except Exception as e:
        # Best effort: log and fall through with 'unknown' capacities.
        msg = _("Get tenants exception: %s") % str(e)
        LOG.exception(msg)

    if result and result.status == "Success":
        if len(result.prov_entities) == 0:
            LOG.error("No kumoscale tenants")
        else:
            tenants = result.prov_entities
    elif result:
        LOG.error("Get tenants API error: %s", result.description)

    # The default tenant carries the aggregate capacity numbers.
    default_tenant = next(
        (tenant for tenant in tenants if tenant.tenantId == "0"), None)

    if default_tenant:
        total_capacity = default_tenant.capacity
        free_capacity = total_capacity - default_tenant.consumedCapacity
        stats['total_capacity_gb'] = total_capacity
        stats['free_capacity_gb'] = free_capacity

    self._stats = stats
|
||||
|
||||
def extend_volume(self, volume, new_size):
    """Grow an existing volume to ``new_size``.

    :raises VolumeBackendAPIException: on transport failure or a
        non-Success provisioner status.
    """
    volume_uuid = volume["id"]

    try:
        result = self.kumoscale.expand_volume(new_size, volume_uuid)
    except Exception as e:
        msg = (_("Volume %(volid)s expand exception: %(txt)s") %
               {'volid': volume_uuid, 'txt': str(e)})
        raise exception.VolumeBackendAPIException(data=msg)

    if result.status != 'Success':
        raise exception.VolumeBackendAPIException(data=result.description)
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
    """Clone ``src_vref`` into the new ``volume``.

    :raises VolumeBackendAPIException: on transport failure or a
        non-Success provisioner status.
    """
    clone_request = entities.CloneEntity(
        src_vref['id'], volume['name'],
        volumeId=volume['id'],
        capacity=volume['size'])

    try:
        result = self.kumoscale.clone_volume(clone_request)
    except Exception as e:
        msg = (_("Volume %(volid)s clone exception: %(txt)s") %
               {'volid': volume["id"], 'txt': str(e)})
        raise exception.VolumeBackendAPIException(data=msg)

    if result.status != 'Success':
        raise exception.VolumeBackendAPIException(data=result.description)
|
||||
|
||||
def create_export(self, context, volume, connector):
    """No-op: export is handled at attach time by initialize_connection."""
    pass
|
||||
|
||||
def ensure_export(self, context, volume):
    """No-op: this driver keeps no host-side export state to restore."""
    pass
|
||||
|
||||
def remove_export(self, context, volume):
    """No-op: nothing is created by create_export, so nothing to remove."""
    pass
|
||||
|
||||
def check_for_setup_error(self):
    """No-op: no additional setup validation is performed by this driver."""
    pass
|
1055
cinder/volume/drivers/kioxia/rest_client.py
Normal file
1055
cinder/volume/drivers/kioxia/rest_client.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,67 @@
|
||||
==============================
|
||||
KIOXIA Kumoscale NVMeOF Driver
|
||||
==============================
|
||||
|
||||
KIOXIA Kumoscale volume driver provides OpenStack Compute instances
|
||||
with access to KIOXIA Kumoscale NVMeOF storage systems.
|
||||
|
||||
This documentation explains how to configure Cinder for use with the
|
||||
KIOXIA Kumoscale storage backend system.
|
||||
|
||||
Driver options
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
The following table contains the configuration options supported by the
|
||||
KIOXIA Kumoscale NVMeOF driver.
|
||||
|
||||
.. config-table::
|
||||
:config-target: KIOXIA Kumoscale
|
||||
|
||||
cinder.volume.drivers.kioxia.kumoscale
|
||||
|
||||
Supported operations
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
- Create, list, delete, attach and detach volumes
|
||||
- Create, list and delete volume snapshots
|
||||
- Create a volume from a snapshot
|
||||
- Copy an image to a volume.
|
||||
- Copy a volume to an image.
|
||||
- Clone a volume
|
||||
- Extend a volume
|
||||
|
||||
Configure KIOXIA Kumoscale NVMeOF backend
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This section details the steps required to configure the KIOXIA Kumoscale
|
||||
storage cinder driver.
|
||||
|
||||
#. In the ``cinder.conf`` configuration file under the ``[DEFAULT]``
|
||||
section, set the enabled_backends parameter.
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[DEFAULT]
|
||||
enabled_backends = kumoscale-1
|
||||
|
||||
|
||||
#. Add a backend group section for the backend group specified
|
||||
in the enabled_backends parameter.
|
||||
|
||||
#. In the newly created backend group section, set the
|
||||
following configuration options:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[kumoscale-1]
|
||||
# Backend name
|
||||
volume_backend_name=kumoscale-1
|
||||
# The driver path
|
||||
volume_driver=cinder.volume.drivers.kioxia.kumoscale.KumoScaleBaseVolumeDriver
|
||||
# Kumoscale provisioner URL
|
||||
kioxia_url=https://70.0.0.13:30100
|
||||
# Kumoscale provisioner cert file
|
||||
kioxia_cafile=/etc/kioxia/ssdtoolbox.pem
|
||||
# Kumoscale provisioner token
|
||||
token=eyJhbGciOiJIUzI1NiJ9...
|
@ -114,6 +114,9 @@ title=Inspur AS13000 Storage Driver (iSCSI)
|
||||
[driver.kaminario]
|
||||
title=Kaminario Storage Driver (iSCSI, FC)
|
||||
|
||||
[driver.kioxia_kumoscale]
|
||||
title=Kioxia Kumoscale Driver (NVMeOF)
|
||||
|
||||
[driver.lenovo]
|
||||
title=Lenovo Storage Driver (FC, iSCSI)
|
||||
|
||||
@ -235,6 +238,7 @@ driver.infortrend=complete
|
||||
driver.inspur=complete
|
||||
driver.inspur_as13000=complete
|
||||
driver.kaminario=complete
|
||||
driver.kioxia_kumoscale=complete
|
||||
driver.lenovo=complete
|
||||
driver.linbit_linstor=complete
|
||||
driver.lvm=complete
|
||||
@ -300,6 +304,7 @@ driver.infortrend=complete
|
||||
driver.inspur=complete
|
||||
driver.inspur_as13000=complete
|
||||
driver.kaminario=complete
|
||||
driver.kioxia_kumoscale=complete
|
||||
driver.lenovo=complete
|
||||
driver.linbit_linstor=complete
|
||||
driver.lvm=complete
|
||||
@ -365,6 +370,7 @@ driver.infortrend=missing
|
||||
driver.inspur=complete
|
||||
driver.inspur_as13000=missing
|
||||
driver.kaminario=missing
|
||||
driver.kioxia_kumoscale=missing
|
||||
driver.lenovo=missing
|
||||
driver.linbit_linstor=missing
|
||||
driver.lvm=missing
|
||||
@ -433,6 +439,7 @@ driver.infortrend=missing
|
||||
driver.inspur=complete
|
||||
driver.inspur_as13000=missing
|
||||
driver.kaminario=missing
|
||||
driver.kioxia_kumoscale=missing
|
||||
driver.lenovo=missing
|
||||
driver.linbit_linstor=missing
|
||||
driver.lvm=missing
|
||||
@ -500,6 +507,7 @@ driver.infortrend=complete
|
||||
driver.inspur=complete
|
||||
driver.inspur_as13000=missing
|
||||
driver.kaminario=complete
|
||||
driver.kioxia_kumoscale=missing
|
||||
driver.lenovo=missing
|
||||
driver.linbit_linstor=missing
|
||||
driver.lvm=missing
|
||||
@ -568,6 +576,7 @@ driver.infortrend=missing
|
||||
driver.inspur=complete
|
||||
driver.inspur_as13000=missing
|
||||
driver.kaminario=missing
|
||||
driver.kioxia_kumoscale=missing
|
||||
driver.lenovo=missing
|
||||
driver.linbit_linstor=missing
|
||||
driver.lvm=missing
|
||||
@ -635,6 +644,7 @@ driver.infortrend=complete
|
||||
driver.inspur=missing
|
||||
driver.inspur_as13000=complete
|
||||
driver.kaminario=complete
|
||||
driver.kioxia_kumoscale=complete
|
||||
driver.lenovo=missing
|
||||
driver.linbit_linstor=missing
|
||||
driver.lvm=complete
|
||||
@ -703,6 +713,7 @@ driver.infortrend=complete
|
||||
driver.inspur=missing
|
||||
driver.inspur_as13000=missing
|
||||
driver.kaminario=missing
|
||||
driver.kioxia_kumoscale=missing
|
||||
driver.lenovo=missing
|
||||
driver.linbit_linstor=missing
|
||||
driver.lvm=missing
|
||||
@ -771,6 +782,7 @@ driver.infortrend=complete
|
||||
driver.inspur=missing
|
||||
driver.inspur_as13000=complete
|
||||
driver.kaminario=missing
|
||||
driver.kioxia_kumoscale=missing
|
||||
driver.lenovo=complete
|
||||
driver.linbit_linstor=missing
|
||||
driver.lvm=complete
|
||||
@ -836,6 +848,7 @@ driver.infortrend=missing
|
||||
driver.inspur=missing
|
||||
driver.inspur_as13000=missing
|
||||
driver.kaminario=missing
|
||||
driver.kioxia_kumoscale=missing
|
||||
driver.lenovo=missing
|
||||
driver.linbit_linstor=missing
|
||||
driver.lvm=complete
|
||||
@ -905,6 +918,7 @@ driver.infortrend=missing
|
||||
driver.inspur=missing
|
||||
driver.inspur_as13000=missing
|
||||
driver.kaminario=missing
|
||||
driver.kioxia_kumoscale=missing
|
||||
driver.lenovo=missing
|
||||
driver.linbit_linstor=missing
|
||||
driver.lvm=missing
|
||||
|
@ -0,0 +1,5 @@
|
||||
---
|
||||
features:
|
||||
- |
|
||||
New Cinder volume driver for KIOXIA Kumoscale storage systems.
|
||||
The driver supports the NVMeOF storage protocol.
|
Loading…
Reference in New Issue
Block a user