LVM nvmet: Add support for shared subsystems
LVM target drivers usually only support unique subsystems/targets, so a specific subsystem/target is created for each volume. While this is good from a deployment point of view, it is insufficient from a testing perspective, since it limits the code paths that can be tested in os-brick. Being able to test these 2 different paths in os-brick is very important, because the shared case usually present very particular issues: Leftover devices caused by race conditions between nova and cinder, premature subsystem/target disconnection, not disconnecting subsystem/target, etc. Thanks to this patch we'll be able to increase the testing possibilities of the NVMe-oF os-brick connector to cover combinations of: - Different connection properties formats: old & new - Different target sharing: shared & non shared Change-Id: I396db66f72fbf1f31f279d4431c64c9004a1a665
This commit is contained in:
parent
daa803b8ee
commit
8e7ead7c27
@ -174,7 +174,6 @@ def privsep_setup(cls_name, *args, **kwargs):
|
||||
###################
|
||||
# Classes that don't currently have privsep support
|
||||
|
||||
Namespace = nvmet.Namespace
|
||||
Host = nvmet.Host
|
||||
Referral = nvmet.Referral
|
||||
ANAGroup = nvmet.ANAGroup
|
||||
@ -188,6 +187,18 @@ ANAGroup = nvmet.ANAGroup
|
||||
NotFound = nvmet.nvme.CFSNotFound
|
||||
|
||||
|
||||
class Namespace(nvmet.Namespace):
|
||||
def __init__(self, subsystem, nsid=None, mode='lookup'):
|
||||
super().__init__(subsystem=subsystem, nsid=nsid, mode=mode)
|
||||
|
||||
@classmethod
|
||||
def setup(cls, subsys, n, err_func=None):
|
||||
privsep_setup(cls.__name__, serialize(subsys), n, err_func)
|
||||
|
||||
def delete(self):
|
||||
do_privsep_call(serialize(self), 'delete')
|
||||
|
||||
|
||||
class Subsystem(nvmet.Subsystem):
|
||||
def __init__(self, nqn=None, mode='lookup'):
|
||||
super().__init__(nqn=nqn, mode=mode)
|
||||
@ -199,6 +210,11 @@ class Subsystem(nvmet.Subsystem):
|
||||
def delete(self):
|
||||
do_privsep_call(serialize(self), 'delete')
|
||||
|
||||
@property
|
||||
def namespaces(self):
|
||||
for d in os.listdir(self.path + '/namespaces/'):
|
||||
yield Namespace(self, os.path.basename(d))
|
||||
|
||||
|
||||
class Port(nvmet.Port):
|
||||
def __init__(self, portid, mode='lookup'):
|
||||
|
@ -30,7 +30,8 @@ except ImportError:
|
||||
Root=type('Root', (mock.Mock, ), {}),
|
||||
Subsystem=type('Subsystem', (mock.Mock, ), {}),
|
||||
Port=type('Port', (mock.Mock, ), {}),
|
||||
Namespace=type('Namespace', (mock.Mock, ), {}),
|
||||
Namespace=type('Namespace', (mock.Mock, ),
|
||||
{'MAX_NSID': 8192}),
|
||||
Host=type('Host', (mock.Mock, ), {}),
|
||||
ANAGroup=type('ANAGroup', (mock.Mock, ), {}),
|
||||
Referral=type('Referral', (mock.Mock, ), {}),
|
||||
|
@ -298,7 +298,7 @@ class TestPrivsep(test.TestCase):
|
||||
|
||||
@ddt.ddt
|
||||
class TestNvmetClasses(test.TestCase):
|
||||
@ddt.data('Namespace', 'Host', 'Referral', 'ANAGroup')
|
||||
@ddt.data('Host', 'Referral', 'ANAGroup')
|
||||
def test_same_classes(self, cls_name):
|
||||
self.assertEqual(getattr(nvmet, cls_name),
|
||||
getattr(nvmet.nvmet, cls_name))
|
||||
@ -330,6 +330,22 @@ class TestNvmetClasses(test.TestCase):
|
||||
mock_privsep.assert_called_once_with(mock_serialize.return_value,
|
||||
'delete')
|
||||
|
||||
@mock.patch('os.listdir',
|
||||
return_value=['/path/namespaces/1', '/path/namespaces/2'])
|
||||
@mock.patch.object(nvmet, 'Namespace')
|
||||
def test_subsystem_namespaces(self, mock_nss, mock_listdir):
|
||||
subsys = nvmet.Subsystem(mock.sentinel.nqn)
|
||||
subsys.path = '/path' # Set by the parent nvmet library Root class
|
||||
|
||||
res = list(subsys.namespaces)
|
||||
|
||||
self.assertEqual([mock_nss.return_value, mock_nss.return_value], res)
|
||||
|
||||
mock_listdir.assert_called_once_with('/path/namespaces/')
|
||||
self.assertEqual(2, mock_nss.call_count)
|
||||
mock_nss.assert_has_calls((mock.call(subsys, '1'),
|
||||
mock.call(subsys, '2')))
|
||||
|
||||
def test_port_init(self):
|
||||
port = nvmet.Port('portid')
|
||||
self.assertIsInstance(port, nvmet.nvmet.Port)
|
||||
@ -397,3 +413,30 @@ class TestNvmetClasses(test.TestCase):
|
||||
mock_listdir.assert_called_once_with('/path/ports/')
|
||||
self.assertEqual(2, mock_port.call_count)
|
||||
mock_port.assert_has_calls((mock.call('1'), mock.call('2')))
|
||||
|
||||
def test_namespace_init(self):
|
||||
ns = nvmet.Namespace('subsystem', 'nsid')
|
||||
self.assertIsInstance(ns, nvmet.nvmet.Namespace)
|
||||
self.assertIsInstance(ns, nvmet.Namespace)
|
||||
self.assertEqual('subsystem', ns.subsystem)
|
||||
self.assertEqual('nsid', ns.nsid)
|
||||
self.assertEqual('lookup', ns.mode)
|
||||
|
||||
@mock.patch.object(nvmet, 'serialize')
|
||||
@mock.patch.object(nvmet, 'privsep_setup')
|
||||
def test_namespace_setup(self, mock_setup, mock_serialize):
|
||||
nvmet.Namespace.setup(mock.sentinel.subsys,
|
||||
mock.sentinel.n)
|
||||
mock_serialize.assert_called_once_with(mock.sentinel.subsys)
|
||||
mock_setup.assert_called_once_with('Namespace',
|
||||
mock_serialize.return_value,
|
||||
mock.sentinel.n, None)
|
||||
|
||||
@mock.patch.object(nvmet, 'serialize')
|
||||
@mock.patch.object(nvmet, 'do_privsep_call')
|
||||
def test_namespace_delete(self, mock_privsep, mock_serialize):
|
||||
ns = nvmet.Namespace('subsystem', 'nsid')
|
||||
ns.delete()
|
||||
mock_serialize.assert_called_once_with(ns)
|
||||
mock_privsep.assert_called_once_with(mock_serialize.return_value,
|
||||
'delete')
|
||||
|
@ -72,7 +72,7 @@ class TestNVMeOFDriver(tf.TargetDriverFixture):
|
||||
'created_at': timeutils.utcnow(),
|
||||
'host': 'fake_host@lvm#lvm'})
|
||||
|
||||
@mock.patch.object(nvmeof.NVMeOF, '_get_connection_properties')
|
||||
@mock.patch.object(nvmeof.NVMeOF, '_get_connection_properties_from_vol')
|
||||
def test_initialize_connection(self, mock_get_conn):
|
||||
mock_connector = {'initiator': 'fake_init'}
|
||||
mock_testvol = self.testvol
|
||||
@ -108,43 +108,62 @@ class TestNVMeOFDriver(tf.TargetDriverFixture):
|
||||
self.testvol
|
||||
)
|
||||
|
||||
@mock.patch.object(nvmeof.NVMeOF, '_get_nvme_uuid')
|
||||
@mock.patch.object(nvmeof.NVMeOF, '_get_connection_properties')
|
||||
def test__get_connection_properties(self, mock_get_conn_props, mock_uuid):
|
||||
"""Test connection properties from a volume."""
|
||||
res = self.target._get_connection_properties_from_vol(self.testvol)
|
||||
self.assertEqual(mock_get_conn_props.return_value, res)
|
||||
mock_uuid.assert_called_once_with(self.testvol)
|
||||
mock_get_conn_props.assert_called_once_with(
|
||||
f'ngn.{self.nvmet_subsystem_name}-{self.fake_volume_id}',
|
||||
self.target_ip,
|
||||
str(self.target_port),
|
||||
self.nvme_transport_type,
|
||||
str(self.nvmet_ns_id),
|
||||
mock_uuid.return_value)
|
||||
|
||||
def test__get_connection_properties_old(self):
|
||||
"""Test connection properties with the old NVMe-oF format."""
|
||||
nqn = f'ngn.{self.nvmet_subsystem_name}-{self.fake_volume_id}'
|
||||
expected_return = {
|
||||
'target_portal': self.target_ip,
|
||||
'target_port': str(self.target_port),
|
||||
'nqn': "ngn.%s-%s" % (
|
||||
self.nvmet_subsystem_name, self.fake_volume_id),
|
||||
'nqn': nqn,
|
||||
'transport_type': self.nvme_transport_type,
|
||||
'ns_id': str(self.nvmet_ns_id)
|
||||
}
|
||||
self.assertEqual(expected_return,
|
||||
self.target._get_connection_properties(self.testvol))
|
||||
res = self.target._get_connection_properties(nqn,
|
||||
self.target_ip,
|
||||
str(self.target_port),
|
||||
self.nvme_transport_type,
|
||||
str(self.nvmet_ns_id),
|
||||
mock.sentinel.uuid)
|
||||
self.assertEqual(expected_return, res)
|
||||
|
||||
@ddt.data(('rdma', 'RoCEv2'), ('tcp', 'tcp'))
|
||||
@ddt.unpack
|
||||
@mock.patch.object(nvmeof.NVMeOF, '_get_nvme_uuid')
|
||||
def test__get_connection_properties_new(self, transport,
|
||||
expected_transport, mock_uuid):
|
||||
def test__get_connection_properties_new(
|
||||
self, transport, expected_transport):
|
||||
"""Test connection properties with the new NVMe-oF format."""
|
||||
nqn = f'ngn.{self.nvmet_subsystem_name}-{self.fake_volume_id}'
|
||||
vol = self.testvol.copy()
|
||||
vol['provider_location'] = self.target.get_nvmeof_location(
|
||||
nqn, self.target_ip, self.target_port, transport, self.nvmet_ns_id)
|
||||
|
||||
self.configuration.nvmeof_conn_info_version = 2
|
||||
|
||||
expected_return = {
|
||||
'target_nqn': nqn,
|
||||
'vol_uuid': mock_uuid.return_value,
|
||||
'vol_uuid': mock.sentinel.uuid,
|
||||
'ns_id': str(self.nvmet_ns_id),
|
||||
'portals': [(self.target_ip,
|
||||
str(self.target_port),
|
||||
expected_transport)],
|
||||
}
|
||||
self.assertEqual(expected_return,
|
||||
self.target._get_connection_properties(vol))
|
||||
mock_uuid.assert_called_once_with(vol)
|
||||
res = self.target._get_connection_properties(nqn,
|
||||
self.target_ip,
|
||||
str(self.target_port),
|
||||
transport,
|
||||
str(self.nvmet_ns_id),
|
||||
mock.sentinel.uuid)
|
||||
self.assertEqual(expected_return, res)
|
||||
|
||||
def test_validate_connector(self):
|
||||
mock_connector = {'initiator': 'fake_init'}
|
||||
|
@ -14,6 +14,7 @@ from unittest import mock
|
||||
|
||||
import ddt
|
||||
|
||||
from cinder import exception
|
||||
from cinder.tests.unit.privsep.targets import fake_nvmet_lib
|
||||
from cinder.tests.unit.targets import targets_fixture as tf
|
||||
from cinder import utils
|
||||
@ -32,54 +33,148 @@ class TestNVMETDriver(tf.TargetDriverFixture):
|
||||
self.configuration.target_protocol = 'nvmet_rdma'
|
||||
self.target = nvmet.NVMET(root_helper=utils.get_root_helper(),
|
||||
configuration=self.configuration)
|
||||
self.target.share_targets = False
|
||||
fake_nvmet_lib.reset_mock()
|
||||
|
||||
def test_supports_shared(self):
|
||||
self.assertTrue(self.target.SHARED_TARGET_SUPPORT)
|
||||
|
||||
@mock.patch.object(nvmet.nvmeof.NVMeOF, 'initialize_connection')
|
||||
@mock.patch.object(nvmet.NVMET, '_map_volume')
|
||||
def test_initialize_connection_non_shared(self, mock_map, mock_init_conn):
|
||||
"""Non shared initialize doesn't do anything (calls NVMeOF)."""
|
||||
res = self.target.initialize_connection(mock.sentinel.volume,
|
||||
mock.sentinel.connector)
|
||||
self.assertEqual(mock_init_conn.return_value, res)
|
||||
mock_init_conn.assert_called_once_with(mock.sentinel.volume,
|
||||
mock.sentinel.connector)
|
||||
mock_map.assert_not_called()
|
||||
|
||||
@mock.patch.object(nvmet.NVMET, '_get_nvme_uuid')
|
||||
@mock.patch.object(nvmet.NVMET, 'get_nvmeof_location')
|
||||
@mock.patch.object(nvmet.NVMET, '_ensure_port_exports')
|
||||
@mock.patch.object(nvmet.NVMET, '_ensure_subsystem_exists')
|
||||
@mock.patch.object(nvmet.NVMET, '_get_target_nqn')
|
||||
def test_create_export(self, mock_nqn, mock_subsys, mock_port,
|
||||
mock_location, mock_uuid):
|
||||
"""Normal create target execution."""
|
||||
mock_nqn.return_value = mock.sentinel.nqn
|
||||
mock_uuid.return_value = mock.sentinel.uuid
|
||||
@mock.patch('os.path.exists')
|
||||
@mock.patch.object(nvmet.NVMET, '_get_connection_properties')
|
||||
@mock.patch.object(nvmet.nvmeof.NVMeOF, 'initialize_connection')
|
||||
@mock.patch.object(nvmet.NVMET, '_map_volume')
|
||||
def test_initialize_connection_shared(
|
||||
self, mock_map, mock_init_conn, mock_get_conn_props, mock_exists,
|
||||
mock_uuid):
|
||||
"""When sharing, the initialization maps the volume."""
|
||||
self.mock_object(self.target, 'share_targets', True)
|
||||
mock_map.return_value = (mock.sentinel.nqn, mock.sentinel.nsid)
|
||||
vol = mock.Mock()
|
||||
res = self.target.initialize_connection(vol, mock.sentinel.connector)
|
||||
|
||||
expected = {'driver_volume_type': 'nvmeof',
|
||||
'data': mock_get_conn_props.return_value}
|
||||
self.assertEqual(expected, res)
|
||||
|
||||
mock_init_conn.assert_not_called()
|
||||
mock_exists.assert_called_once_with(vol.provider_location)
|
||||
mock_map.assert_called_once_with(vol,
|
||||
vol.provider_location,
|
||||
mock.sentinel.connector)
|
||||
mock_uuid.assert_called_once_with(vol)
|
||||
mock_get_conn_props.assert_called_once_with(
|
||||
mock.sentinel.nqn,
|
||||
self.target.target_ip,
|
||||
self.target.target_port,
|
||||
self.target.nvme_transport_type,
|
||||
mock.sentinel.nsid,
|
||||
mock_uuid.return_value)
|
||||
|
||||
@mock.patch.object(nvmet.NVMET, '_get_nvme_uuid')
|
||||
@mock.patch('os.path.exists', return_value=False)
|
||||
@mock.patch.object(nvmet.NVMET, '_get_connection_properties')
|
||||
@mock.patch.object(nvmet.nvmeof.NVMeOF, 'initialize_connection')
|
||||
@mock.patch.object(nvmet.NVMET, '_map_volume')
|
||||
def test_initialize_connection_shared_no_path(
|
||||
self, mock_map, mock_init_conn, mock_get_conn_props, mock_exists,
|
||||
mock_uuid):
|
||||
"""Fails if the provided path is not present in the system."""
|
||||
self.mock_object(self.target, 'share_targets', True)
|
||||
mock_map.return_value = (mock.sentinel.nqn, mock.sentinel.nsid)
|
||||
vol = mock.Mock()
|
||||
self.assertRaises(exception.InvalidConfigurationValue,
|
||||
self.target.initialize_connection,
|
||||
vol, mock.sentinel.connector)
|
||||
|
||||
mock_init_conn.assert_not_called()
|
||||
mock_exists.assert_called_once_with(vol.provider_location)
|
||||
mock_map.assert_not_called()
|
||||
mock_uuid.assert_not_called()
|
||||
mock_get_conn_props.assert_not_called()
|
||||
|
||||
@mock.patch.object(nvmet.NVMET, 'get_nvmeof_location')
|
||||
@mock.patch.object(nvmet.NVMET, '_map_volume')
|
||||
def test_create_export(self, mock_map, mock_location):
|
||||
"""When not sharing, the export maps the volume."""
|
||||
mock_map.return_value = (mock.sentinel.nqn, mock.sentinel.nsid)
|
||||
|
||||
res = self.target.create_export(mock.sentinel.context,
|
||||
vol,
|
||||
mock.sentinel.vol,
|
||||
mock.sentinel.volume_path)
|
||||
|
||||
self.assertEqual({'location': mock_location.return_value, 'auth': ''},
|
||||
res)
|
||||
mock_nqn.assert_called_once_with(vol.id)
|
||||
mock_uuid.assert_called_once_with(vol)
|
||||
mock_subsys.assert_called_once_with(mock.sentinel.nqn,
|
||||
self.target.nvmet_ns_id,
|
||||
mock.sentinel.volume_path,
|
||||
mock.sentinel.uuid)
|
||||
mock_port.assert_called_once_with(mock.sentinel.nqn,
|
||||
self.target.target_ip,
|
||||
self.target.target_port,
|
||||
self.target.nvme_transport_type,
|
||||
self.target.nvmet_port_id)
|
||||
|
||||
mock_map.assert_called_once_with(mock.sentinel.vol,
|
||||
mock.sentinel.volume_path)
|
||||
mock_location.assert_called_once_with(mock.sentinel.nqn,
|
||||
self.target.target_ip,
|
||||
self.target.target_port,
|
||||
self.target.nvme_transport_type,
|
||||
self.target.nvmet_ns_id)
|
||||
mock.sentinel.nsid)
|
||||
|
||||
@ddt.data((ValueError, None), (None, IndexError))
|
||||
@ddt.unpack
|
||||
@mock.patch.object(nvmet.NVMET, '_get_nvme_uuid')
|
||||
@mock.patch.object(nvmet.NVMET, 'get_nvmeof_location')
|
||||
@mock.patch.object(nvmet.NVMET, '_map_volume')
|
||||
def test_create_export_shared(self, mock_map, mock_location):
|
||||
"""When sharing, the export just stores the volume path as location."""
|
||||
self.mock_object(self.target, 'share_targets', True)
|
||||
|
||||
res = self.target.create_export(mock.sentinel.context,
|
||||
mock.sentinel.vol,
|
||||
mock.sentinel.volume_path)
|
||||
|
||||
self.assertEqual({'location': mock.sentinel.volume_path, 'auth': ''},
|
||||
res)
|
||||
mock_map.assert_not_called()
|
||||
mock_location.assert_not_called()
|
||||
|
||||
@mock.patch('oslo_concurrency.lockutils.lock')
|
||||
@mock.patch.object(nvmet.NVMET, '_get_nvme_uuid')
|
||||
@mock.patch.object(nvmet.NVMET, '_ensure_port_exports')
|
||||
@mock.patch.object(nvmet.NVMET, '_ensure_subsystem_exists')
|
||||
@mock.patch.object(nvmet.NVMET, '_get_target_nqn')
|
||||
def test_create_export_error(self, subsys_effect, port_effect,
|
||||
mock_nqn, mock_subsys, mock_port,
|
||||
mock_location, mock_uuid):
|
||||
def test__map_volume(self, mock_nqn, mock_subsys, mock_port, mock_uuid,
|
||||
mock_lock):
|
||||
"""Normal volume mapping."""
|
||||
vol = mock.Mock()
|
||||
res = self.target._map_volume(vol, mock.sentinel.volume_path,
|
||||
mock.sentinel.connector)
|
||||
|
||||
expected = (mock_nqn.return_value, mock_subsys.return_value)
|
||||
self.assertEqual(res, expected)
|
||||
|
||||
mock_nqn.assert_called_once_with(vol.id, mock.sentinel.connector)
|
||||
mock_uuid.assert_called_once_with(vol)
|
||||
mock_subsys.assert_called_once_with(mock_nqn.return_value,
|
||||
mock.sentinel.volume_path,
|
||||
mock_uuid.return_value)
|
||||
mock_port.assert_called_once_with(mock_nqn.return_value,
|
||||
self.target.target_ip,
|
||||
self.target.target_port,
|
||||
self.target.nvme_transport_type,
|
||||
self.target.nvmet_port_id)
|
||||
mock_lock.assert_called()
|
||||
|
||||
@ddt.data((ValueError, None), (None, IndexError))
|
||||
@ddt.unpack
|
||||
@mock.patch('oslo_concurrency.lockutils.lock')
|
||||
@mock.patch.object(nvmet.NVMET, '_get_nvme_uuid')
|
||||
@mock.patch.object(nvmet.NVMET, '_ensure_port_exports')
|
||||
@mock.patch.object(nvmet.NVMET, '_ensure_subsystem_exists')
|
||||
@mock.patch.object(nvmet.NVMET, '_get_target_nqn')
|
||||
def test__map_volume_error(self, subsys_effect, port_effect, mock_nqn,
|
||||
mock_subsys, mock_port, mock_uuid, mock_lock):
|
||||
"""Failing create target executing subsystem or port creation."""
|
||||
mock_subsys.side_effect = subsys_effect
|
||||
mock_port.side_effect = port_effect
|
||||
@ -88,15 +183,14 @@ class TestNVMETDriver(tf.TargetDriverFixture):
|
||||
vol = mock.Mock()
|
||||
|
||||
self.assertRaises(nvmet.NVMETTargetAddError,
|
||||
self.target.create_export,
|
||||
mock.sentinel.context,
|
||||
self.target._map_volume,
|
||||
vol,
|
||||
mock.sentinel.volume_path)
|
||||
mock.sentinel.volume_path,
|
||||
mock.sentinel.connector)
|
||||
|
||||
mock_nqn.assert_called_once_with(vol.id)
|
||||
mock_nqn.assert_called_once_with(vol.id, mock.sentinel.connector)
|
||||
mock_uuid.assert_called_once_with(vol)
|
||||
mock_subsys.assert_called_once_with(mock.sentinel.nqn,
|
||||
self.target.nvmet_ns_id,
|
||||
mock.sentinel.volume_path,
|
||||
mock.sentinel.uuid)
|
||||
if subsys_effect:
|
||||
@ -107,26 +201,34 @@ class TestNVMETDriver(tf.TargetDriverFixture):
|
||||
self.target.target_port,
|
||||
self.target.nvme_transport_type,
|
||||
self.target.nvmet_port_id)
|
||||
mock_location.assert_not_called()
|
||||
mock_lock.assert_called()
|
||||
|
||||
@mock.patch.object(nvmet.NVMET, '_ensure_namespace_exists')
|
||||
@mock.patch.object(priv_nvmet, 'Subsystem')
|
||||
def test__ensure_subsystem_exists_already_exists(self, mock_subsys):
|
||||
def test__ensure_subsystem_exists_already_exists(self, mock_subsys,
|
||||
mock_namespace):
|
||||
"""Skip subsystem creation if already exists."""
|
||||
nqn = 'nqn.nvme-subsystem-1-uuid'
|
||||
self.target._ensure_subsystem_exists(nqn, mock.sentinel.ns_id,
|
||||
res = self.target._ensure_subsystem_exists(nqn,
|
||||
mock.sentinel.vol_path,
|
||||
mock.sentinel.uuid)
|
||||
self.assertEqual(mock_namespace.return_value, res)
|
||||
mock_subsys.assert_called_once_with(nqn)
|
||||
mock_subsys.setup.assert_not_called()
|
||||
mock_namespace.assert_called_once_with(mock_subsys.return_value,
|
||||
mock.sentinel.vol_path,
|
||||
mock.sentinel.uuid)
|
||||
|
||||
@mock.patch.object(nvmet.NVMET, '_ensure_namespace_exists')
|
||||
@mock.patch('oslo_utils.uuidutils.generate_uuid')
|
||||
@mock.patch.object(priv_nvmet, 'Subsystem')
|
||||
def test__ensure_subsystem_exists(self, mock_subsys, mock_uuid):
|
||||
def test__ensure_subsystem_exists(self, mock_subsys, mock_uuid,
|
||||
mock_namespace):
|
||||
"""Create subsystem when it doesn't exist."""
|
||||
mock_subsys.side_effect = priv_nvmet.NotFound
|
||||
mock_uuid.return_value = 'uuid'
|
||||
nqn = 'nqn.nvme-subsystem-1-uuid'
|
||||
self.target._ensure_subsystem_exists(nqn, mock.sentinel.ns_id,
|
||||
self.target._ensure_subsystem_exists(nqn,
|
||||
mock.sentinel.vol_path,
|
||||
mock.sentinel.uuid)
|
||||
mock_subsys.assert_called_once_with(nqn)
|
||||
@ -137,10 +239,107 @@ class TestNVMETDriver(tf.TargetDriverFixture):
|
||||
'uuid': mock.sentinel.uuid,
|
||||
'path': mock.sentinel.vol_path},
|
||||
'enable': 1,
|
||||
'nsid': mock.sentinel.ns_id}],
|
||||
'nsid': self.target.nvmet_ns_id}],
|
||||
'nqn': nqn
|
||||
}
|
||||
mock_subsys.setup.assert_called_once_with(expected_section)
|
||||
mock_namespace.assert_not_called()
|
||||
|
||||
@mock.patch('oslo_utils.uuidutils.generate_uuid')
|
||||
def test__namespace_dict(self, mock_uuid):
|
||||
"""For not shared nguid is randomly generated."""
|
||||
res = self.target._namespace_dict(mock.sentinel.uuid,
|
||||
mock.sentinel.volume_path,
|
||||
mock.sentinel.ns_id)
|
||||
expected = {"device": {"nguid": str(mock_uuid.return_value),
|
||||
"uuid": mock.sentinel.uuid,
|
||||
"path": mock.sentinel.volume_path},
|
||||
"enable": 1,
|
||||
"nsid": mock.sentinel.ns_id}
|
||||
self.assertEqual(expected, res)
|
||||
mock_uuid.assert_called_once()
|
||||
|
||||
@mock.patch('oslo_utils.uuidutils.generate_uuid')
|
||||
def test__namespace_dict_shared(self, mock_uuid):
|
||||
"""For shared uuid = nguid."""
|
||||
self.mock_object(self.target, 'share_targets', True)
|
||||
res = self.target._namespace_dict(mock.sentinel.uuid,
|
||||
mock.sentinel.volume_path,
|
||||
mock.sentinel.ns_id)
|
||||
expected = {"device": {"nguid": mock.sentinel.uuid,
|
||||
"uuid": mock.sentinel.uuid,
|
||||
"path": mock.sentinel.volume_path},
|
||||
"enable": 1,
|
||||
"nsid": mock.sentinel.ns_id}
|
||||
self.assertEqual(expected, res)
|
||||
mock_uuid.assert_not_called
|
||||
|
||||
def test__ensure_namespace_exist_exists(self):
|
||||
"""Nothing to do if the namespace is already mapped."""
|
||||
base_path = '/dev/stack-volumes-lvmdriver-1/volume-'
|
||||
volume_path = f'{base_path}uuid2'
|
||||
subsys = mock.Mock()
|
||||
ns_other = mock.Mock(**{'get_attr.return_value': f'{base_path}uuid1'})
|
||||
ns_found = mock.Mock(**{'get_attr.return_value': volume_path})
|
||||
# nw_other appears twice to confirm we stop when found
|
||||
subsys.namespaces = [ns_other, ns_found, ns_other]
|
||||
res = self.target._ensure_namespace_exists(subsys, volume_path,
|
||||
mock.sentinel.uuid)
|
||||
self.assertEqual(ns_found.nsid, res)
|
||||
ns_other.get_attr.assert_called_once_with('device', 'path')
|
||||
ns_found.get_attr.assert_called_once_with('device', 'path')
|
||||
|
||||
@mock.patch.object(priv_nvmet, 'Namespace')
|
||||
@mock.patch.object(nvmet.NVMET, '_namespace_dict')
|
||||
@mock.patch.object(nvmet.NVMET, '_get_available_namespace_id')
|
||||
def test__ensure_namespace_exist_create(self, mock_get_nsid, mock_ns_dict,
|
||||
mock_ns):
|
||||
"""Create the namespace when the path is not mapped yet."""
|
||||
base_path = '/dev/stack-volumes-lvmdriver-1/volume-'
|
||||
subsys = mock.Mock()
|
||||
ns_other = mock.Mock(**{'get_attr.return_value': f'{base_path}uuid1'})
|
||||
subsys.namespaces = [ns_other]
|
||||
res = self.target._ensure_namespace_exists(subsys,
|
||||
mock.sentinel.volume_path,
|
||||
mock.sentinel.uuid)
|
||||
self.assertEqual(mock_get_nsid.return_value, res)
|
||||
ns_other.get_attr.assert_called_once_with('device', 'path')
|
||||
mock_get_nsid.assert_called_once_with(subsys)
|
||||
mock_ns_dict.assert_called_once_with(mock.sentinel.uuid,
|
||||
mock.sentinel.volume_path,
|
||||
mock_get_nsid.return_value)
|
||||
mock_ns.setup.assert_called_once_with(subsys,
|
||||
mock_ns_dict.return_value)
|
||||
|
||||
def test__get_available_namespace_id(self):
|
||||
"""For non shared we always return the value from the config."""
|
||||
res = self.target._get_available_namespace_id(mock.Mock())
|
||||
self.assertEqual(self.target.nvmet_ns_id, res)
|
||||
|
||||
def test__get_available_namespace_id_none_used(self):
|
||||
"""For shared, on empty subsystem return the configured value."""
|
||||
self.mock_object(self.target, 'share_targets', True)
|
||||
subsys = mock.Mock(namespaces=[])
|
||||
res = self.target._get_available_namespace_id(subsys)
|
||||
self.assertEqual(self.target.nvmet_ns_id, res)
|
||||
|
||||
def test__get_available_namespace_id_no_gaps(self):
|
||||
"""For shared, if there are no gaps in ids return next."""
|
||||
self.mock_object(self.target, 'share_targets', True)
|
||||
expected = self.target.nvmet_ns_id + 2
|
||||
subsys = mock.Mock(namespaces=[mock.Mock(nsid=expected - 1),
|
||||
mock.Mock(nsid=expected - 2)])
|
||||
res = self.target._get_available_namespace_id(subsys)
|
||||
self.assertEqual(expected, res)
|
||||
|
||||
def test__get_available_namespace_id_gap_value(self):
|
||||
"""For shared, if there is a gap any of them is valid."""
|
||||
self.mock_object(self.target, 'share_targets', True)
|
||||
lower = self.target.nvmet_ns_id
|
||||
subsys = mock.Mock(namespaces=[mock.Mock(nsid=lower + 3),
|
||||
mock.Mock(nsid=lower)])
|
||||
res = self.target._get_available_namespace_id(subsys)
|
||||
self.assertTrue(res in [lower + 2, lower + 1])
|
||||
|
||||
@mock.patch.object(priv_nvmet, 'Port')
|
||||
def test__ensure_port_exports_already_does(self, mock_port):
|
||||
@ -193,66 +392,243 @@ class TestNVMETDriver(tf.TargetDriverFixture):
|
||||
new_port)
|
||||
mock_port.return_value.assert_not_called()
|
||||
|
||||
@mock.patch.object(nvmet.NVMET, 'delete_nvmeof_target')
|
||||
def test_remove_export(self, mock_delete_target):
|
||||
"""Test that the nvmeof class calls the nvmet method."""
|
||||
res = self.target.remove_export(mock.sentinel.ctxt,
|
||||
mock.sentinel.volume)
|
||||
self.assertEqual(mock_delete_target.return_value, res)
|
||||
mock_delete_target.assert_called_once_with(mock.sentinel.volume)
|
||||
@mock.patch.object(nvmet.NVMET, '_locked_unmap_volume')
|
||||
def test_terminate_connection(self, mock_unmap):
|
||||
"""For non shared there's nothing to do."""
|
||||
self.target.terminate_connection(mock.sentinel.vol,
|
||||
mock.sentinel.connector)
|
||||
mock_unmap.assert_not_called()
|
||||
|
||||
@mock.patch.object(priv_nvmet, 'Subsystem')
|
||||
@mock.patch.object(nvmet.NVMET, '_locked_unmap_volume')
|
||||
def test_terminate_connection_shared(self, mock_unmap):
|
||||
"""For shared the volume must be unmapped."""
|
||||
self.mock_object(self.target, 'share_targets', True)
|
||||
vol = mock.Mock()
|
||||
self.target.terminate_connection(vol,
|
||||
mock.sentinel.connector)
|
||||
mock_unmap.assert_called_once_with(vol,
|
||||
mock.sentinel.connector)
|
||||
|
||||
@mock.patch.object(nvmet.NVMET, '_locked_unmap_volume')
|
||||
def test_remove_export(self, mock_unmap):
|
||||
"""For non shared the volume must be unmapped."""
|
||||
vol = mock.Mock()
|
||||
self.target.remove_export(mock.sentinel.context,
|
||||
vol)
|
||||
mock_unmap.assert_called_once_with(vol)
|
||||
|
||||
@mock.patch.object(nvmet.NVMET, '_locked_unmap_volume')
|
||||
def test_remove_export_shared(self, mock_unmap):
|
||||
"""For shared there's nothing to do."""
|
||||
self.mock_object(self.target, 'share_targets', True)
|
||||
self.target.remove_export(mock.sentinel.context,
|
||||
mock.sentinel.vol)
|
||||
mock_unmap.assert_not_called()
|
||||
|
||||
@mock.patch('oslo_concurrency.lockutils.lock')
|
||||
@mock.patch.object(nvmet.NVMET, '_get_nqns_for_location', return_value=[])
|
||||
@mock.patch.object(nvmet.NVMET, '_get_target_nqn')
|
||||
def test_delete_nvmeof_target_nothing_present(self, mock_nqn, mock_subsys):
|
||||
"""Delete doesn't do anything because there is nothing to do."""
|
||||
mock_nqn.return_value = mock.sentinel.nqn
|
||||
@mock.patch.object(nvmet.NVMET, '_unmap_volume')
|
||||
def test__locked_unmap_volume_no_nqn(self, mock_unmap, mock_nqn, mock_nqns,
|
||||
mock_lock):
|
||||
"""Nothing to do with no subsystem when sharing and no connector."""
|
||||
self.mock_object(self.target, 'share_targets', True)
|
||||
|
||||
vol = mock.Mock()
|
||||
self.target._locked_unmap_volume(vol, connector=None)
|
||||
|
||||
mock_lock.assert_called()
|
||||
mock_nqn.assert_not_called()
|
||||
mock_nqns.assert_called_once_with(vol.provider_location)
|
||||
mock_unmap.assert_not_called()
|
||||
|
||||
@mock.patch('oslo_concurrency.lockutils.lock')
|
||||
@mock.patch.object(nvmet.NVMET, '_get_nqns_for_location')
|
||||
@mock.patch.object(nvmet.NVMET, '_get_target_nqn')
|
||||
@mock.patch.object(nvmet.NVMET, '_unmap_volume')
|
||||
def test__locked_unmap_volume_non_shared(self, mock_unmap, mock_nqn,
|
||||
mock_nqns, mock_lock):
|
||||
"""Unmap locked with non sharing and no connector."""
|
||||
vol = mock.Mock()
|
||||
self.target._locked_unmap_volume(vol, connector=None)
|
||||
|
||||
mock_lock.assert_called()
|
||||
mock_nqn.assert_called_once_with(vol.id, None)
|
||||
mock_nqns.assert_not_called()
|
||||
mock_unmap.assert_called_once_with(vol, mock_nqn.return_value)
|
||||
|
||||
@mock.patch('oslo_concurrency.lockutils.lock')
|
||||
@mock.patch.object(nvmet.NVMET, '_get_nqns_for_location')
|
||||
@mock.patch.object(nvmet.NVMET, '_get_target_nqn')
|
||||
@mock.patch.object(nvmet.NVMET, '_unmap_volume')
|
||||
def test__locked_unmap_volume_shared_multiple(self, mock_unmap, mock_nqn,
|
||||
mock_nqns, mock_lock):
|
||||
"""Unmap locked with sharing and no connector, having multiple nqns."""
|
||||
self.mock_object(self.target, 'share_targets', True)
|
||||
vol = mock.Mock()
|
||||
mock_nqns.return_value = [mock.sentinel.nqn1, mock.sentinel.nqn2]
|
||||
|
||||
self.target._locked_unmap_volume(vol, connector=None)
|
||||
|
||||
mock_lock.assert_called()
|
||||
mock_nqn.assert_not_called()
|
||||
mock_nqns.assert_called_once_with(vol.provider_location)
|
||||
|
||||
expected = [mock.call(vol, mock.sentinel.nqn1),
|
||||
mock.call(vol, mock.sentinel.nqn2)]
|
||||
mock_unmap.assert_has_calls(expected)
|
||||
self.assertEqual(2, mock_unmap.call_count)
|
||||
|
||||
@mock.patch.object(nvmet.NVMET, '_get_target_nqn')
|
||||
@mock.patch.object(priv_nvmet, 'Subsystem')
|
||||
def test__unmap_volume_no_subsys(self, mock_subsys, mock_nqn):
|
||||
"""Nothing to do it there is no subsystem."""
|
||||
mock_subsys.side_effect = priv_nvmet.NotFound
|
||||
vol = mock.Mock()
|
||||
# This port is used just to confirm we don't reach that part
|
||||
port = mock.Mock(subsystems=[mock.sentinel.port])
|
||||
self.mock_object(priv_nvmet.Root, 'ports', [port])
|
||||
|
||||
port1 = mock.Mock(subsystems=[])
|
||||
port2 = mock.Mock(subsystems=['subs1'])
|
||||
self.mock_object(priv_nvmet.Root, 'ports', [port1, port2])
|
||||
self.target._unmap_volume(vol, mock.sentinel.nqn)
|
||||
mock_subsys.assert_called_once_with(mock.sentinel.nqn)
|
||||
|
||||
volume = mock.Mock(id='vol-uuid')
|
||||
self.target.delete_nvmeof_target(volume)
|
||||
|
||||
mock_nqn.assert_called_once_with(volume.id)
|
||||
port1.remove_subsystem.assert_not_called()
|
||||
port2.remove_subsystem.assert_not_called()
|
||||
mock_subsys.delete.assert_not_called()
|
||||
port.remove_subsystem.assert_not_called()
|
||||
|
||||
@mock.patch.object(priv_nvmet, 'Subsystem')
|
||||
@mock.patch.object(nvmet.NVMET, '_get_target_nqn')
|
||||
def test_delete_nvmeof_target(self, mock_nqn, mock_subsys):
|
||||
"""Delete removes subsystems from port and the subsystem."""
|
||||
mock_nqn.return_value = mock.sentinel.nqn
|
||||
def test__unmap_volume_not_shared(self, mock_subsys):
|
||||
"""Non shared assumes the subsystem is empty."""
|
||||
vol = mock.Mock()
|
||||
# The ns is used to confirm we don't check it
|
||||
ns = mock.Mock(**{'get_attr.return_value': vol.provider_location})
|
||||
subsys = mock_subsys.return_value
|
||||
subsys.nqn = mock.sentinel.nqn
|
||||
subsys.namespaces = [ns]
|
||||
|
||||
port1 = mock.Mock(subsystems=[])
|
||||
port2 = mock.Mock(subsystems=[mock.sentinel.nqn])
|
||||
port3 = mock.Mock(subsystems=['subs1'])
|
||||
self.mock_object(priv_nvmet.Root, 'ports', [port1, port2, port3])
|
||||
port = mock.Mock(subsystems=[subsys.nqn])
|
||||
self.mock_object(priv_nvmet.Root, 'ports', [port])
|
||||
|
||||
volume = mock.Mock(id='vol-uuid')
|
||||
self.target.delete_nvmeof_target(volume)
|
||||
self.target._unmap_volume(vol, mock.sentinel.nqn)
|
||||
|
||||
mock_nqn.assert_called_once_with(volume.id)
|
||||
port1.remove_subsystem.assert_not_called()
|
||||
port2.remove_subsystem.assert_called_once_with(mock.sentinel.nqn)
|
||||
port3.remove_subsystem.assert_not_called()
|
||||
mock_subsys.assert_called_once_with(mock.sentinel.nqn)
|
||||
|
||||
ns.get_attr.assert_not_called()
|
||||
ns.delete.assert_not_called()
|
||||
|
||||
port.remove_subsystem.assert_called_once_with(mock.sentinel.nqn)
|
||||
subsys.delete.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(priv_nvmet, 'Subsystem')
def test__unmap_volume_shared_more_ns(self, mock_subsys):
    """For shared don't unexport subsys if there are other ns."""
    self.mock_object(self.target, 'share_targets', True)
    vol = mock.Mock()

    # The subsystem keeps this namespace even after delete, simulating
    # other volumes still mapped in the shared subsystem.
    ns = mock.Mock(**{'get_attr.return_value': vol.provider_location})
    subsys = mock_subsys.return_value
    subsys.namespaces = [ns]

    # Use this port to confirm we don't reach that point
    port = mock.Mock(subsystems=[subsys])
    self.mock_object(priv_nvmet.Root, 'ports', [port])

    self.target._unmap_volume(vol, mock.sentinel.nqn)

    mock_subsys.assert_called_once_with(mock.sentinel.nqn)

    # The matching namespace is looked up and deleted
    ns.get_attr.assert_called_once_with('device', 'path')
    ns.delete.assert_called_once_with()

    # Subsystem still has namespaces, so it must not be unexported
    port.remove_subsystem.assert_not_called()
    mock_subsys.return_value.delete.assert_not_called()
|
||||
|
||||
@mock.patch('oslo_concurrency.lockutils.lock')
@mock.patch.object(nvmet.NVMET, '_get_target_nqn')
@mock.patch.object(priv_nvmet, 'Subsystem')
def test__unmap_volume_shared_last_ns(self, mock_subsys, mock_nqn,
                                      mock_lock):
    """For shared unexport subsys if there are no other ns."""
    self.mock_object(self.target, 'share_targets', True)
    vol = mock.Mock()

    # Deleting the namespace empties the list, simulating that this was
    # the last namespace in the shared subsystem.
    ns = mock.Mock(**{'get_attr.return_value': vol.provider_location})
    nss = [ns]
    ns.delete.side_effect = nss.clear
    subsys = mock_subsys.return_value
    subsys.nqn = mock.sentinel.nqn
    subsys.namespaces = nss

    port = mock.Mock(subsystems=[subsys.nqn])
    self.mock_object(priv_nvmet.Root, 'ports', [port])

    self.target._unmap_volume(vol, mock.sentinel.nqn)

    mock_subsys.assert_called_once_with(mock.sentinel.nqn)

    ns.get_attr.assert_called_once_with('device', 'path')
    ns.delete.assert_called_once_with()

    # Last namespace gone: subsystem is removed from the port and deleted
    port.remove_subsystem.assert_called_once_with(mock.sentinel.nqn)
    mock_subsys.return_value.delete.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(priv_nvmet, 'Root')
def test__get_available_nvmf_subsystems(self, mock_root):
    """The method just returns the nvmet root dump."""
    res = self.target._get_available_nvmf_subsystems()
    mock_dump = mock_root.return_value.dump
    self.assertEqual(mock_dump.return_value, res)
    mock_dump.assert_called_once_with()
|
||||
|
||||
def test__get_target_nqn(self):
    """Non shared uses volume id for subsystem name."""
    # Connector is not needed (None) when targets are not shared
    res = self.target._get_target_nqn('volume_id', None)
    self.assertEqual('nqn.nvme-subsystem-1-volume_id', res)
|
||||
|
||||
def test__get_target_nqn_shared(self):
    """Shared uses connector's hostname for subsystem name."""
    self.mock_object(self.target, 'share_targets', True)
    # With shared targets the nqn is per-host, not per-volume
    res = self.target._get_target_nqn('volume_id', {'host': 'localhost'})
    self.assertEqual('nqn.nvme-subsystem-1-localhost', res)
|
||||
|
||||
def test__get_nvme_uuid(self):
    """The NVMe uuid is the volume's name_id."""
    vol = mock.Mock()
    res = self.target._get_nvme_uuid(vol)
    self.assertEqual(vol.name_id, res)
|
||||
|
||||
def test__get_nqns_for_location_no_subsystems(self):
    """With no subsystems present no nqns can match a location."""
    self.mock_object(self.target._nvmet_root, 'subsystems', iter([]))
    res = self.target._get_nqns_for_location(mock.sentinel.location)
    self.assertListEqual([], res)
|
||||
|
||||
def test__get_nqns_for_location_no_subsystems_found(self):
    """Nothing is returned when no namespace matches the location."""
    ns1 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location1})
    subsys1 = mock.Mock(namespaces=iter([ns1]))

    ns2 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location2})
    subsys2 = mock.Mock(namespaces=iter([ns2]))

    subsys = iter([subsys1, subsys2])
    self.mock_object(self.target._nvmet_root, 'subsystems', subsys)

    # location3 is not exported by any namespace
    res = self.target._get_nqns_for_location(mock.sentinel.location3)

    self.assertListEqual([], res)
    # Every subsystem's namespaces must have been checked
    ns1.get_attr.assert_called_once_with('device', 'path')
    ns2.get_attr.assert_called_once_with('device', 'path')
|
||||
|
||||
def test__get_nqns_for_location_subsystems_found(self):
    """Matching and empty subsystems are returned; search short-circuits."""
    ns1 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location1})
    subsys1 = mock.Mock(namespaces=iter([ns1]))

    ns2 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location2})
    ns1b = mock.Mock(**{'get_attr.return_value': mock.sentinel.location1})
    ns3 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location3})
    subsys2 = mock.Mock(namespaces=iter([ns2, ns1b, ns3]))

    ns4 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location4})
    subsys3 = mock.Mock(namespaces=iter([ns4]))

    # Empty subsystems are also returned (may be leftovers of a failed
    # creation for this same location)
    subsys4 = mock.Mock(namespaces=iter([]))

    subsys = iter([subsys1, subsys2, subsys3, subsys4])
    self.mock_object(self.target._nvmet_root, 'subsystems', subsys)

    res = self.target._get_nqns_for_location(mock.sentinel.location1)

    self.assertListEqual([subsys1.nqn, subsys2.nqn, subsys4.nqn], res)
    ns1.get_attr.assert_called_once_with('device', 'path')
    ns2.get_attr.assert_called_once_with('device', 'path')
    ns1b.get_attr.assert_called_once_with('device', 'path')
    # ns3 comes after the match in subsys2, so it is never checked
    ns3.get_attr.assert_not_called()
    ns4.get_attr.assert_called_once_with('device', 'path')
|
||||
|
@ -17,6 +17,7 @@ from unittest import mock
|
||||
import ddt
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import importutils
|
||||
|
||||
from cinder.brick.local_dev import lvm as brick_lvm
|
||||
from cinder import db
|
||||
@ -47,6 +48,54 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
|
||||
FAKE_VOLUME = {'name': 'test1',
|
||||
'id': 'test1'}
|
||||
|
||||
def test___init___share_target_not_supported(self):
    """Fail to use shared targets if target driver doesn't support it."""
    original_import = importutils.import_object

    # Wrap the real target driver import so the instantiated driver
    # reports that it does not support shared targets.
    def wrap_target_as_no_shared_support(*args, **kwargs):
        res = original_import(*args, **kwargs)
        self.mock_object(res, 'SHARED_TARGET_SUPPORT', False)
        return res

    self.patch('oslo_utils.importutils.import_object',
               side_effect=wrap_target_as_no_shared_support)

    # Requesting shared targets against a non-supporting driver must fail
    self.configuration.lvm_share_target = True
    self.assertRaises(exception.InvalidConfigurationValue,
                      lvm.LVMVolumeDriver,
                      configuration=self.configuration)
|
||||
|
||||
def test___init___share_target_supported(self):
    """OK to use shared targets if target driver supports it."""
    original_import = importutils.import_object

    # Wrap the real target driver import so the instantiated driver
    # reports shared target support.
    def wrap_target_as_no_shared_support(*args, **kwargs):
        res = original_import(*args, **kwargs)
        self.mock_object(res, 'SHARED_TARGET_SUPPORT', True)
        return res

    self.patch('oslo_utils.importutils.import_object',
               side_effect=wrap_target_as_no_shared_support)

    # Driver initialization must not raise
    self.configuration.lvm_share_target = True
    lvm.LVMVolumeDriver(configuration=self.configuration)
|
||||
|
||||
@ddt.data(True, False)
def test___init___share_target_not_requested(self, supports_shared):
    """For non shared it works regardless of target driver support."""
    original_import = importutils.import_object

    # Wrap the real target driver import to force the support flag to
    # the ddt-provided value.
    def wrap_target_as_no_shared_support(*args, **kwargs):
        res = original_import(*args, **kwargs)
        self.mock_object(res, 'SHARED_TARGET_SUPPORT', supports_shared)
        return res

    self.patch('oslo_utils.importutils.import_object',
               side_effect=wrap_target_as_no_shared_support)

    # Not requesting shared targets must never raise
    self.configuration.lvm_share_target = False
    lvm.LVMVolumeDriver(configuration=self.configuration)
|
||||
|
||||
@mock.patch.object(os.path, 'exists', return_value=True)
|
||||
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export')
|
||||
def test_delete_volume_invalid_parameter(self, _mock_create_export,
|
||||
|
@ -515,6 +515,7 @@ class SpdkDriverTestCase(test.TestCase):
|
||||
mock_safe_get = mock.Mock()
|
||||
mock_safe_get.return_value = 'spdk-nvmeof'
|
||||
self.configuration.safe_get = mock_safe_get
|
||||
self.configuration.lvm_share_target = False
|
||||
self.jsonrpcclient = JSONRPCClient()
|
||||
self.driver = spdk_driver.SPDKDriver(configuration=
|
||||
self.configuration)
|
||||
|
@ -279,8 +279,9 @@ nvmet_opts = [
|
||||
help='The port that the NVMe target is listening on.'),
|
||||
cfg.IntOpt('nvmet_ns_id',
|
||||
default=10,
|
||||
help='The namespace id associated with the subsystem '
|
||||
'that will be created with the path for the LVM volume.'),
|
||||
help='Namespace id for the subsystem for the LVM volume when '
|
||||
'not sharing targets. The minimum id value when sharing.'
|
||||
'Maximum supported value in Linux is 8192')
|
||||
]
|
||||
|
||||
scst_opts = [
|
||||
|
@ -68,6 +68,10 @@ volume_opts = [
|
||||
default=False,
|
||||
help='Suppress leaked file descriptor warnings in LVM '
|
||||
'commands.'),
|
||||
cfg.BoolOpt('lvm_share_target',
|
||||
default=False,
|
||||
help='Whether to share the same target for all LUNs or not '
|
||||
'(currently only supported by nvmet.'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
@ -108,7 +112,12 @@ class LVMVolumeDriver(driver.VolumeDriver):
|
||||
target_driver,
|
||||
configuration=self.configuration,
|
||||
executor=self._execute)
|
||||
self.protocol = self.target_driver.protocol
|
||||
self.protocol = (self.target_driver.storage_protocol or
|
||||
self.target_driver.protocol)
|
||||
if (self.configuration.lvm_share_target
|
||||
and not self.target_driver.SHARED_TARGET_SUPPORT):
|
||||
raise exception.InvalidConfigurationValue(
|
||||
f"{target_driver} doesn't support shared targets")
|
||||
self._sparse_copy_volume = False
|
||||
|
||||
@classmethod
|
||||
@ -285,8 +294,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
|
||||
backend_state='up'
|
||||
))
|
||||
data["pools"].append(single_pool)
|
||||
data["shared_targets"] = False
|
||||
|
||||
data["shared_targets"] = self.configuration.lvm_share_target
|
||||
# Check availability of sparse volume copy.
|
||||
data['sparse_copy_volume'] = self._sparse_copy_volume
|
||||
|
||||
|
@ -31,6 +31,8 @@ class Target(object, metaclass=abc.ABCMeta):
|
||||
well as force implementation of required methods.
|
||||
|
||||
"""
|
||||
storage_protocol = None
|
||||
SHARED_TARGET_SUPPORT = False
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
# TODO(stephenfin): Drop this in favour of using 'db' directly
|
||||
|
@ -10,8 +10,6 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from cinder.common import constants
|
||||
@ -47,6 +45,9 @@ class NVMeOF(driver.Target):
|
||||
self.nvmet_port_id = self.configuration.nvmet_port_id
|
||||
self.nvmet_ns_id = self.configuration.nvmet_ns_id
|
||||
self.nvmet_subsystem_name = self.configuration.target_prefix
|
||||
# Compatibility with non lvm drivers
|
||||
self.share_targets = getattr(self.configuration,
|
||||
'lvm_share_target', False)
|
||||
target_protocol = self.configuration.target_protocol
|
||||
if target_protocol in self.target_protocol_map:
|
||||
self.nvme_transport_type = self.target_protocol_map[
|
||||
@ -61,7 +62,7 @@ class NVMeOF(driver.Target):
|
||||
|
||||
In NVMeOF driver, :driver_volume_type: is set to 'nvmeof',
|
||||
:data: is the driver data that has the value of
|
||||
_get_connection_properties.
|
||||
_get_connection_properties_from_vol.
|
||||
|
||||
Example return value:
|
||||
|
||||
@ -81,12 +82,36 @@ class NVMeOF(driver.Target):
|
||||
"""
|
||||
return {
|
||||
'driver_volume_type': self.protocol,
|
||||
'data': self._get_connection_properties(volume)
|
||||
'data': self._get_connection_properties_from_vol(volume)
|
||||
}
|
||||
|
||||
def _get_connection_properties(self, volume):
|
||||
def _get_connection_properties_from_vol(self, volume):
|
||||
"""Gets NVMeOF connection configuration.
|
||||
|
||||
Returns the connection info based on the volume's provider_location and
|
||||
the _get_nvme_uuid method for the volume.
|
||||
|
||||
For the specific data returned check the _get_connection_properties
|
||||
method.
|
||||
|
||||
:return: dictionary with the connection properties using one of the 2
|
||||
existing formats depending on the nvmeof_new_conn_info
|
||||
configuration option.
|
||||
"""
|
||||
location = volume['provider_location']
|
||||
target_connection, nvme_transport_type, nqn, nvmet_ns_id = (
|
||||
location.split(' '))
|
||||
target_portal, target_port = target_connection.split(':')
|
||||
|
||||
uuid = self._get_nvme_uuid(volume)
|
||||
return self._get_connection_properties(nqn, target_portal, target_port,
|
||||
nvme_transport_type,
|
||||
nvmet_ns_id, uuid)
|
||||
|
||||
def _get_connection_properties(self, nqn, portal, port, transport, ns_id,
|
||||
uuid):
|
||||
"""Get connection properties dictionary.
|
||||
|
||||
For nvmeof_conn_info_version set to 1 (default) the old format will
|
||||
be sent:
|
||||
{
|
||||
@ -114,33 +139,32 @@ class NVMeOF(driver.Target):
|
||||
existing formats depending on the nvmeof_conn_info_version
|
||||
configuration option.
|
||||
"""
|
||||
location = volume['provider_location']
|
||||
target_connection, nvme_transport_type, nqn, nvmet_ns_id = (
|
||||
location.split(' '))
|
||||
target_portal, target_port = target_connection.split(':')
|
||||
|
||||
# NVMe-oF Connection Information Version 2
|
||||
if self.configuration.nvmeof_conn_info_version == 2:
|
||||
uuid = self._get_nvme_uuid(volume)
|
||||
if nvme_transport_type == 'rdma':
|
||||
nvme_transport_type = 'RoCEv2'
|
||||
if transport == 'rdma':
|
||||
transport = 'RoCEv2'
|
||||
|
||||
if transport == 'rdma':
|
||||
transport = 'RoCEv2'
|
||||
|
||||
return {
|
||||
'target_nqn': nqn,
|
||||
'vol_uuid': uuid,
|
||||
'portals': [(target_portal, target_port, nvme_transport_type)],
|
||||
'ns_id': nvmet_ns_id,
|
||||
'portals': [(portal, port, transport)],
|
||||
'ns_id': ns_id,
|
||||
}
|
||||
|
||||
# NVMe-oF Connection Information Version 1
|
||||
return {
|
||||
'target_portal': target_portal,
|
||||
'target_port': target_port,
|
||||
result = {
|
||||
'target_portal': portal,
|
||||
'target_port': port,
|
||||
'nqn': nqn,
|
||||
'transport_type': nvme_transport_type,
|
||||
'ns_id': nvmet_ns_id,
|
||||
'transport_type': transport,
|
||||
'ns_id': ns_id,
|
||||
}
|
||||
|
||||
return result
|
||||
|
||||
def _get_nvme_uuid(self, volume):
|
||||
"""Return the NVMe uuid of a given volume.
|
||||
|
||||
@ -207,6 +231,6 @@ class NVMeOF(driver.Target):
|
||||
"""Targets that don't override create_export must implement this."""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def delete_nvmeof_target(self, target_name):
|
||||
"""Targets that don't override remove_export must implement this."""
|
||||
pass
|
||||
|
@ -10,6 +10,8 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
@ -31,20 +33,64 @@ class NVMETTargetDeleteError(exception.CinderException):
|
||||
|
||||
|
||||
class NVMET(nvmeof.NVMeOF):
|
||||
SHARED_TARGET_SUPPORT = True
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self._nvmet_root = nvmet.Root()
|
||||
|
||||
@utils.synchronized('nvmetcli', external=True)
|
||||
# ####### Connection initiation methods ########
|
||||
|
||||
def initialize_connection(self, volume, connector):
    """Create an export & map if shared.

    :param volume: volume being attached; for shared targets its
                   provider_location holds the LVM volume path.
    :param connector: connector dict from os-brick (its 'host' is used to
                      name the per-host subsystem for shared targets).
    :raises exception.InvalidConfigurationValue: if the driver is
        configured as shared but the volume was exported as non shared.
    :return: dict with 'driver_volume_type' and connection 'data'.
    """
    # Non-shared connections was the original implementation where all the
    # export & mapping was done on export and the connection info was
    # stored in the volume, so let the original implementation handle it.
    if not self.share_targets:
        return super().initialize_connection(volume, connector)

    # For the shared case the export only stores the path of the volume
    volume_path = volume.provider_location
    if not os.path.exists(volume_path):
        raise exception.InvalidConfigurationValue(
            'Target driver configured with shared targets, but volume '
            'exported as non shared.')

    nqn, ns_id = self._map_volume(volume, volume_path, connector)
    uuid = self._get_nvme_uuid(volume)
    return {
        'driver_volume_type': self.protocol,
        'data': self._get_connection_properties(nqn,
                                                self.target_ip,
                                                self.target_port,
                                                self.nvme_transport_type,
                                                ns_id, uuid),
    }
|
||||
|
||||
def create_export(self, context, volume, volume_path):
    """Create an export & map if not shared.

    For shared targets the returned location is just the volume path,
    because the real export/map happens in initialize_connection once the
    connector is known.  For non shared targets the volume is exported
    now and the location encodes the full connection information.

    :return: dict with the provider 'location' and empty 'auth'.
    """
    # For shared targets everything gets done on initialize_connection
    if self.share_targets:
        location = volume_path
    else:
        nqn, ns_id = self._map_volume(volume, volume_path)
        location = self.get_nvmeof_location(nqn,
                                            self.target_ip,
                                            self.target_port,
                                            self.nvme_transport_type,
                                            ns_id)

    return {'location': location, 'auth': ''}
|
||||
|
||||
@utils.synchronized('nvmetcli', external=True)
|
||||
def _map_volume(self, volume, volume_path, connector=None):
|
||||
"""Ensure a volume is exported and mapped in nvmet."""
|
||||
# Create NVME subsystem for previously created LV
|
||||
nqn = self._get_target_nqn(volume.id)
|
||||
nqn = self._get_target_nqn(volume.id, connector)
|
||||
try:
|
||||
uuid = self._get_nvme_uuid(volume)
|
||||
|
||||
self._ensure_subsystem_exists(nqn, self.nvmet_ns_id, volume_path,
|
||||
uuid)
|
||||
ns_id = self._ensure_subsystem_exists(nqn, volume_path, uuid)
|
||||
|
||||
self._ensure_port_exports(nqn, self.target_ip, self.target_port,
|
||||
self.nvme_transport_type,
|
||||
@ -54,44 +100,97 @@ class NVMET(nvmeof.NVMeOF):
|
||||
raise NVMETTargetAddError(subsystem=nqn)
|
||||
|
||||
LOG.info('Subsystem %s now exported on port %s', nqn, self.target_port)
|
||||
return {
|
||||
'location': self.get_nvmeof_location(
|
||||
nqn,
|
||||
self.target_ip,
|
||||
self.target_port,
|
||||
self.nvme_transport_type,
|
||||
self.nvmet_ns_id),
|
||||
'auth': ''}
|
||||
return nqn, ns_id
|
||||
|
||||
def _ensure_subsystem_exists(self, nqn, volume_path, uuid):
    """Ensure a subsystem and namespace exist in nvmet.

    :param nqn: nqn of the subsystem to look up or create.
    :param volume_path: path of the LVM volume to export as a namespace.
    :param uuid: NVMe uuid for the namespace.
    :return: the namespace id within the subsystem for volume_path.
    """
    # Assume if subsystem exists, it has the right configuration
    try:
        subsystem = nvmet.Subsystem(nqn)
        LOG.debug('Skip creating subsystem %s as it already exists.', nqn)
        # The subsystem may exist but not contain this volume's namespace
        # (shared targets), so the namespace is ensured separately.
        ns_id = self._ensure_namespace_exists(subsystem, volume_path, uuid)
        return ns_id

    except nvmet.NotFound:
        LOG.debug('Creating subsystem %s.', nqn)

        # New subsystems always start at the configured namespace id
        ns_id = self.nvmet_ns_id
        subsystem_section = {
            "allowed_hosts": [],
            "attr": {
                "allow_any_host": "1"
            },
            "namespaces": [self._namespace_dict(uuid, volume_path, ns_id)],
            "nqn": nqn}

        nvmet.Subsystem.setup(subsystem_section)  # privsep
        LOG.debug('Added subsystem: %s', nqn)
        return ns_id
|
||||
|
||||
def _namespace_dict(self, uuid, volume_path, ns_id):
    """Build the dict data for a new namespace in nvmet library format.

    :param uuid: NVMe uuid for the namespace.
    :param volume_path: path of the LVM volume backing the namespace.
    :param ns_id: namespace id to use within the subsystem.
    :return: dict in the format expected by nvmet.Namespace.setup.
    """
    if self.share_targets:
        # When sharing the subsystem, reuse the uuid as nguid so the
        # namespace can be identified by either value.
        nguid = uuid
        LOG.debug('Sharing subsystem, using nguid = uuid = %s', nguid)
    else:
        nguid = str(uuidutils.generate_uuid())
        # Fix: log message typo 'randmo' -> 'random'
        LOG.debug('Not sharing subsystem, using random nguid = %s', nguid)
    return {
        "device": {
            "nguid": nguid,
            "uuid": uuid,
            "path": volume_path,
        },
        "enable": 1,
        "nsid": ns_id
    }
|
||||
|
||||
def _ensure_namespace_exists(self, subsystem, volume_path, uuid):
|
||||
"""Ensure the namespace exists in nvmet."""
|
||||
for ns in subsystem.namespaces:
|
||||
if ns.get_attr('device', 'path') == volume_path:
|
||||
return ns.nsid
|
||||
|
||||
ns_id = self._get_available_namespace_id(subsystem)
|
||||
ns_data = self._namespace_dict(uuid, volume_path, ns_id)
|
||||
nvmet.Namespace.setup(subsystem, ns_data)
|
||||
return ns_id
|
||||
|
||||
def _get_available_namespace_id(self, subsystem):
|
||||
"""Get the next available ns_id.
|
||||
|
||||
Shared targets will have multiple namespaces under the same subsystem,
|
||||
so we cannot use self.nvmet_ns_id for them all.
|
||||
|
||||
This method searches for an available namespace id in the provided
|
||||
subsystem considering all ids below self.nvmet_ns_id as reserved.
|
||||
|
||||
We cannot let the nvmet library assign it automatically because it
|
||||
starts assigning from 1.
|
||||
|
||||
For non shared the method returns configured nvmet_ns_id.
|
||||
"""
|
||||
minimum = self.nvmet_ns_id
|
||||
|
||||
if not self.share_targets:
|
||||
return minimum
|
||||
|
||||
used = [ns.nsid for ns in subsystem.namespaces if ns.nsid >= minimum]
|
||||
|
||||
if not used:
|
||||
return minimum
|
||||
|
||||
higher = max(used)
|
||||
# If there are no gaps return the next available id
|
||||
if len(used) > higher - minimum:
|
||||
if higher == nvmet.Namespace.MAX_NSID:
|
||||
raise Exception('Reached max namespaces in subsystem')
|
||||
return higher + 1
|
||||
|
||||
# Find an id in the gaps. Don't include higher, as we know it's used
|
||||
available = set(range(minimum, higher)).difference(used)
|
||||
return available.pop()
|
||||
|
||||
def _get_nvme_uuid(self, volume):
|
||||
return volume.name_id
|
||||
@ -127,34 +226,100 @@ class NVMET(nvmeof.NVMeOF):
|
||||
port.add_subsystem(nqn) # privsep
|
||||
LOG.debug('Exported %s on port %s', nqn, port_id)
|
||||
|
||||
@utils.synchronized('nvmetcli', external=True)
|
||||
def delete_nvmeof_target(self, volume):
|
||||
subsystem_name = self._get_target_nqn(volume.id)
|
||||
LOG.debug('Removing subsystem: %s', subsystem_name)
|
||||
# ####### Connection termination methods ########
|
||||
|
||||
for port in self._nvmet_root.ports:
|
||||
if subsystem_name in port.subsystems:
|
||||
LOG.debug('Removing %s from port %s',
|
||||
subsystem_name, port.portid)
|
||||
port.remove_subsystem(subsystem_name)
|
||||
|
||||
try:
|
||||
subsys = nvmet.Subsystem(subsystem_name)
|
||||
LOG.debug('Deleting %s', subsystem_name)
|
||||
subsys.delete() # privsep call
|
||||
LOG.info('Subsystem %s removed', subsystem_name)
|
||||
except nvmet.NotFound:
|
||||
LOG.info('Skipping remove_export. No NVMe subsystem for volume: '
|
||||
'%s', volume.id)
|
||||
except Exception:
|
||||
LOG.error('Failed to delete subsystem: %s', subsystem_name)
|
||||
raise NVMETTargetDeleteError(subsystem=subsystem_name)
|
||||
def terminate_connection(self, volume, connector, **kwargs):
    """Remove the mapping for shared.

    Non shared targets are unmapped in remove_export instead.
    """
    # TODO: Add support for force and other parameters
    if self.share_targets:
        self._locked_unmap_volume(volume, connector)
        LOG.info('Volume %s is no longer exported', volume.id)
|
||||
|
||||
def _get_available_nvmf_subsystems(self):
|
||||
nvme_root = nvmet.Root()
|
||||
subsystems = nvme_root.dump()
|
||||
return subsystems
|
||||
def remove_export(self, context, volume):
    """Remove the mapping for non shared.

    Shared targets are unmapped in terminate_connection instead.
    """
    if not self.share_targets:
        self._locked_unmap_volume(volume)
        LOG.info('Volume %s is no longer exported', volume.id)
|
||||
|
||||
def _get_target_nqn(self, volume_id):
|
||||
return "nqn.%s-%s" % (self.nvmet_subsystem_name, volume_id)
|
||||
@utils.synchronized('nvmetcli', external=True)
def _locked_unmap_volume(self, volume, connector=None):
    """Remove volume's ns from subsystem and subsystem if empty.

    With a connector (or when not sharing) only the nqn for that
    connector/volume is unmapped.  Without a connector on shared targets
    every subsystem exporting the volume's path is unmapped.
    """
    if connector or not self.share_targets:
        nqns = [self._get_target_nqn(volume.id, connector)]
    else:
        # We need to remove all existing maps (we are sharing)
        LOG.debug('Removing EVERYTHING for volume %s', volume.id)
        nqns = self._get_nqns_for_location(volume.provider_location)

    # Keep unmapping the rest even if one nqn fails
    exceptions = []
    for nqn in nqns:
        try:
            self._unmap_volume(volume, nqn)
        except Exception as exc:
            exceptions.append(exc)

    # TODO: Once we only support Python 3.11+ use ExceptionGroup to raise
    # all the exceptions.
    if exceptions:
        raise exceptions[0]
|
||||
|
||||
def _unmap_volume(self, volume, nqn):
    """Remove the volume's namespace from a subsystem and clean it up.

    For shared targets only the namespace backed by the volume's path is
    deleted, and the subsystem (plus its port exports) is removed only
    when it has no namespaces left.  For non shared targets the whole
    subsystem is removed directly.

    Must be called within the nvmetcli locked section.
    """
    try:
        subsystem = nvmet.Subsystem(nqn)
    except nvmet.NotFound:
        LOG.info('Skipping unmapping. No NVMe subsystem for volume: %s',
                 volume.id)
        return

    if self.share_targets:
        volume_path = volume.provider_location
        for ns in subsystem.namespaces:
            if ns.get_attr('device', 'path') == volume_path:
                LOG.debug('Deleting namespace %s', ns.nsid)
                ns.delete()  # privsep call
                break

        # If there are still namespaces we cannot remove the subsystem
        if any(s for s in subsystem.namespaces):
            return

    # Unexport the subsystem from every port before deleting it
    for port in self._nvmet_root.ports:
        if nqn in port.subsystems:
            LOG.debug('Removing %s from port %s', nqn, port.portid)
            port.remove_subsystem(nqn)  # privsep call

    LOG.debug('Deleting %s', nqn)
    subsystem.delete()  # privsep call
    LOG.info('Subsystem %s removed', nqn)
|
||||
|
||||
# ####### General methods ########
|
||||
|
||||
def _get_target_nqn(self, volume_id, connector):
|
||||
# For shared targets the subsystem is named after the host
|
||||
if self.share_targets:
|
||||
postfix = connector['host']
|
||||
else:
|
||||
postfix = volume_id
|
||||
return f'nqn.{self.nvmet_subsystem_name}-{postfix}'
|
||||
|
||||
def _get_nqns_for_location(self, provider_location):
|
||||
"""Get all subystem nqns for a give provider location.
|
||||
|
||||
This also returns empty subsystems, since we don't know if those were
|
||||
created to try to use them for the volume of the provider_location and
|
||||
failed during the creation.
|
||||
|
||||
This method needs to be called within the nvmetcli locked section.
|
||||
"""
|
||||
nqns = []
|
||||
for subsys in self._nvmet_root.subsystems:
|
||||
empty = True # subsytems is an iterable, can check it with bool
|
||||
found = False
|
||||
for ns in subsys.namespaces:
|
||||
empty = False
|
||||
if ns.get_attr('device', 'path') == provider_location:
|
||||
found = True
|
||||
break
|
||||
if found or empty:
|
||||
nqns.append(subsys.nqn)
|
||||
return nqns
|
||||
|
@ -0,0 +1,6 @@
|
||||
---
features:
  - |
    nvmet target driver: Added support for shared subsystems/targets using the
    ``lvm_share_target`` configuration option. Defaults to non shared, i.e.,
    each volume has its own subsystem/target.
|
Loading…
Reference in New Issue
Block a user