From 00793ac09ba125c6af43bbbfeadc483de042815b Mon Sep 17 00:00:00 2001
From: Gorka Eguileor
Date: Tue, 8 Mar 2022 16:10:40 +0100
Subject: [PATCH] LVM-nvmet: Use nvmetcli as library instead of CLI

The nvmet target was using the nvmetcli command to manage the NVMe-oF
targets, but the command has a big limitation when it comes to
controlling its behaviour through the command line: it can only restore
the full configuration, and small parts of it, such as ports or
subsystems, cannot be modified directly without entering the
interactive mode.

Due to this limitation the nvmet target would:

- Save the current nvmet configuration
- Make changes to the JSON data
- Restore the updated configuration

The problem with this process, besides being slow because it runs a CLI
command and uses temporary files, is that the restoration completely
deletes EVERYTHING before recreating it again.  This means that all
hosts connected to volumes suddenly lose access to them (because the
namespaces and subsystems have disappeared) and keep retrying to
connect.  The reconnect succeeds once nvmet has restored the
configuration, but that is 10 to 20 seconds during which hosts cannot
access their volumes (which may block things inside VMs), and nvme
error messages appear in the kernel log.

To fix all of these issues, both the slowness and the disconnects, this
patch stops using nvmetcli as a CLI and uses it as a Python library
instead, which is where most of its functionality lives.

Querying the nvmet system can be done directly with the library, but
making changes (creating/destroying ports, subsystems, and namespaces)
requires privileges, so this patch adds a privsep wrapper for the
operations that we use and that cannot be done as a normal user.

The nvmet wrapper doesn't provide privsep support for ALL operations,
only for those that we currently use.

Because of the client-server architecture of privsep, and because nvmet
takes non-primitive instances as parameters, the privsep wrapper needs
custom serialization code to pass these instances.

As a side effect of the refactoring we also fix a bug where we tried to
create the port over and over again on each create_export call, which
resulted in nvme kernel warning logs.
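
For illustration only, the serialization round trip used by the privsep
wrapper looks roughly like this (a simplified sketch; the real helpers
live in cinder/privsep/targets/nvmet.py in this patch):

    from cinder.privsep.targets import nvmet as priv_nvmet

    # serialize() turns an nvmet object into a ('ClassName', kwargs)
    # tuple that privsep can ship over its JSON channel.
    port = priv_nvmet.Port(1)
    data = priv_nvmet.serialize(port)
    # data == ('Port', {'portid': 1, 'mode': 'lookup'})

    # deserialize() rebuilds the instance on the privileged side by
    # looking it up again from those identifying attributes.
    same_port = priv_nvmet.deserialize(data)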
Closes-Bug: #1964391 Closes-Bug: #1964394 Change-Id: Icae9802713867fa148bc041c86beb010086dacc9 --- cinder/privsep/nvmcli.py | 41 -- cinder/privsep/path.py | 15 - cinder/privsep/targets/nvmet.py | 209 ++++++++ cinder/tests/unit/privsep/__init__.py | 0 cinder/tests/unit/privsep/targets/__init__.py | 0 .../unit/privsep/targets/fake_nvmet_lib.py | 40 ++ .../tests/unit/privsep/targets/test_nvmet.py | 342 +++++++++++++ .../tests/unit/targets/test_nvmet_driver.py | 460 +++++++++--------- cinder/volume/targets/nvmet.py | 220 +++------ doc/source/conf.py | 3 + .../lvm-nvmet-fixes-fc5e867abc699633.yaml | 12 + 11 files changed, 913 insertions(+), 429 deletions(-) delete mode 100644 cinder/privsep/nvmcli.py create mode 100644 cinder/privsep/targets/nvmet.py create mode 100644 cinder/tests/unit/privsep/__init__.py create mode 100644 cinder/tests/unit/privsep/targets/__init__.py create mode 100644 cinder/tests/unit/privsep/targets/fake_nvmet_lib.py create mode 100644 cinder/tests/unit/privsep/targets/test_nvmet.py create mode 100644 releasenotes/notes/lvm-nvmet-fixes-fc5e867abc699633.yaml diff --git a/cinder/privsep/nvmcli.py b/cinder/privsep/nvmcli.py deleted file mode 100644 index 95f5b1c569d..00000000000 --- a/cinder/privsep/nvmcli.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2018 Red Hat, Inc -# Copyright 2017 Rackspace Australia -# Copyright 2018 Michael Still and Aptira -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Helpers for nvmetcli related routines. -""" - -from oslo_concurrency import processutils - -import cinder.privsep - - -@cinder.privsep.sys_admin_pctxt.entrypoint -def restore(path): - cmd = [ - 'nvmetcli', - 'restore', - path] - return processutils.execute(*cmd) - - -@cinder.privsep.sys_admin_pctxt.entrypoint -def save(path): - cmd = [ - 'nvmetcli', - 'save', - path] - return processutils.execute(*cmd) diff --git a/cinder/privsep/path.py b/cinder/privsep/path.py index 8ab98e54cb2..6fd7405fdab 100644 --- a/cinder/privsep/path.py +++ b/cinder/privsep/path.py @@ -24,21 +24,6 @@ from cinder import exception import cinder.privsep -@cinder.privsep.sys_admin_pctxt.entrypoint -def readfile(path): - if not os.path.exists(path): - raise exception.FileNotFound(file_path=path) - with open(path, 'r') as f: - return f.read() - - -@cinder.privsep.sys_admin_pctxt.entrypoint -def removefile(path): - if not os.path.exists(path): - raise exception.FileNotFound(file_path=path) - os.unlink(path) - - @cinder.privsep.sys_admin_pctxt.entrypoint def touch(path): if os.path.exists(path): diff --git a/cinder/privsep/targets/nvmet.py b/cinder/privsep/targets/nvmet.py new file mode 100644 index 00000000000..9b2f5c8f7a9 --- /dev/null +++ b/cinder/privsep/targets/nvmet.py @@ -0,0 +1,209 @@ +# Copyright 2022 Red Hat, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""NVMet Python Interface using privsep
+
+This file adds privsep support to the nvmet package so that it can be easily
+consumed by the Cinder nvmet target.
+
+It also:
+
+- Adds some methods to the Root class to be able to get a specific subsystem
+  or port directly without having to go through all the existing ones.
+
+- Presents the CFSNotFound exception as a NotFound exception, which is easier
+  to consume.
+"""
+import os
+
+import nvmet
+from oslo_log import log as logging
+
+from cinder import exception
+from cinder import privsep
+
+
+LOG = logging.getLogger(__name__)
+
+
+###################
+# Helper methods to serialize/deserialize parameters to be sent through
+# privsep and to do the instance/class calls on the privsep side.
+
+def serialize(instance):
+    """Serialize parameters, especially nvmet instances.
+
+    The idea is to be able to pass an nvmet instance to privsep methods, since
+    they are sometimes required as parameters (e.g. port.setup) and also to
+    pass the instance on which do_privsep_call has to call a specific method.
+
+    Instances are passed as a tuple, with the name of the class as the first
+    element and the kwargs necessary to instantiate an instance of that class
+    as the second element.
+
+    To differentiate nvmet instances from plain tuples, plain tuples are
+    wrapped in a tuple whose first element is the string 'tuple'.
+
+    All other instances are passed unaltered.
+    """
+    if isinstance(instance, nvmet.Root):
+        return ('Root', {})
+
+    if isinstance(instance, (nvmet.Subsystem, nvmet.Host)):
+        return (type(instance).__name__, {'nqn': instance.nqn,
+                                          'mode': 'lookup'})
+
+    if isinstance(instance, nvmet.Namespace):
+        return ('Namespace', {'nsid': instance.nsid,
+                              'subsystem': serialize(instance.subsystem),
+                              'mode': 'lookup'})
+
+    if isinstance(instance, nvmet.Port):
+        return ('Port', {'portid': instance.portid, 'mode': 'lookup'})
+
+    if isinstance(instance, nvmet.Referral):
+        return ('Referral', {'name': instance.name,
+                             'port': serialize(instance.port),
+                             'mode': 'lookup'})
+
+    if isinstance(instance, nvmet.ANAGroup):
+        return ('ANAGroup', {'grpid': instance.grpid,
+                             'port': serialize(instance.port),
+                             'mode': 'lookup'})
+
+    if isinstance(instance, tuple):
+        return ('tuple', instance)
+
+    return instance
+
+
+def deserialize(data):
+    """Deserialize an instance, especially nvmet instances.
+
+    Reverse operation of the serialize method. Converts an nvmet instance
+    serialized in a tuple into an actual nvmet instance.
+    """
+    if not isinstance(data, tuple):
+        return data
+
+    cls_name, cls_params = data
+    if cls_name == 'tuple':
+        return cls_params
+
+    # Parameters for the instantiation of the class can be nvmet objects
+    # themselves.
+ params = {name: deserialize(value) for name, value in cls_params.items()} + # We don't want the classes from the nvmet method but ours instead + instance = getattr(nvmet, cls_name)(**params) + return instance + + +def deserialize_params(args, kwargs): + """Deserialize function arguments using deserialize method.""" + args = [deserialize(arg) for arg in args] + kwargs = {key: deserialize(value) for key, value in kwargs.items()} + return args, kwargs + + +def _nvmet_setup_failure(message): + """Simple error method to use when calling nvmet setup methods.""" + LOG.error(message) + raise exception.CinderException(message) + + +@privsep.sys_admin_pctxt.entrypoint +def do_privsep_call(instance, method_name, *args, **kwargs): + """General privsep method for instance calls. + + Handle privsep method calls by deserializing the instance where we want to + call a given method with the deserialized parameters. + """ + LOG.debug('Calling %s on %s with %s - %s', + method_name, instance, args, kwargs) + instance = deserialize(instance) + method = getattr(instance, method_name) + args, kwargs = deserialize_params(args, kwargs) + # NOTE: No returning nvmet objects support. If needed add serialization on + # the result and deserialization decorator before the entrypoint. + return method(*args, **kwargs) + + +@privsep.sys_admin_pctxt.entrypoint +def privsep_setup(cls_name, *args, **kwargs): + """Special privsep method for nvmet setup method calls. + + The setup method is a special case because it's a class method (which + privsep cannot handle) and also requires a function for the error handling. + + This method accepts a class name and reconstructs it, then calls the + class' setup method passing our own error function. + """ + LOG.debug('Setup %s with %s - %s', cls_name, args, kwargs) + cls = getattr(nvmet, cls_name) + args, kwargs = deserialize_params(args, kwargs) + kwargs['err_func'] = _nvmet_setup_failure + cls.setup(*args, **kwargs) + + +################### +# Classes that don't currently have privsep support + +Namespace = nvmet.Namespace +Host = nvmet.Host +Referral = nvmet.Referral +ANAGroup = nvmet.ANAGroup + + +################### +# Custom classes that divert privileges calls to privsep +# Support in these classes is limited to what's needed by the nvmet target. 
+ +# Convenience error class link to nvmet's +NotFound = nvmet.nvme.CFSNotFound + + +class Subsystem(nvmet.Subsystem): + def __init__(self, nqn=None, mode='lookup'): + super().__init__(nqn=nqn, mode=mode) + + @classmethod + def setup(cls, t): + privsep_setup(cls.__name__, t) + + def delete(self): + do_privsep_call(serialize(self), 'delete') + + +class Port(nvmet.Port): + def __init__(self, portid, mode='lookup'): + super().__init__(portid=portid, mode=mode) + + @classmethod + def setup(cls, root, n): + privsep_setup(cls.__name__, serialize(root), n) + + def add_subsystem(self, nqn): + do_privsep_call(serialize(self), 'add_subsystem', nqn) + + def remove_subsystem(self, nqn): + do_privsep_call(serialize(self), 'remove_subsystem', nqn) + + def delete(self): + do_privsep_call(serialize(self), 'delete') + + +class Root(nvmet.Root): + @property + def ports(self): + for d in os.listdir(self.path + '/ports/'): + yield Port(os.path.basename(d)) diff --git a/cinder/tests/unit/privsep/__init__.py b/cinder/tests/unit/privsep/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/tests/unit/privsep/targets/__init__.py b/cinder/tests/unit/privsep/targets/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/tests/unit/privsep/targets/fake_nvmet_lib.py b/cinder/tests/unit/privsep/targets/fake_nvmet_lib.py new file mode 100644 index 00000000000..8b0ebd64926 --- /dev/null +++ b/cinder/tests/unit/privsep/targets/fake_nvmet_lib.py @@ -0,0 +1,40 @@ +# Copyright 2022 Red Hat, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +FAKE the nvmet library if it's not installed. +This must be imported before cinder/volume/targets/nvmet.py and +cinder/privsep/targets/nvmet.py +""" + +import sys +from unittest import mock + +from cinder import exception + +try: + import nvmet # noqa + reset_mock = lambda: None # noqa +except ImportError: + mock_nvmet_lib = mock.Mock(name='nvmet', + Root=type('Root', (mock.Mock, ), {}), + Subsystem=type('Subsystem', (mock.Mock, ), {}), + Port=type('Port', (mock.Mock, ), {}), + Namespace=type('Namespace', (mock.Mock, ), {}), + Host=type('Host', (mock.Mock, ), {}), + ANAGroup=type('ANAGroup', (mock.Mock, ), {}), + Referral=type('Referral', (mock.Mock, ), {}), + nvme=mock.Mock(CFSNotFound=exception.NotFound)) + + sys.modules['nvmet'] = mock_nvmet_lib + reset_mock = mock_nvmet_lib.reset_mock diff --git a/cinder/tests/unit/privsep/targets/test_nvmet.py b/cinder/tests/unit/privsep/targets/test_nvmet.py new file mode 100644 index 00000000000..0514efb378b --- /dev/null +++ b/cinder/tests/unit/privsep/targets/test_nvmet.py @@ -0,0 +1,342 @@ +# Copyright 2022 Red Hat, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +import ddt + +from cinder import exception +from cinder.tests.unit.privsep.targets import fake_nvmet_lib +from cinder.tests.unit import test +# This must go after fake_nvmet_lib has been imported (thus the noqa) +from cinder.privsep.targets import nvmet # noqa + + +@ddt.ddt +class TestSerialize(test.TestCase): + def setUp(self): + super().setUp() + fake_nvmet_lib.reset_mock() + + def test_tuple(self): + """Test serialization of a tuple.""" + instance = (1, 'string') + res = nvmet.serialize(instance) + self.assertEqual(('tuple', instance), res) + + @ddt.data(1, 1.1, 'string', None, [1, 2, 'string']) + def test_others(self, instance): + """Test normal Python instances that should not be modified.""" + res = nvmet.serialize(instance) + self.assertEqual(instance, res) + + def test_root(self): + instance = nvmet.Root() + res = nvmet.serialize(instance) + self.assertEqual(('Root', {}), res) + + def test_host(self): + instance = nvmet.Host(nqn='_nqn') + res = nvmet.serialize(instance) + self.assertEqual(('Host', {'nqn': '_nqn', 'mode': 'lookup'}), res) + + def test_subsystem(self): + instance = nvmet.Subsystem(nqn='_nqn') + res = nvmet.serialize(instance) + self.assertEqual(('Subsystem', {'nqn': '_nqn', 'mode': 'lookup'}), res) + + def test_namespace(self): + subsys = nvmet.Subsystem(nqn='_nqn') + instance = nvmet.Namespace(subsystem=subsys, nsid='_nsid') + res = nvmet.serialize(instance) + # Subsystem is a recursive serialization + expected = ( + 'Namespace', {'subsystem': ('Subsystem', {'nqn': '_nqn', + 'mode': 'lookup'}), + 'nsid': '_nsid', + 'mode': 'lookup'}) + self.assertEqual(expected, res) + + def test_port(self): + instance = nvmet.Port(portid='_portid') + res = nvmet.serialize(instance) + expected = ('Port', {'portid': '_portid', 'mode': 'lookup'}) + self.assertEqual(expected, res) + + def test_Referral(self): + port = nvmet.Port(portid='_portid') + # name is a Mock attribute, so we'll use it as instance.name + instance = nvmet.Referral(port=port, name='_name') + res = nvmet.serialize(instance) + # Port is a recursive serialization + expected = ( + 'Referral', {'port': ('Port', {'portid': '_portid', + 'mode': 'lookup'}), + 'name': instance.name, + 'mode': 'lookup'}) + self.assertEqual(expected, res) + + def test_ANAGroup(self): + port = nvmet.Port(portid='_portid') + instance = nvmet.ANAGroup(port=port, grpid='_grpid') + res = nvmet.serialize(instance) + expected = ( + 'ANAGroup', {'port': ('Port', {'portid': '_portid', + 'mode': 'lookup'}), + 'grpid': '_grpid', + 'mode': 'lookup'}) + self.assertEqual(expected, res) + + +@ddt.ddt +class TestDeserialize(test.TestCase): + def test_deserialize_tuple(self): + """Test serialization of a tuple.""" + expected = (1, 'string') + data = ('tuple', expected) + res = nvmet.deserialize(data) + self.assertEqual(expected, res) + + @ddt.data(1, 1.1, 'string', None, [1, 2, 'string']) + def test_deserialize_others(self, data): + """Test normal Python instances that should not be modified.""" + res = nvmet.deserialize(data) + self.assertEqual(data, res) + + def test_deserialize_root(self): + data = ('Root', {}) + res 
= nvmet.deserialize(data) + self.assertIsInstance(res, nvmet.nvmet.Root) + + def test_deserialize_host(self): + data = ('Host', {'nqn': '_nqn', 'mode': 'lookup'}) + host = nvmet.deserialize(data) + self.assertIsInstance(host, nvmet.nvmet.Host) + self.assertEqual('_nqn', host.nqn) + self.assertEqual('lookup', host.mode) + + def test_deserialize_subsystem(self): + data = ('Subsystem', {'nqn': '_nqn', 'mode': 'lookup'}) + subsys = nvmet.deserialize(data) + self.assertIsInstance(subsys, nvmet.nvmet.Subsystem) + self.assertEqual('_nqn', subsys.nqn) + self.assertEqual('lookup', subsys.mode) + + def test_deserialize_namespace(self): + data = ('Namespace', {'subsystem': ('Subsystem', {'nqn': '_nqn', + 'mode': 'lookup'}), + 'nsid': '_nsid', + 'mode': 'lookup'}) + + ns = nvmet.deserialize(data) + self.assertIsInstance(ns, nvmet.nvmet.Namespace) + self.assertEqual('_nsid', ns.nsid) + self.assertEqual('lookup', ns.mode) + self.assertIsInstance(ns.subsystem, nvmet.nvmet.Subsystem) + self.assertEqual('_nqn', ns.subsystem.nqn) + self.assertEqual('lookup', ns.subsystem.mode) + + def test_deserialize_port(self): + data = ('Port', {'portid': '_portid', 'mode': 'lookup'}) + port = nvmet.deserialize(data) + self.assertIsInstance(port, nvmet.nvmet.Port) + self.assertEqual('_portid', port.portid) + self.assertEqual('lookup', port.mode) + + def test_deserialize_Referral(self): + data = ('Referral', {'port': ('Port', {'portid': '_portid', + 'mode': 'lookup'}), + 'name': '1', + 'mode': 'lookup'}) + ref = nvmet.deserialize(data) + + self.assertIsInstance(ref, nvmet.nvmet.Referral) + self.assertEqual('1', ref._mock_name) # Because name is used by Mock + self.assertEqual('lookup', ref.mode) + self.assertIsInstance(ref.port, nvmet.nvmet.Port) + self.assertEqual('_portid', ref.port.portid) + self.assertEqual('lookup', ref.port.mode) + + def test_deserialize_ANAGroup(self): + data = ('ANAGroup', {'port': ('Port', {'portid': '_portid', + 'mode': 'lookup'}), + 'grpid': '_grpid', + 'mode': 'lookup'}) + ana = nvmet.deserialize(data) + + self.assertIsInstance(ana, nvmet.nvmet.ANAGroup) + self.assertEqual('_grpid', ana.grpid) + self.assertEqual('lookup', ana.mode) + self.assertIsInstance(ana.port, nvmet.nvmet.Port) + self.assertEqual('_portid', ana.port.portid) + self.assertEqual('lookup', ana.port.mode) + + @mock.patch.object(nvmet, 'deserialize') + def test_deserialize_params(self, mock_deserialize): + mock_deserialize.side_effect = [11, 22, 33, 55, 77] + args = [1, 2, 3] + kwargs = {'4': 5, '6': 7} + + res_args, res_kwargs = nvmet.deserialize_params(args, kwargs) + + self.assertEqual(5, mock_deserialize.call_count) + mock_deserialize.assert_has_calls((mock.call(1), + mock.call(2), + mock.call(3), + mock.call(5), + mock.call(7))) + self.assertEqual([11, 22, 33], res_args) + self.assertEqual({'4': 55, '6': 77}, res_kwargs) + + +class TestPrivsep(test.TestCase): + @mock.patch.object(nvmet.LOG, 'error') + def test__nvmet_setup_failure(self, mock_log): + self.assertRaises(exception.CinderException, + nvmet._nvmet_setup_failure, mock.sentinel.message) + mock_log.assert_called_once_with(mock.sentinel.message) + + # We mock the privsep context mode to fake that we are not the client + @mock.patch('cinder.privsep.sys_admin_pctxt.client_mode', False) + @mock.patch.object(nvmet, 'deserialize_params') + @mock.patch.object(nvmet.nvmet, 'MyClass') + def test_privsep_setup(self, mock_class, mock_deserialize): + args = (1, 2, 3) + kwargs = {'4': 5, '6': 7} + deserialized_args = (11, 22, 33) + deserialized_kwargs = {'4': 55, '6': 77} + + 
expected_args = deserialized_args[:] + expected_kwargs = deserialized_kwargs.copy() + expected_kwargs['err_func'] = nvmet._nvmet_setup_failure + + mock_deserialize.return_value = (deserialized_args, + deserialized_kwargs) + + nvmet.privsep_setup('MyClass', *args, **kwargs) + + mock_deserialize.assert_called_once_with(args, kwargs) + mock_class.setup.assert_called_once_with(*expected_args, + **expected_kwargs) + + # We mock the privsep context mode to fake that we are not the client + @mock.patch('cinder.privsep.sys_admin_pctxt.client_mode', False) + @mock.patch.object(nvmet, 'deserialize') + @mock.patch.object(nvmet, 'deserialize_params') + def test_do_privsep_call(self, mock_deserialize_params, mock_deserialize): + args = (1, 2, 3) + kwargs = {'4': 5, '6': 7} + deserialized_args = (11, 22, 33) + deserialized_kwargs = {'4': 55, '6': 77} + + mock_deserialize_params.return_value = (deserialized_args, + deserialized_kwargs) + + res = nvmet.do_privsep_call(mock.sentinel.instance, + 'method_name', + *args, **kwargs) + mock_deserialize.assert_called_once_with(mock.sentinel.instance) + mock_deserialize_params.assert_called_once_with(args, kwargs) + + mock_method = mock_deserialize.return_value.method_name + mock_method.assert_called_once_with(*deserialized_args, + **deserialized_kwargs) + self.assertEqual(mock_method.return_value, res) + + +@ddt.ddt +class TestNvmetClasses(test.TestCase): + @ddt.data('Namespace', 'Host', 'Referral', 'ANAGroup') + def test_same_classes(self, cls_name): + self.assertEqual(getattr(nvmet, cls_name), + getattr(nvmet.nvmet, cls_name)) + + def test_subsystem_init(self): + subsys = nvmet.Subsystem('nqn') + self.assertIsInstance(subsys, nvmet.nvmet.Subsystem) + self.assertIsInstance(subsys, nvmet.Subsystem) + self.assertEqual('nqn', subsys.nqn) + self.assertEqual('lookup', subsys.mode) + + @mock.patch.object(nvmet, 'privsep_setup') + def test_subsystem_setup(self, mock_setup): + nvmet.Subsystem.setup(mock.sentinel.t) + mock_setup.assert_called_once_with('Subsystem', mock.sentinel.t) + + @mock.patch.object(nvmet, 'serialize') + @mock.patch.object(nvmet, 'do_privsep_call') + def test_subsystem_delete(self, mock_privsep, mock_serialize): + subsys = nvmet.Subsystem('nqn') + subsys.delete() + mock_serialize.assert_called_once_with(subsys) + mock_privsep.assert_called_once_with(mock_serialize.return_value, + 'delete') + + def test_port_init(self): + port = nvmet.Port('portid') + self.assertIsInstance(port, nvmet.nvmet.Port) + self.assertIsInstance(port, nvmet.Port) + self.assertEqual('portid', port.portid) + self.assertEqual('lookup', port.mode) + + @mock.patch.object(nvmet, 'serialize') + @mock.patch.object(nvmet, 'privsep_setup') + def test_port_setup(self, mock_setup, mock_serialize): + nvmet.Port.setup(mock.sentinel.root, mock.sentinel.n) + mock_serialize.assert_called_once_with(mock.sentinel.root) + mock_setup.assert_called_once_with('Port', mock_serialize.return_value, + mock.sentinel.n) + + @mock.patch.object(nvmet, 'serialize') + @mock.patch.object(nvmet, 'do_privsep_call') + def test_port_add_subsystem(self, mock_privsep, mock_serialize): + port = nvmet.Port('portid') + port.add_subsystem(mock.sentinel.nqn) + mock_serialize.assert_called_once_with(port) + mock_privsep.assert_called_once_with(mock_serialize.return_value, + 'add_subsystem', + mock.sentinel.nqn) + + @mock.patch.object(nvmet, 'serialize') + @mock.patch.object(nvmet, 'do_privsep_call') + def test_port_remove_subsystem(self, mock_privsep, mock_serialize): + port = nvmet.Port('portid') + 
port.remove_subsystem(mock.sentinel.nqn) + mock_serialize.assert_called_once_with(port) + mock_privsep.assert_called_once_with(mock_serialize.return_value, + 'remove_subsystem', + mock.sentinel.nqn) + + @mock.patch.object(nvmet, 'serialize') + @mock.patch.object(nvmet, 'do_privsep_call') + def test_port_delete(self, mock_privsep, mock_serialize): + port = nvmet.Port('portid') + port.delete() + mock_serialize.assert_called_once_with(port) + mock_privsep.assert_called_once_with(mock_serialize.return_value, + 'delete') + + @mock.patch('os.listdir', return_value=['/path/ports/1', '/path/ports/2']) + @mock.patch.object(nvmet, 'Port') + def test_root_ports(self, mock_port, mock_listdir): + r = nvmet.Root() + r.path = '/path' # This is set by the parent nvmet library Root class + + res = list(r.ports) + + self.assertEqual([mock_port.return_value, mock_port.return_value], res) + + mock_listdir.assert_called_once_with('/path/ports/') + self.assertEqual(2, mock_port.call_count) + mock_port.assert_has_calls((mock.call('1'), mock.call('2'))) diff --git a/cinder/tests/unit/targets/test_nvmet_driver.py b/cinder/tests/unit/targets/test_nvmet_driver.py index 8d9141bfc8c..26241653e08 100644 --- a/cinder/tests/unit/targets/test_nvmet_driver.py +++ b/cinder/tests/unit/targets/test_nvmet_driver.py @@ -13,12 +13,13 @@ from unittest import mock import ddt -from oslo_utils import timeutils -from cinder import context +from cinder.tests.unit.privsep.targets import fake_nvmet_lib from cinder.tests.unit.targets import targets_fixture as tf from cinder import utils from cinder.volume.targets import nvmet +# This must go after fake_nvmet_lib has been imported (thus the noqa) +from cinder.privsep.targets import nvmet as priv_nvmet # noqa @ddt.ddt @@ -26,248 +27,241 @@ class TestNVMETDriver(tf.TargetDriverFixture): def setUp(self): super(TestNVMETDriver, self).setUp() - self.initialize('nvmet_rdma', 'rdma') - def initialize(self, target_protocol, transport_type): - self.configuration.target_protocol = target_protocol + self.configuration.target_prefix = 'nvme-subsystem-1' + self.configuration.target_protocol = 'nvmet_rdma' self.target = nvmet.NVMET(root_helper=utils.get_root_helper(), configuration=self.configuration) + fake_nvmet_lib.reset_mock() - self.target_ip = '192.168.0.1' - self.target_port = '1234' - self.nvmet_subsystem_name = self.configuration.target_prefix - self.nvmet_ns_id = self.configuration.nvmet_ns_id - self.nvmet_port_id = self.configuration.nvmet_port_id - self.nvme_transport_type = transport_type + @mock.patch.object(nvmet.NVMET, 'create_nvmeof_target') + def test_create_export(self, mock_create_target): + """Test that the nvmeof class calls the nvmet method.""" + res = self.target.create_export(mock.sentinel.ctxt, + self.testvol, + mock.sentinel.volume_path) + self.assertEqual(mock_create_target.return_value, res) + mock_create_target.assert_called_once_with( + self.testvol['id'], + self.target.configuration.target_prefix, + self.target.target_ip, + self.target.target_port, + self.target.nvme_transport_type, + self.target.nvmet_port_id, + self.target.nvmet_ns_id, + mock.sentinel.volume_path) - self.fake_volume_id = 'c446b9a2-c968-4260-b95f-a18a7b41c004' - self.testvol_path = ( - '/dev/stack-volumes-lvmdriver-1/volume-%s' % self.fake_volume_id) - self.fake_project_id = 'ed2c1fd4-5555-1111-aa15-123b93f75cba' - self.testvol = ( - {'project_id': self.fake_project_id, - 'name': 'testvol', - 'size': 1, - 'id': self.fake_volume_id, - 'volume_type_id': None, - 'provider_location': 
self.target.get_nvmeof_location( - "nqn.%s-%s" % (self.nvmet_subsystem_name, - self.fake_volume_id), - self.target_ip, self.target_port, self.nvme_transport_type, - self.nvmet_ns_id), - 'provider_auth': None, - 'provider_geometry': None, - 'created_at': timeutils.utcnow(), - 'host': 'fake_host@lvm#lvm'}) + @mock.patch.object(nvmet.NVMET, 'get_nvmeof_location') + @mock.patch.object(nvmet.NVMET, '_ensure_port_exports') + @mock.patch.object(nvmet.NVMET, '_ensure_subsystem_exists') + @mock.patch.object(nvmet.NVMET, '_get_target_nqn') + def test_create_nvmeof_target(self, mock_nqn, mock_subsys, mock_port, + mock_location): + """Normal create target execution.""" + mock_nqn.return_value = mock.sentinel.nqn - @ddt.data(('nvmet_rdma', 'rdma'), ('nvmet_tcp', 'tcp')) + res = self.target.create_nvmeof_target(mock.sentinel.vol_id, + mock.sentinel.target_prefix, + mock.sentinel.target_ip, + mock.sentinel.target_port, + mock.sentinel.transport_type, + mock.sentinel.port_id, + mock.sentinel.ns_id, + mock.sentinel.volume_path) + + self.assertEqual({'location': mock_location.return_value, 'auth': ''}, + res) + mock_nqn.assert_called_once_with(mock.sentinel.vol_id) + mock_subsys.assert_called_once_with(mock.sentinel.nqn, + mock.sentinel.ns_id, + mock.sentinel.volume_path) + mock_port.assert_called_once_with(mock.sentinel.nqn, + mock.sentinel.target_ip, + mock.sentinel.target_port, + mock.sentinel.transport_type, + mock.sentinel.port_id) + + mock_location.assert_called_once_with(mock.sentinel.nqn, + mock.sentinel.target_ip, + mock.sentinel.target_port, + mock.sentinel.transport_type, + mock.sentinel.ns_id) + + @ddt.data((ValueError, None), (None, IndexError)) @ddt.unpack - @mock.patch.object(nvmet.NVMET, '_get_nvmf_subsystem') - @mock.patch.object(nvmet.NVMET, '_get_available_nvmf_subsystems') - @mock.patch.object(nvmet.NVMET, '_add_nvmf_subsystem') - def test_create_export(self, target_protocol, transport_type, - mock_add_nvmf_subsystem, - mock_get_available_nvmf_subsystems, - mock_get_nvmf_subsystem): - self.initialize(target_protocol, transport_type) + @mock.patch.object(nvmet.NVMET, 'get_nvmeof_location') + @mock.patch.object(nvmet.NVMET, '_ensure_port_exports') + @mock.patch.object(nvmet.NVMET, '_ensure_subsystem_exists') + @mock.patch.object(nvmet.NVMET, '_get_target_nqn') + def test_create_nvmeof_target_error(self, subsys_effect, port_effect, + mock_nqn, mock_subsys, mock_port, + mock_location): + """Failing create target executing subsystem or port creation.""" + mock_subsys.side_effect = subsys_effect + mock_port.side_effect = port_effect + mock_nqn.return_value = mock.sentinel.nqn - mock_testvol = self.testvol - mock_testvol_path = self.testvol_path - ctxt = context.get_admin_context() - mock_get_available_nvmf_subsystems.return_value = { - "subsystems": [], - "hosts": [], - "ports": [ - {"subsystems": [], - "referrals": [], - "portid": 1, - "addr": - {"treq": "not specified", - "trtype": self.nvme_transport_type, - "adrfam": "ipv4", - "trsvcid": self.target_port, - "traddr": - self.target_ip - } - }] - } - mock_get_nvmf_subsystem.return_value = ( - "nqn.%s-%s" % (self.nvmet_subsystem_name, - mock_testvol['id'])) - - mock_add_nvmf_subsystem.return_value = ( - "nqn.%s-%s" % (self.nvmet_subsystem_name, - mock_testvol['id'])) - - expected_return = { - 'location': self.target.get_nvmeof_location( - mock_add_nvmf_subsystem.return_value, self.target_ip, - self.target_port, self.nvme_transport_type, self.nvmet_ns_id), - 'auth': '' - } - - self.target.target_ip = self.target_ip - self.target.target_port = 
self.target_port - self.assertEqual(expected_return, - self.target.create_export( - ctxt, mock_testvol, - mock_testvol_path)) - - @mock.patch.object(nvmet.NVMET, '_get_nvmf_subsystem') - @mock.patch.object(nvmet.NVMET, '_get_available_nvmf_subsystems') - @mock.patch.object(nvmet.NVMET, '_add_nvmf_subsystem') - def test_create_export_with_error_add_nvmf_subsystem( - self, - mock_add_nvmf_subsystem, - mock_get_available_nvmf_subsystems, - mock_get_nvmf_subsystem): - - mock_testvol = self.testvol - mock_testvol_path = self.testvol_path - ctxt = context.get_admin_context() - mock_get_available_nvmf_subsystems.return_value = { - "subsystems": [], - "hosts": [], - "ports": [ - {"subsystems": [], - "referrals": [], - "portid": 1, - "addr": - {"treq": "not specified", - "trtype": self.nvme_transport_type, - "adrfam": "ipv4", - "trsvcid": self.target_port, - "traddr": - self.target_ip - } - }] - } - mock_get_nvmf_subsystem.return_value = None - - mock_add_nvmf_subsystem.return_value = None - - self.target.target_ip = self.target_ip - self.target.target_port = self.target_port self.assertRaises(nvmet.NVMETTargetAddError, - self.target.create_export, - ctxt, - mock_testvol, - mock_testvol_path) + self.target.create_nvmeof_target, + mock.sentinel.vol_id, + mock.sentinel.target_prefix, + mock.sentinel.target_ip, + mock.sentinel.target_port, + mock.sentinel.transport_type, + mock.sentinel.port_id, + mock.sentinel.ns_id, + mock.sentinel.volume_path) - @mock.patch.object(nvmet.NVMET, '_get_nvmf_subsystem') - @mock.patch.object(nvmet.NVMET, '_get_available_nvmf_subsystems') - @mock.patch.object(nvmet.NVMET, '_delete_nvmf_subsystem') - def test_remove_export(self, mock_delete_nvmf_subsystem, - mock_get_available_nvmf_subsystems, - mock_get_nvmf_subsystem): - mock_testvol = self.testvol - mock_testvol_path = self.testvol_path - ctxt = context.get_admin_context() - mock_get_available_nvmf_subsystems.return_value = { - "subsystems": [ - {"allowed_hosts": [], - "nqn": "nqn.%s-%s" % ( - self.nvmet_subsystem_name, - mock_testvol['id']), - "attr": {"allow_any_host": "1"}, - "namespaces": [ - {"device": - {"path": mock_testvol_path, - "nguid": - "86fab0e0-825d-4f25-a449-28b93c5e8dd6" - }, - "enable": 1, "nsid": - self.nvmet_ns_id, - }]}], - "hosts": [], - "ports": [ - {"subsystems": [ - "nqn.%s-%s" % (self.nvmet_subsystem_name, - mock_testvol['id'])], - "referrals": [], - "portid": self.nvmet_port_id, - "addr": - {"treq": "not specified", - "trtype": self.nvme_transport_type, - "adrfam": "ipv4", - "trsvcid": self.target_port, - "traddr": self.target_ip}} - ] + mock_nqn.assert_called_once_with(mock.sentinel.vol_id) + mock_subsys.assert_called_once_with(mock.sentinel.nqn, + mock.sentinel.ns_id, + mock.sentinel.volume_path) + if subsys_effect: + mock_port.assert_not_called() + else: + mock_port.assert_called_once_with(mock.sentinel.nqn, + mock.sentinel.target_ip, + mock.sentinel.target_port, + mock.sentinel.transport_type, + mock.sentinel.port_id) + mock_location.assert_not_called() + + @mock.patch.object(priv_nvmet, 'Subsystem') + def test__ensure_subsystem_exists_already_exists(self, mock_subsys): + """Skip subsystem creation if already exists.""" + nqn = 'nqn.nvme-subsystem-1-uuid' + self.target._ensure_subsystem_exists(nqn, mock.sentinel.ns_id, + mock.sentinel.vol_path) + mock_subsys.assert_called_once_with(nqn) + mock_subsys.setup.assert_not_called() + + @mock.patch('oslo_utils.uuidutils.generate_uuid') + @mock.patch.object(priv_nvmet, 'Subsystem') + def test__ensure_subsystem_exists(self, mock_subsys, 
mock_uuid): + """Create subsystem when it doesn't exist.""" + mock_subsys.side_effect = priv_nvmet.NotFound + mock_uuid.return_value = 'uuid' + nqn = 'nqn.nvme-subsystem-1-uuid' + self.target._ensure_subsystem_exists(nqn, mock.sentinel.ns_id, + mock.sentinel.vol_path) + mock_subsys.assert_called_once_with(nqn) + expected_section = { + 'allowed_hosts': [], + 'attr': {'allow_any_host': '1'}, + 'namespaces': [{'device': {'nguid': 'uuid', + 'path': mock.sentinel.vol_path}, + 'enable': 1, + 'nsid': mock.sentinel.ns_id}], + 'nqn': nqn } + mock_subsys.setup.assert_called_once_with(expected_section) - mock_get_nvmf_subsystem.return_value = ( - "nqn.%s-%s" % (self.nvmet_subsystem_name, - mock_testvol['id'])) - mock_delete_nvmf_subsystem.return_value = ( - "nqn.%s-%s" % (self.nvmet_subsystem_name, - mock_testvol['id'])) - expected_return = mock_delete_nvmf_subsystem.return_value - self.assertEqual(expected_return, - self.target.remove_export(ctxt, mock_testvol)) + @mock.patch.object(priv_nvmet, 'Port') + def test__ensure_port_exports_already_does(self, mock_port): + """Skips port creation and subsystem export since they both exist.""" + nqn = 'nqn.nvme-subsystem-1-uuid' + mock_port.return_value.subsystems = [nqn] + self.target._ensure_port_exports(nqn, + mock.sentinel.addr, + mock.sentinel.port, + mock.sentinel.transport, + mock.sentinel.port_id) + mock_port.assert_called_once_with(mock.sentinel.port_id) + mock_port.setup.assert_not_called() + mock_port.return_value.add_subsystem.assert_not_called() - @mock.patch.object(nvmet.NVMET, '_get_nvmf_subsystem') - @mock.patch.object(nvmet.NVMET, '_get_available_nvmf_subsystems') - def test_remove_export_with_empty_subsystems( - self, - mock_get_available_nvmf_subsystems, - mock_get_nvmf_subsystem): - mock_testvol = self.testvol - ctxt = context.get_admin_context() - mock_get_available_nvmf_subsystems.return_value = { - "subsystems": [], - "hosts": [], - "ports": [] - } - mock_get_nvmf_subsystem.return_value = None - self.assertIsNone(self.target.remove_export(ctxt, mock_testvol)) + @mock.patch.object(priv_nvmet, 'Port') + def test__ensure_port_exports_port_exists_not_exported(self, mock_port): + """Skips port creation if exists but exports subsystem.""" + nqn = 'nqn.nvme-subsystem-1-vol-2-uuid' + mock_port.return_value.subsystems = ['nqn.nvme-subsystem-1-vol-1-uuid'] + self.target._ensure_port_exports(nqn, + mock.sentinel.addr, + mock.sentinel.port, + mock.sentinel.transport, + mock.sentinel.port_id) + mock_port.assert_called_once_with(mock.sentinel.port_id) + mock_port.setup.assert_not_called() + mock_port.return_value.add_subsystem.assert_called_once_with(nqn) - @mock.patch.object(nvmet.NVMET, '_get_nvmf_subsystem') - @mock.patch.object(nvmet.NVMET, '_get_available_nvmf_subsystems') - @mock.patch.object(nvmet.NVMET, '_delete_nvmf_subsystem') - def test_remove_export_with_delete_nvmf_subsystem_fails( - self, - moc_delete_nvmf_subsystem, - mock_get_available_nvmf_subsystems, - mock_get_nvmf_subsystem): - mock_testvol = self.testvol - mock_testvol_path = self.testvol_path - ctxt = context.get_admin_context() - mock_get_available_nvmf_subsystems.return_value = { - "subsystems": [ - {"allowed_hosts": [], - "nqn": "nqn.%s-%s" % ( - self.nvmet_subsystem_name, - mock_testvol['id']), - "attr": {"allow_any_host": "1"}, - "namespaces": [ - {"device": - {"path": mock_testvol_path, - "nguid": - "86fab0e0-825d-4f25-a449-28b93c5e8dd6" - }, - "enable": 1, "nsid": - self.nvmet_ns_id, - }]}], - "hosts": [], - "ports": [ - {"subsystems": [ - "nqn.%s-%s" % 
(self.nvmet_subsystem_name, - mock_testvol['id'])], - "referrals": [], - "portid": self.nvmet_port_id, - "addr": - {"treq": "not specified", - "trtype": self.nvme_transport_type, - "adrfam": "ipv4", - "trsvcid": self.target_port, - "traddr": self.target_ip}} - ] - } - mock_get_nvmf_subsystem.return_value = ( - "nqn.%s-%s" % (self.nvmet_subsystem_name, - mock_testvol['id'])) - moc_delete_nvmf_subsystem.return_value = None - self.assertRaises(nvmet.NVMETTargetDeleteError, - self.target.remove_export, - ctxt, - mock_testvol) + @mock.patch.object(priv_nvmet, 'Port') + def test__ensure_port_exports_port(self, mock_port): + """Creates the port and export the subsystem when they don't exist.""" + nqn = 'nqn.nvme-subsystem-1-vol-2-uuid' + mock_port.side_effect = priv_nvmet.NotFound + self.target._ensure_port_exports(nqn, + mock.sentinel.addr, + mock.sentinel.port, + mock.sentinel.transport, + mock.sentinel.port_id) + mock_port.assert_called_once_with(mock.sentinel.port_id) + new_port = {'addr': {'adrfam': 'ipv4', + 'traddr': mock.sentinel.addr, + 'treq': 'not specified', + 'trsvcid': mock.sentinel.port, + 'trtype': mock.sentinel.transport}, + 'portid': mock.sentinel.port_id, + 'referrals': [], + 'subsystems': [nqn]} + mock_port.setup.assert_called_once_with(self.target._nvmet_root, + new_port) + mock_port.return_value.assert_not_called() + + @mock.patch.object(nvmet.NVMET, 'delete_nvmeof_target') + def test_remove_export(self, mock_delete_target): + """Test that the nvmeof class calls the nvmet method.""" + res = self.target.remove_export(mock.sentinel.ctxt, + mock.sentinel.volume) + self.assertEqual(mock_delete_target.return_value, res) + mock_delete_target.assert_called_once_with(mock.sentinel.volume) + + @mock.patch.object(priv_nvmet, 'Subsystem') + @mock.patch.object(nvmet.NVMET, '_get_target_nqn') + def test_delete_nvmeof_target_nothing_present(self, mock_nqn, mock_subsys): + """Delete doesn't do anything because there is nothing to do.""" + mock_nqn.return_value = mock.sentinel.nqn + mock_subsys.side_effect = priv_nvmet.NotFound + + port1 = mock.Mock(subsystems=[]) + port2 = mock.Mock(subsystems=['subs1']) + self.mock_object(priv_nvmet.Root, 'ports', [port1, port2]) + + volume = mock.Mock(id='vol-uuid') + self.target.delete_nvmeof_target(volume) + + mock_nqn.assert_called_once_with(volume.id) + port1.remove_subsystem.assert_not_called() + port2.remove_subsystem.assert_not_called() + mock_subsys.delete.assert_not_called() + + @mock.patch.object(priv_nvmet, 'Subsystem') + @mock.patch.object(nvmet.NVMET, '_get_target_nqn') + def test_delete_nvmeof_target(self, mock_nqn, mock_subsys): + """Delete removes subsystems from port and the subsystem.""" + mock_nqn.return_value = mock.sentinel.nqn + + port1 = mock.Mock(subsystems=[]) + port2 = mock.Mock(subsystems=[mock.sentinel.nqn]) + port3 = mock.Mock(subsystems=['subs1']) + self.mock_object(priv_nvmet.Root, 'ports', [port1, port2, port3]) + + volume = mock.Mock(id='vol-uuid') + self.target.delete_nvmeof_target(volume) + + mock_nqn.assert_called_once_with(volume.id) + port1.remove_subsystem.assert_not_called() + port2.remove_subsystem.assert_called_once_with(mock.sentinel.nqn) + port3.remove_subsystem.assert_not_called() + mock_subsys.assert_called_once_with(mock.sentinel.nqn) + mock_subsys.return_value.delete.assert_called_once_with() + + @mock.patch.object(priv_nvmet, 'Root') + def test__get_available_nvmf_subsystems(self, mock_root): + res = self.target._get_available_nvmf_subsystems() + mock_dump = mock_root.return_value.dump + 
self.assertEqual(mock_dump.return_value, res) + mock_dump.assert_called_once_with() + + def test__get_target_nqn(self): + res = self.target._get_target_nqn('volume_id') + self.assertEqual('nqn.nvme-subsystem-1-volume_id', res) diff --git a/cinder/volume/targets/nvmet.py b/cinder/volume/targets/nvmet.py index 5ebc73064dd..f4e88769c51 100644 --- a/cinder/volume/targets/nvmet.py +++ b/cinder/volume/targets/nvmet.py @@ -10,17 +10,11 @@ # License for the specific language governing permissions and limitations # under the License. -import tempfile - -from oslo_concurrency import processutils as putils from oslo_log import log as logging -from oslo_serialization import jsonutils as json -from oslo_utils import excutils from oslo_utils import uuidutils from cinder import exception -from cinder.privsep import nvmcli -import cinder.privsep.path +from cinder.privsep.targets import nvmet from cinder import utils from cinder.volume.targets import nvmeof @@ -38,81 +32,49 @@ class NVMETTargetDeleteError(exception.CinderException): class NVMET(nvmeof.NVMeOF): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._nvmet_root = nvmet.Root() + @utils.synchronized('nvmetcli', external=True) def create_nvmeof_target(self, volume_id, - subsystem_name, + subsystem_name, # Ignoring this, using config target_ip, target_port, transport_type, nvmet_port_id, ns_id, volume_path): - # Create NVME subsystem for previously created LV - nvmf_subsystems = self._get_available_nvmf_subsystems() + nqn = self._get_target_nqn(volume_id) + try: + self._ensure_subsystem_exists(nqn, ns_id, volume_path) + self._ensure_port_exports(nqn, target_ip, target_port, + transport_type, nvmet_port_id) + except Exception: + LOG.error('Failed to add subsystem: %s', nqn) + raise NVMETTargetAddError(subsystem=nqn) - # Check if subsystem already exists - search_for_subsystem = self._get_nvmf_subsystem( - nvmf_subsystems, volume_id) - if search_for_subsystem is None: - newly_added_subsystem = self._add_nvmf_subsystem( - nvmf_subsystems, - target_ip, - target_port, - nvmet_port_id, - subsystem_name, - ns_id, volume_id, volume_path, transport_type) - if newly_added_subsystem is None: - LOG.error('Failed to add subsystem: %s', subsystem_name) - raise NVMETTargetAddError(subsystem=subsystem_name) - LOG.info('Added subsystem: %s', newly_added_subsystem) - search_for_subsystem = newly_added_subsystem - else: - LOG.debug('Skip creating subsystem %s as ' - 'it already exists.', search_for_subsystem) + LOG.info('Subsystem %s now exported on port %s', nqn, target_port) return { 'location': self.get_nvmeof_location( - search_for_subsystem, + nqn, target_ip, target_port, transport_type, ns_id), 'auth': ''} - def _restore(self, nvmf_subsystems): - # Dump updated JSON dict to append new subsystem - with tempfile.NamedTemporaryFile(mode='w') as tmp_fd: - tmp_fd.write(json.dumps(nvmf_subsystems)) - tmp_fd.flush() - try: - out, err = nvmcli.restore(tmp_fd.name) - except putils.ProcessExecutionError: - with excutils.save_and_reraise_exception(): - LOG.exception('Error from nvmetcli restore') + def _ensure_subsystem_exists(self, nqn, nvmet_ns_id, volume_path): + # Assume if subsystem exists, it has the right configuration + try: + nvmet.Subsystem(nqn) + LOG.debug('Skip creating subsystem %s as it already exists.', nqn) + return + except nvmet.NotFound: + LOG.debug('Creating subsystem %s.', nqn) - def _add_nvmf_subsystem(self, nvmf_subsystems, target_ip, target_port, - nvmet_port_id, nvmet_subsystem_name, nvmet_ns_id, - volume_id, 
volume_path, transport_type): - - subsystem_name = self._get_target_info(nvmet_subsystem_name, volume_id) - # Create JSON sections for the new subsystem to be created - # Port section - port_section = { - "addr": { - "adrfam": "ipv4", - "traddr": target_ip, - "treq": "not specified", - "trsvcid": target_port, - "trtype": transport_type, - }, - "portid": nvmet_port_id, - "referrals": [], - "subsystems": [subsystem_name] - } - nvmf_subsystems['ports'].append(port_section) - - # Subsystem section subsystem_section = { "allowed_hosts": [], "attr": { @@ -128,92 +90,70 @@ class NVMET(nvmeof.NVMeOF): "nsid": nvmet_ns_id } ], - "nqn": subsystem_name} - nvmf_subsystems['subsystems'].append(subsystem_section) + "nqn": nqn} - LOG.info( - 'Trying to load the following subsystems: %s', nvmf_subsystems) + nvmet.Subsystem.setup(subsystem_section) # privsep + LOG.debug('Added subsystem: %s', nqn) - self._restore(nvmf_subsystems) + def _ensure_port_exports(self, nqn, addr, port, transport_type, port_id): + # Assume if port exists, it has the right configuration + try: + port = nvmet.Port(port_id) + LOG.debug('Skip creating port %s as it already exists.', port_id) + except nvmet.NotFound: + LOG.debug('Creating port %s.', port_id) - return subsystem_name + # Port section + port_section = { + "addr": { + "adrfam": "ipv4", + "traddr": addr, + "treq": "not specified", + "trsvcid": port, + "trtype": transport_type, + }, + "portid": port_id, + "referrals": [], + "subsystems": [nqn] + } + nvmet.Port.setup(self._nvmet_root, port_section) # privsep + LOG.debug('Added port: %s', port_id) + + else: + if nqn in port.subsystems: + LOG.debug('%s already exported on port %s', nqn, port_id) + else: + port.add_subsystem(nqn) # privsep + LOG.debug('Exported %s on port %s', nqn, port_id) @utils.synchronized('nvmetcli', external=True) def delete_nvmeof_target(self, volume): - nvmf_subsystems = self._get_available_nvmf_subsystems() - subsystem_name = self._get_nvmf_subsystem( - nvmf_subsystems, volume['id']) - if subsystem_name: - removed_subsystem = self._delete_nvmf_subsystem( - nvmf_subsystems, subsystem_name) - if removed_subsystem is None: - LOG.error( - 'Failed to delete subsystem: %s', subsystem_name) - raise NVMETTargetDeleteError(subsystem=subsystem_name) - elif removed_subsystem == subsystem_name: - LOG.info( - 'Managed to delete subsystem: %s', subsystem_name) - return removed_subsystem - else: - LOG.info("Skipping remove_export. 
No NVMe subsystem " - "for volume: %s", volume['id']) + subsystem_name = self._get_target_nqn(volume.id) + LOG.debug('Removing subsystem: %s', subsystem_name) - def _delete_nvmf_subsystem(self, nvmf_subsystems, subsystem_name): - LOG.debug( - 'Removing this subsystem: %s', subsystem_name) + for port in self._nvmet_root.ports: + if subsystem_name in port.subsystems: + LOG.debug('Removing %s from port %s', + subsystem_name, port.portid) + port.remove_subsystem(subsystem_name) - for port in nvmf_subsystems['ports']: - if subsystem_name in port['subsystems']: - port['subsystems'].remove(subsystem_name) - break - for subsys in nvmf_subsystems['subsystems']: - if subsys['nqn'] == subsystem_name: - nvmf_subsystems['subsystems'].remove(subsys) - break - - LOG.debug( - 'Newly loaded subsystems will be: %s', nvmf_subsystems) - self._restore(nvmf_subsystems) - return subsystem_name - - def _get_nvmf_subsystem(self, nvmf_subsystems, volume_id): - subsystem_name = self._get_target_info( - self.nvmet_subsystem_name, volume_id) - for subsys in nvmf_subsystems['subsystems']: - if subsys['nqn'] == subsystem_name: - return subsystem_name + try: + subsys = nvmet.Subsystem(subsystem_name) + LOG.debug('Deleting %s', subsystem_name) + subsys.delete() # privsep call + LOG.info('Subsystem %s removed', subsystem_name) + except nvmet.NotFound: + LOG.info('Skipping remove_export. No NVMe subsystem for volume: ' + '%s', volume.id) + except Exception: + LOG.error('Failed to delete subsystem: %s', subsystem_name) + raise NVMETTargetDeleteError(subsystem=subsystem_name) + LOG.info('Volume %s is no longer exported', volume.id) def _get_available_nvmf_subsystems(self): - __, tmp_file_path = tempfile.mkstemp(prefix='nvmet') + nvme_root = nvmet.Root() + subsystems = nvme_root.dump() + return subsystems - # nvmetcli doesn't support printing to stdout yet, - try: - out, err = nvmcli.save(tmp_file_path) - except putils.ProcessExecutionError: - with excutils.save_and_reraise_exception(): - LOG.exception('Error from nvmetcli save') - self._delete_file(tmp_file_path) - - # temp file must be readable by this process user - # in order to avoid executing cat as root - with utils.temporary_chown(tmp_file_path): - try: - out = cinder.privsep.path.readfile(tmp_file_path) - except putils.ProcessExecutionError: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to read: %s', tmp_file_path) - self._delete_file(tmp_file_path) - nvmf_subsystems = json.loads(out) - - self._delete_file(tmp_file_path) - - return nvmf_subsystems - - def _get_target_info(self, subsystem, volume_id): - return "nqn.%s-%s" % (subsystem, volume_id) - - def _delete_file(self, file_path): - try: - cinder.privsep.path.removefile(file_path) - except putils.ProcessExecutionError: - LOG.exception('Failed to delete file: %s', file_path) + def _get_target_nqn(self, volume_id): + return "nqn.%s-%s" % (self.nvmet_subsystem_name, volume_id) diff --git a/doc/source/conf.py b/doc/source/conf.py index c2ea48cf264..c311f4e9166 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -73,6 +73,9 @@ config_generator_config_file = ( '../../tools/config/cinder-config-generator.conf') sample_config_basename = '_static/cinder' +# These are driver specific libraries that are not always present +autodoc_mock_imports = ['nvmet'] + policy_generator_config_file = ( '../../tools/config/cinder-policy-generator.conf') sample_policy_basename = '_static/cinder' diff --git a/releasenotes/notes/lvm-nvmet-fixes-fc5e867abc699633.yaml 
b/releasenotes/notes/lvm-nvmet-fixes-fc5e867abc699633.yaml
new file mode 100644
index 00000000000..a1cc29d83dc
--- /dev/null
+++ b/releasenotes/notes/lvm-nvmet-fixes-fc5e867abc699633.yaml
@@ -0,0 +1,12 @@
+---
+fixes:
+  - |
+    LVM nvmet target `bug #1964391
+    <https://bugs.launchpad.net/cinder/+bug/1964391>`_: Fixed temporary
+    disconnection of all volumes from all hosts when creating and removing
+    volume exports.
+
+  - |
+    LVM nvmet target `bug #1964394
+    <https://bugs.launchpad.net/cinder/+bug/1964394>`_: Fixed annoying kernel
+    log message when exporting a volume.