diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample index 0f092b718f..796358e9fd 100644 --- a/etc/ironic/ironic.conf.sample +++ b/etc/ironic/ironic.conf.sample @@ -1518,12 +1518,16 @@ #auth_strategy = keystone # Neutron network UUID for the ramdisk to be booted into for -# cleaning nodes. Required if cleaning (either automatic or -# manual) is run for flat network interface, and, if DHCP -# providers are still being used, for neutron DHCP provider. -# (string value) +# cleaning nodes. Required for "neutron" network interface. It +# is also required if cleaning nodes when using "flat" network +# interface or "neutron" DHCP provider. (string value) #cleaning_network_uuid = +# Neutron network UUID for the ramdisk to be booted into for +# provisioning nodes. Required for "neutron" network +# interface. (string value) +#provisioning_network_uuid = + [oneview] diff --git a/ironic/common/network.py b/ironic/common/network.py index 301384bf92..78aabd58e0 100644 --- a/ironic/common/network.py +++ b/ironic/common/network.py @@ -32,15 +32,19 @@ def get_node_vif_ids(task): portgroup_vifs = {} port_vifs = {} for portgroup in task.portgroups: - # NOTE(vdrok): This works because cleaning_vif_port_id doesn't exist - # when we're in deployment/tenant network + # NOTE(vdrok): We are booting the node only in one network at a time, + # and presence of cleaning_vif_port_id means we're doing cleaning, and of + # provisioning_vif_port_id - provisioning. 
Otherwise it's a tenant + # network vif = (portgroup.internal_info.get('cleaning_vif_port_id') or + portgroup.internal_info.get('provisioning_vif_port_id') or portgroup.extra.get('vif_port_id')) if vif: portgroup_vifs[portgroup.uuid] = vif vifs['portgroups'] = portgroup_vifs for port in task.ports: vif = (port.internal_info.get('cleaning_vif_port_id') or + port.internal_info.get('provisioning_vif_port_id') or port.extra.get('vif_port_id')) if vif: port_vifs[port.uuid] = vif diff --git a/ironic/common/neutron.py b/ironic/common/neutron.py index fc200207be..780d4d1113 100644 --- a/ironic/common/neutron.py +++ b/ironic/common/neutron.py @@ -51,10 +51,14 @@ neutron_opts = [ 'should only be used for testing.')), cfg.StrOpt('cleaning_network_uuid', help=_('Neutron network UUID for the ramdisk to be booted ' - 'into for cleaning nodes. Required if cleaning (either ' - 'automatic or manual) is run for flat network interface,' - ' and, if DHCP providers are still being used, for ' - 'neutron DHCP provider.')) + 'into for cleaning nodes. Required for "neutron" ' + 'network interface. It is also required if cleaning ' + 'nodes when using "flat" network interface or "neutron" ' + 'DHCP provider.')), + cfg.StrOpt('provisioning_network_uuid', + help=_('Neutron network UUID for the ramdisk to be booted ' + 'into for provisioning nodes. Required for "neutron" ' + 'network interface.')), ] CONF.register_opts(neutron_opts, group='neutron') diff --git a/ironic/dhcp/neutron.py b/ironic/dhcp/neutron.py index 084300d1a7..a5027e05f6 100644 --- a/ironic/dhcp/neutron.py +++ b/ironic/dhcp/neutron.py @@ -208,9 +208,12 @@ class NeutronDHCPApi(base.BaseDHCP): :raises: InvalidIPv4Address """ - # NOTE(vdrok): This works because cleaning_vif_port_id doesn't exist - # when we're in deployment/tenant network + # NOTE(vdrok): We are booting the node only in one network at a time, + # and presence of cleaning_vif_port_id means we're doing cleaning, and of + # provisioning_vif_port_id - provisioning. 
Otherwise it's a tenant + # network vif = (p_obj.internal_info.get('cleaning_vif_port_id') or + p_obj.internal_info.get('provisioning_vif_port_id') or p_obj.extra.get('vif_port_id')) if not vif: obj_name = 'portgroup' diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py index 2a23b39d4e..87aaf65de1 100644 --- a/ironic/drivers/modules/deploy_utils.py +++ b/ironic/drivers/modules/deploy_utils.py @@ -518,8 +518,12 @@ def get_single_nic_with_vif_port_id(task): :returns: MAC address of the port connected to deployment network. None if it cannot find any port with vif id. """ + # NOTE(vdrok): We are booting the node only in one network at a time, + # and presence of cleaning_vif_port_id means we're doing cleaning, and of + # provisioning_vif_port_id - provisioning. Otherwise it's a tenant network for port in task.ports: if (port.internal_info.get('cleaning_vif_port_id') or + port.internal_info.get('provisioning_vif_port_id') or port.extra.get('vif_port_id')): return port.address diff --git a/ironic/drivers/modules/network/neutron.py b/ironic/drivers/modules/network/neutron.py new file mode 100644 index 0000000000..5b8daaf5d5 --- /dev/null +++ b/ironic/drivers/modules/network/neutron.py @@ -0,0 +1,212 @@ +# Copyright 2015 Rackspace, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from neutronclient.common import exceptions as neutron_exceptions +from oslo_config import cfg +from oslo_log import log +from oslo_utils import uuidutils + +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.common.i18n import _LI +from ironic.common.i18n import _LW +from ironic.common import neutron +from ironic.drivers import base +from ironic import objects + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + + +class NeutronNetwork(base.NetworkInterface): + """Neutron v2 network interface""" + + def __init__(self): + failures = [] + cleaning_net = CONF.neutron.cleaning_network_uuid + if not uuidutils.is_uuid_like(cleaning_net): + failures.append('cleaning_network_uuid=%s' % cleaning_net) + + provisioning_net = CONF.neutron.provisioning_network_uuid + if not uuidutils.is_uuid_like(provisioning_net): + failures.append('provisioning_network_uuid=%s' % provisioning_net) + + if failures: + raise exception.DriverLoadError( + driver=self.__class__.__name__, + reason=(_('The following [neutron] group configuration ' + 'options are incorrect, they must be valid UUIDs: ' + '%s') % ', '.join(failures))) + + def add_provisioning_network(self, task): + """Add the provisioning network to a node. + + :param task: A TaskManager instance. + :raises: NetworkError + """ + LOG.info(_LI('Adding provisioning network to node %s'), + task.node.uuid) + vifs = neutron.add_ports_to_network( + task, CONF.neutron.provisioning_network_uuid) + for port in task.ports: + if port.uuid in vifs: + internal_info = port.internal_info + internal_info['provisioning_vif_port_id'] = vifs[port.uuid] + port.internal_info = internal_info + port.save() + + def remove_provisioning_network(self, task): + """Remove the provisioning network from a node. + + :param task: A TaskManager instance. 
+ :raises: NetworkError + """ + LOG.info(_LI('Removing provisioning network from node %s'), + task.node.uuid) + neutron.remove_ports_from_network( + task, CONF.neutron.provisioning_network_uuid) + for port in task.ports: + if 'provisioning_vif_port_id' in port.internal_info: + internal_info = port.internal_info + del internal_info['provisioning_vif_port_id'] + port.internal_info = internal_info + port.save() + + def add_cleaning_network(self, task): + """Create neutron ports for each port on task.node to boot the ramdisk. + + :param task: a TaskManager instance. + :raises: NetworkError + :returns: a dictionary in the form {port.uuid: neutron_port['id']} + """ + # If we have left over ports from a previous cleaning, remove them + neutron.rollback_ports(task, CONF.neutron.cleaning_network_uuid) + LOG.info(_LI('Adding cleaning network to node %s'), task.node.uuid) + vifs = neutron.add_ports_to_network(task, + CONF.neutron.cleaning_network_uuid) + for port in task.ports: + if port.uuid in vifs: + internal_info = port.internal_info + internal_info['cleaning_vif_port_id'] = vifs[port.uuid] + port.internal_info = internal_info + port.save() + return vifs + + def remove_cleaning_network(self, task): + """Deletes the neutron port created for booting the ramdisk. + + :param task: a TaskManager instance. + :raises: NetworkError + """ + LOG.info(_LI('Removing cleaning network from node %s'), + task.node.uuid) + neutron.remove_ports_from_network( + task, CONF.neutron.cleaning_network_uuid) + for port in task.ports: + if 'cleaning_vif_port_id' in port.internal_info: + internal_info = port.internal_info + del internal_info['cleaning_vif_port_id'] + port.internal_info = internal_info + port.save() + + def configure_tenant_networks(self, task): + """Configure tenant networks for a node. + + :param task: A TaskManager instance. 
+ :raises: NetworkError + """ + node = task.node + ports = task.ports + LOG.info(_LI('Mapping instance ports to %s'), node.uuid) + + # TODO(russell_h): this is based on the broken assumption that the + # number of Neutron ports will match the number of physical ports. + # Instead, we should probably list ports for this instance in + # Neutron and update all of those with the appropriate portmap. + if not ports: + msg = _("No ports are associated with node %s") % node.uuid + LOG.error(msg) + raise exception.NetworkError(msg) + ports = [p for p in ports if not p.portgroup_id] + portgroups = task.portgroups + + portmap = neutron.get_node_portmap(task) + + client = neutron.get_client(task.context.auth_token) + for port_like_obj in ports + portgroups: + vif_port_id = port_like_obj.extra.get('vif_port_id') + + if not vif_port_id: + LOG.warning( + _LW('%(port_like_object)s %(pobj_uuid)s in node %(node)s ' + 'has no vif_port_id value in extra field.'), + {'port_like_object': port_like_obj.__class__.__name__, + 'pobj_uuid': port_like_obj.uuid, 'node': node.uuid}) + continue + + LOG.debug('Mapping tenant port %(vif_port_id)s to node ' + '%(node_id)s', + {'vif_port_id': vif_port_id, 'node_id': node.uuid}) + local_link_info = [] + if isinstance(port_like_obj, objects.Portgroup): + pg_ports = [p for p in task.ports + if p.portgroup_id == port_like_obj.id] + for port in pg_ports: + local_link_info.append(portmap[port.uuid]) + else: + # We iterate only on ports or portgroups, no need to check + # that it is a port + local_link_info.append(portmap[port_like_obj.uuid]) + body = { + 'port': { + 'device_owner': 'baremetal:none', + 'device_id': node.instance_uuid or node.uuid, + 'admin_state_up': True, + 'binding:vnic_type': 'baremetal', + 'binding:host_id': node.uuid, + 'binding:profile': { + 'local_link_information': local_link_info, + }, + } + } + + try: + client.update_port(vif_port_id, body) + except neutron_exceptions.ConnectionFailed as e: + msg = (_('Could not add public 
network VIF %(vif)s ' + 'to node %(node)s, possible network issue. %(exc)s') % + {'vif': vif_port_id, + 'node': node.uuid, + 'exc': e}) + LOG.error(msg) + raise exception.NetworkError(msg) + + def unconfigure_tenant_networks(self, task): + """Unconfigure tenant networks for a node. + + Even though nova takes care of port removal from tenant network, we + remove it here/now to avoid the possibility of the ironic port being + bound to the tenant and cleaning networks at the same time. + + :param task: A TaskManager instance. + :raises: NetworkError + """ + node = task.node + LOG.info(_LI('Unmapping instance ports from node %s'), node.uuid) + params = {'device_id': node.instance_uuid or node.uuid} + + neutron.remove_neutron_ports(task, params) diff --git a/ironic/tests/base.py b/ironic/tests/base.py index b4b7b71759..8a205ba631 100644 --- a/ironic/tests/base.py +++ b/ironic/tests/base.py @@ -119,7 +119,9 @@ class TestCase(testtools.TestCase): tempdir=tempfile.tempdir) self.config(cleaning_network_uuid=uuidutils.generate_uuid(), group='neutron') - self.config(enabled_network_interfaces=['flat', 'noop']) + self.config(provisioning_network_uuid=uuidutils.generate_uuid(), + group='neutron') + self.config(enabled_network_interfaces=['flat', 'noop', 'neutron']) self.set_defaults(host='fake-mini', debug=True) self.set_defaults(connection="sqlite://", diff --git a/ironic/tests/unit/common/test_driver_factory.py b/ironic/tests/unit/common/test_driver_factory.py index a8b286140f..e83d815553 100644 --- a/ironic/tests/unit/common/test_driver_factory.py +++ b/ironic/tests/unit/common/test_driver_factory.py @@ -110,7 +110,7 @@ class NetworkInterfaceFactoryTestCase(db_base.DbTestCase): self.assertEqual(extension_mgr['flat'].obj, task.driver.network) self.assertEqual('ironic.hardware.interfaces.network', factory._entrypoint_name) - self.assertEqual(['flat', 'noop'], + self.assertEqual(['flat', 'neutron', 'noop'], sorted(factory._enabled_driver_list)) def 
test_build_driver_for_task_default_is_none(self): diff --git a/ironic/tests/unit/common/test_network.py b/ironic/tests/unit/common/test_network.py index e2240a9939..a37267a062 100644 --- a/ironic/tests/unit/common/test_network.py +++ b/ironic/tests/unit/common/test_network.py @@ -95,15 +95,21 @@ class TestNetwork(db_base.DbTestCase): result = network.get_node_vif_ids(task) self.assertEqual(expected, result) - def test_get_node_vif_ids_during_cleaning(self): + def _test_get_node_vif_ids_multitenancy(self, int_info_key): port = db_utils.create_test_port( node_id=self.node.id, address='aa:bb:cc:dd:ee:ff', - internal_info={'cleaning_vif_port_id': 'test-vif-A'}) + internal_info={int_info_key: 'test-vif-A'}) portgroup = db_utils.create_test_portgroup( node_id=self.node.id, address='dd:ee:ff:aa:bb:cc', - internal_info={'cleaning_vif_port_id': 'test-vif-B'}) - expected = {'portgroups': {portgroup.uuid: 'test-vif-B'}, - 'ports': {port.uuid: 'test-vif-A'}} + internal_info={int_info_key: 'test-vif-B'}) + expected = {'ports': {port.uuid: 'test-vif-A'}, + 'portgroups': {portgroup.uuid: 'test-vif-B'}} with task_manager.acquire(self.context, self.node.uuid) as task: result = network.get_node_vif_ids(task) self.assertEqual(expected, result) + + def test_get_node_vif_ids_during_cleaning(self): + self._test_get_node_vif_ids_multitenancy('cleaning_vif_port_id') + + def test_get_node_vif_ids_during_provisioning(self): + self._test_get_node_vif_ids_multitenancy('provisioning_vif_port_id') diff --git a/ironic/tests/unit/dhcp/test_neutron.py b/ironic/tests/unit/dhcp/test_neutron.py index f93476813e..ca93feb66e 100644 --- a/ironic/tests/unit/dhcp/test_neutron.py +++ b/ironic/tests/unit/dhcp/test_neutron.py @@ -322,31 +322,21 @@ class TestNeutron(db_base.DbTestCase): fake_client.show_port.assert_called_once_with(port_id) @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address') - def test__get_port_ip_address(self, mock_gfia): - expected = "192.168.1.3" - port = 
object_utils.create_test_port(self.context, - node_id=self.node.id, - address='aa:bb:cc:dd:ee:ff', - uuid=uuidutils.generate_uuid(), - extra={'vif_port_id': - 'test-vif-A'}, - driver='fake') - mock_gfia.return_value = expected - with task_manager.acquire(self.context, - self.node.uuid) as task: - api = dhcp_factory.DHCPFactory().provider - result = api._get_port_ip_address(task, port, - mock.sentinel.client) - self.assertEqual(expected, result) - mock_gfia.assert_called_once_with('test-vif-A', mock.sentinel.client) - - @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address') - def test__get_port_ip_address_cleaning(self, mock_gfia): + def _test__get_port_ip_address(self, mock_gfia, network): expected = "192.168.1.3" + fake_vif = 'test-vif-%s' % network port = object_utils.create_test_port( self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff', uuid=uuidutils.generate_uuid(), - internal_info={'cleaning_vif_port_id': 'test-vif-A'}) + extra={'vif_port_id': fake_vif} if network == 'tenant' else {}, + internal_info={ + 'cleaning_vif_port_id': (fake_vif if network == 'cleaning' + else None), + 'provisioning_vif_port_id': (fake_vif + if network == 'provisioning' + else None), + } + ) mock_gfia.return_value = expected with task_manager.acquire(self.context, self.node.uuid) as task: @@ -354,7 +344,16 @@ class TestNeutron(db_base.DbTestCase): result = api._get_port_ip_address(task, port, mock.sentinel.client) self.assertEqual(expected, result) - mock_gfia.assert_called_once_with('test-vif-A', mock.sentinel.client) + mock_gfia.assert_called_once_with(fake_vif, mock.sentinel.client) + + def test__get_port_ip_address_tenant(self): + self._test__get_port_ip_address(network='tenant') + + def test__get_port_ip_address_cleaning(self): + self._test__get_port_ip_address(network='cleaning') + + def test__get_port_ip_address_provisioning(self): + self._test__get_port_ip_address(network='provisioning') 
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address') def test__get_port_ip_address_for_portgroup(self, mock_gfia): diff --git a/ironic/tests/unit/drivers/modules/network/test_neutron.py b/ironic/tests/unit/drivers/modules/network/test_neutron.py new file mode 100644 index 0000000000..08d89c24d3 --- /dev/null +++ b/ironic/tests/unit/drivers/modules/network/test_neutron.py @@ -0,0 +1,231 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +import mock +from neutronclient.common import exceptions as neutron_exceptions +from oslo_config import cfg +from oslo_utils import uuidutils + +from ironic.common import exception +from ironic.common import neutron as neutron_common +from ironic.conductor import task_manager +from ironic.drivers.modules.network import neutron +from ironic.tests.unit.conductor import mgr_utils +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.objects import utils + +CONF = cfg.CONF + + +class NeutronInterfaceTestCase(db_base.DbTestCase): + + def setUp(self): + super(NeutronInterfaceTestCase, self).setUp() + self.config(enabled_drivers=['fake']) + mgr_utils.mock_the_extension_manager() + self.interface = neutron.NeutronNetwork() + self.node = utils.create_test_node(self.context, + network_interface='neutron') + self.port = utils.create_test_port( + self.context, node_id=self.node.id, + address='52:54:00:cf:2d:32', + extra={'vif_port_id': uuidutils.generate_uuid()}) + self.neutron_port = {'id': 
'132f871f-eaec-4fed-9475-0d54465e0f00', + 'mac_address': '52:54:00:cf:2d:32'} + + def test_init_incorrect_provisioning_net(self): + self.config(provisioning_network_uuid=None, group='neutron') + self.assertRaises(exception.DriverLoadError, neutron.NeutronNetwork) + self.config(provisioning_network_uuid=uuidutils.generate_uuid(), + group='neutron') + self.config(cleaning_network_uuid='asdf', group='neutron') + self.assertRaises(exception.DriverLoadError, neutron.NeutronNetwork) + + @mock.patch.object(neutron_common, 'add_ports_to_network') + def test_add_provisioning_network(self, add_ports_mock): + add_ports_mock.return_value = {self.port.uuid: self.neutron_port['id']} + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.add_provisioning_network(task) + add_ports_mock.assert_called_once_with( + task, CONF.neutron.provisioning_network_uuid) + self.port.refresh() + self.assertEqual(self.neutron_port['id'], + self.port.internal_info['provisioning_vif_port_id']) + + @mock.patch.object(neutron_common, 'remove_ports_from_network') + def test_remove_provisioning_network(self, remove_ports_mock): + self.port.internal_info = {'provisioning_vif_port_id': 'vif-port-id'} + self.port.save() + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.remove_provisioning_network(task) + remove_ports_mock.assert_called_once_with( + task, CONF.neutron.provisioning_network_uuid) + self.port.refresh() + self.assertNotIn('provisioning_vif_port_id', self.port.internal_info) + + @mock.patch.object(neutron_common, 'rollback_ports') + @mock.patch.object(neutron_common, 'add_ports_to_network') + def test_add_cleaning_network(self, add_ports_mock, rollback_mock): + add_ports_mock.return_value = {self.port.uuid: self.neutron_port['id']} + with task_manager.acquire(self.context, self.node.id) as task: + res = self.interface.add_cleaning_network(task) + rollback_mock.assert_called_once_with( + task, CONF.neutron.cleaning_network_uuid) + 
self.assertEqual(res, add_ports_mock.return_value) + self.port.refresh() + self.assertEqual(self.neutron_port['id'], + self.port.internal_info['cleaning_vif_port_id']) + + @mock.patch.object(neutron_common, 'remove_ports_from_network') + def test_remove_cleaning_network(self, remove_ports_mock): + self.port.internal_info = {'cleaning_vif_port_id': 'vif-port-id'} + self.port.save() + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.remove_cleaning_network(task) + remove_ports_mock.assert_called_once_with( + task, CONF.neutron.cleaning_network_uuid) + self.port.refresh() + self.assertNotIn('cleaning_vif_port_id', self.port.internal_info) + + @mock.patch.object(neutron_common, 'remove_neutron_ports') + def test_unconfigure_tenant_networks(self, remove_ports_mock): + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.unconfigure_tenant_networks(task) + remove_ports_mock.assert_called_once_with( + task, {'device_id': task.node.uuid}) + + def test_configure_tenant_networks_no_ports_for_node(self): + n = utils.create_test_node(self.context, network_interface='neutron', + uuid=uuidutils.generate_uuid()) + with task_manager.acquire(self.context, n.id) as task: + self.assertRaisesRegexp( + exception.NetworkError, 'No ports are associated', + self.interface.configure_tenant_networks, task) + + @mock.patch.object(neutron_common, 'get_client') + @mock.patch.object(neutron, 'LOG') + def test_configure_tenant_networks_no_vif_id(self, log_mock, client_mock): + self.port.extra = {} + self.port.save() + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.configure_tenant_networks(task) + client_mock.assert_called_once_with(task.context.auth_token) + self.assertIn('no vif_port_id value in extra', + log_mock.warning.call_args[0][0]) + + @mock.patch.object(neutron_common, 'get_client') + def test_configure_tenant_networks_update_fail(self, client_mock): + client = client_mock.return_value + 
client.update_port.side_effect = neutron_exceptions.ConnectionFailed( + reason='meow') + with task_manager.acquire(self.context, self.node.id) as task: + self.assertRaisesRegexp( + exception.NetworkError, 'Could not add', + self.interface.configure_tenant_networks, task) + client_mock.assert_called_once_with(task.context.auth_token) + + @mock.patch.object(neutron_common, 'get_client') + def _test_configure_tenant_networks(self, client_mock): + upd_mock = mock.Mock() + client_mock.return_value.update_port = upd_mock + second_port = utils.create_test_port( + self.context, node_id=self.node.id, address='52:54:00:cf:2d:33', + extra={'vif_port_id': uuidutils.generate_uuid()}, + uuid=uuidutils.generate_uuid(), + local_link_connection={'switch_id': '0a:1b:2c:3d:4e:ff', + 'port_id': 'Ethernet1/1', + 'switch_info': 'switch2'} + ) + expected_body = { + 'port': { + 'device_owner': 'baremetal:none', + 'device_id': self.node.instance_uuid or self.node.uuid, + 'admin_state_up': True, + 'binding:vnic_type': 'baremetal', + 'binding:host_id': self.node.uuid, + } + } + port1_body = copy.deepcopy(expected_body) + port1_body['port']['binding:profile'] = { + 'local_link_information': [self.port.local_link_connection] + } + port2_body = copy.deepcopy(expected_body) + port2_body['port']['binding:profile'] = { + 'local_link_information': [second_port.local_link_connection] + } + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.configure_tenant_networks(task) + client_mock.assert_called_once_with(task.context.auth_token) + upd_mock.assert_has_calls( + [mock.call(self.port.extra['vif_port_id'], port1_body), + mock.call(second_port.extra['vif_port_id'], port2_body)], + any_order=True + ) + + def test_configure_tenant_networks(self): + self.node.instance_uuid = uuidutils.generate_uuid() + self.node.save() + self._test_configure_tenant_networks() + + def test_configure_tenant_networks_no_instance_uuid(self): + self._test_configure_tenant_networks() + + 
@mock.patch.object(neutron_common, 'get_client') + def test_configure_tenant_networks_with_portgroups(self, client_mock): + pg = utils.create_test_portgroup( + self.context, node_id=self.node.id, address='ff:54:00:cf:2d:32', + extra={'vif_port_id': uuidutils.generate_uuid()}) + port1 = utils.create_test_port( + self.context, node_id=self.node.id, address='ff:54:00:cf:2d:33', + uuid=uuidutils.generate_uuid(), + portgroup_id=pg.id, + local_link_connection={'switch_id': '0a:1b:2c:3d:4e:ff', + 'port_id': 'Ethernet1/1', + 'switch_info': 'switch2'} + ) + port2 = utils.create_test_port( + self.context, node_id=self.node.id, address='ff:54:00:cf:2d:34', + uuid=uuidutils.generate_uuid(), + portgroup_id=pg.id, + local_link_connection={'switch_id': '0a:1b:2c:3d:4e:ff', + 'port_id': 'Ethernet1/2', + 'switch_info': 'switch2'} + ) + upd_mock = mock.Mock() + client_mock.return_value.update_port = upd_mock + expected_body = { + 'port': { + 'device_owner': 'baremetal:none', + 'device_id': self.node.uuid, + 'admin_state_up': True, + 'binding:vnic_type': 'baremetal', + 'binding:host_id': self.node.uuid, + } + } + call1_body = copy.deepcopy(expected_body) + call1_body['port']['binding:profile'] = { + 'local_link_information': [self.port.local_link_connection] + } + call2_body = copy.deepcopy(expected_body) + call2_body['port']['binding:profile'] = { + 'local_link_information': [port1.local_link_connection, + port2.local_link_connection] + } + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.configure_tenant_networks(task) + client_mock.assert_called_once_with(task.context.auth_token) + upd_mock.assert_has_calls( + [mock.call(self.port.extra['vif_port_id'], call1_body), + mock.call(pg.extra['vif_port_id'], call2_body)] + ) diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py index 78412e71fc..7cbafdbb92 100644 --- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py +++ 
b/ironic/tests/unit/drivers/modules/test_deploy_utils.py @@ -1406,6 +1406,17 @@ class VirtualMediaDeployUtilsTestCase(db_base.DbTestCase): address = utils.get_single_nic_with_vif_port_id(task) self.assertEqual('aa:bb:cc:dd:ee:ff', address) + def test_get_single_nic_with_provisioning_vif_port_id(self): + obj_utils.create_test_port( + self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff', + uuid=uuidutils.generate_uuid(), + internal_info={'provisioning_vif_port_id': 'test-vif-A'}, + driver='iscsi_ilo') + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + address = utils.get_single_nic_with_vif_port_id(task) + self.assertEqual('aa:bb:cc:dd:ee:ff', address) + class ParseInstanceInfoCapabilitiesTestCase(tests_base.TestCase): diff --git a/releasenotes/notes/add-neutron-network-interface-aa9e7e65011ab8cd.yaml b/releasenotes/notes/add-neutron-network-interface-aa9e7e65011ab8cd.yaml new file mode 100644 index 0000000000..d8970fd73b --- /dev/null +++ b/releasenotes/notes/add-neutron-network-interface-aa9e7e65011ab8cd.yaml @@ -0,0 +1,14 @@ +--- +features: + - Added ``neutron`` network interface. This interface allows provisioning + and/or cleaning nodes in separate networks. A new config option + ``[neutron]provisioning_network_uuid`` has been added. This option + specifies the provisioning network UUID. +upgrade: + - | + If ``neutron`` network interface is specified in + ``[DEFAULT]enabled_network_interfaces``, + ``[neutron]provisioning_network_uuid`` and + ``[neutron]cleaning_network_uuid`` configuration options are required. If + any of them is not specified, the ironic-conductor service will fail to + start. 
diff --git a/setup.cfg b/setup.cfg index 4d1f319fdf..becd3df3e0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -90,6 +90,7 @@ ironic.drivers = ironic.hardware.interfaces.network = flat = ironic.drivers.modules.network.flat:FlatNetwork noop = ironic.drivers.modules.network.noop:NoopNetwork + neutron = ironic.drivers.modules.network.neutron:NeutronNetwork ironic.database.migration_backend = sqlalchemy = ironic.db.sqlalchemy.migration