follow-up: post-review feedback for network data fixes

In If6125cf7af84dd1b4fa869f13932c43ac013d443 we identified a few
minor issues, some syntax-related, and one concern over metadata and
the data length. In this patch, we address the final-pass reviewer
comments *and* shorten the interface ID values to more generic values
which fulfill the named label/id value in the configuration drive
schema, and which are also values one could potentially reuse at some
point down the road with VLANs.

Instead of "eth" as the prefix for the generated interface IDs, we use
"iface", since we can't use any value which suggests a direct
interface name mapping either.
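
For illustration, here is a minimal standalone sketch of the remapping
described above (the shorten_link_ids function and the sample
network_data are hypothetical, not part of this patch):

    def shorten_link_ids(network_data):
        """Replace link ids with generic 'ifaceN' labels, in place."""
        link_map = {}
        # First pass: rename every link and record the old -> new mapping.
        for count, link in enumerate(network_data.get('links', [])):
            new_id = 'iface{}'.format(count)
            link_map[link['id']] = new_id
            link['id'] = new_id
        # Second pass: remap bond member references to the new names.
        for link in network_data.get('links', []):
            if link.get('bond_links'):
                link['bond_links'] = [link_map[old]
                                      for old in link['bond_links']]
        # Finally, point each network at its renamed link.
        for network in network_data.get('networks', []):
            network['link'] = link_map[network['link']]
        return network_data

    if __name__ == '__main__':
        sample = {
            'links': [
                {'id': 'portgroup-uuid', 'type': 'phy',
                 'bond_links': ['port-a-uuid', 'port-b-uuid']},
                {'id': 'port-a-uuid', 'type': 'phy'},
                {'id': 'port-b-uuid', 'type': 'phy'},
            ],
            'networks': [{'id': 'net0', 'link': 'portgroup-uuid',
                          'type': 'ipv4'}],
            'services': [],
        }
        # Links become iface0/iface1/iface2, bond_links becomes
        # ['iface1', 'iface2'], and the network's link becomes 'iface0'.
        print(shorten_link_ids(sample))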

Change-Id: I637412591ae57a8591a1a948f54be56785a8a1e3
Signed-off-by: Julia Kreger <juliaashleykreger@gmail.com>
Julia Kreger
2025-06-10 11:18:59 -07:00
parent 62bf4d76d3
commit b37ae4388c
6 changed files with 114 additions and 67 deletions


@@ -658,9 +658,7 @@ def get_neutron_port_data(port_id, vif_id, client=None, context=None,
     # Finish setup of bond links configuration injection
     if bond_links:
-        links = []
-        for link in bond_links:
-            links.append(link['id'])
+        links = [link['id'] for link in bond_links]
         # Add the list of links to the bond port
         network_data['links'][0]['bond_links'] = links
     # Add the rest of the links to the metadata.


@@ -1,5 +1,3 @@
-# coding=utf-8
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
@@ -47,9 +45,9 @@ def is_invalid_network_metadata(network_data):
         # likely just declare missing MTU as a qualifier to rebuild, but
         # that is likely to then trigger far more often. Maybe that means
         # it is an even better idea...
-        return ((len(network_data.get('links', [])) == 0)
-                and (len(network_data.get('networks', [])) == 0)
-                and (len(network_data.get('services', [])) == 0))
+        return (not network_data.get('links', [])
+                and not network_data.get('networks', [])
+                and not network_data.get('services', []))
     except AttributeError:
         # Got called with None or something else lacking the attribute.
         # NOTE(TheJulia): If we ever want to inject metadata if missing
@@ -66,7 +64,7 @@ def check_and_patch_configdrive(task, configdrive):
     correct, which is intended to allow the overall deployment process to
     proceed with the correct data.
-    :param task: A Taskmanager object.
+    :param task: A TaskManager object.
     :param configdrive: The in-memory configuration drive file which was
         submitted by the requester.
     :returns: The submitted configuration drive or a regenerated and patched
@@ -104,9 +102,12 @@ def check_and_patch_configdrive(task, configdrive):
             # move on.
             LOG.debug('Encountered error while trying to parse '
                       'configuration drive network_data.json file '
+                      'for node UUID %(node)s, instance %(inst)s. '
                       'content which was submitted to Ironic. '
-                      'Error: %s', e)
-            pass
+                      'Error: %(error)s',
+                      {'error': e,
+                       'node': task.node.uuid,
+                       'inst': task.node.instance_uuid})
         LOG.warning('The provided metadata for the deployment of '
                     'node %s appears invalid, and will be '
                     'regenerated based upon the available '
@@ -121,15 +122,19 @@ def check_and_patch_configdrive(task, configdrive):
             # regenerate_iso call takes overrides.
             regenerate_iso(
                 provided_iso, new_iso_file,
-                {'/openstack/latest/network_data.json': network_data})
+                {'/openstack/latest/network_data.json': network_data},
+                node_uuid=task.node.uuid)
             new_drive = _read_config_drive(new_iso_file)
             # Remove the file.
             os.remove(new_iso_file)
         except (OSError, processutils.ProcessExecutionError) as e:
             # Catch any general OSError (permission issue) or
             # ProcessExecutionError from regenerate_iso.
-            LOG.error('Failed to regenerate the configuration drive ISO, '
-                      'proceeding with submitted metadata: %s', e)
+            LOG.error('Failed to regenerate the configuration drive ISO '
+                      'for node %(node)s, '
+                      'proceeding with submitted metadata: %(error)s',
+                      {'error': e,
+                       'node': task.node.uuid})
             return configdrive
         except exception.ConfigDriveRegenerationFailure:
             # If this is being raised, the generation failed. That should have
@@ -153,7 +158,7 @@ def _read_config_drive(file):
             gzip.compress(new_iso.read())).decode()


-def regenerate_iso(source, dest, override_files):
+def regenerate_iso(source, dest, override_files, node_uuid=None):
     """Utility method to regenerate a config drive ISO image.

     This method takes the source, extracts the contents in a temporary folder,
@@ -166,12 +171,16 @@ def regenerate_iso(source, dest, override_files):
                            as values paris, which will be used to override
                            contents of the configuration drive for the purpose
                            of patching metadata.
+    :param node_uuid: The node UUID, for logging purposes.
     """
     with tempfile.TemporaryDirectory(dir=CONF.tempdir) as tempfolder:
         # NOTE(TheJulia): The base extraction logic was sourced from kickstart
         # utils, as an actually kind of simple base approach to do the needful
         # extraction.
-        LOG.debug('Extracting configuration drive to %s', tempfolder)
+        LOG.debug('Extracting configuration drive for node %(node)s to '
+                  '%(location)s.',
+                  {'node': node_uuid,
+                   'location': tempfolder})
         # The default configuration drive format we expect is generally
         # of a rock ridge format, and to avoid classic dos ISO9660 level
         # constraints, we will get the path using a rock ridge base path
@@ -196,8 +205,11 @@ def regenerate_iso(source, dest, override_files):
                 source.get_file_from_iso(
                     joliet_path=iso_file_path, local_path=posix_file_path)
         # Okay, we have the files in a folder, lets patch!
-        for file in override_files.keys():
-            LOG.debug('Patching override configuration drive file %s', file)
+        for file in override_files:
+            LOG.debug('Patching override configuration drive file '
+                      '%(file)s for node %(node)s.',
+                      {'file': file,
+                       'node': node_uuid})
             content = override_files[file]
             dest_path = os.path.join(tempfolder, str(file).lstrip('/'))
             with open(dest_path, mode="w") as new_file:
@@ -264,8 +276,8 @@ def generate_instance_network_data(task):
     # The pattern here is keyed off a list of VIF Ids. We'll need
     # this to finish hydrating necessary data.
-    vif_id_keys = list(vif_id_to_objects['ports'].keys())
-    vif_id_keys.extend(list(vif_id_to_objects['portgroups'].keys()))
+    vif_id_keys = list(vif_id_to_objects['ports'])
+    vif_id_keys.extend(list(vif_id_to_objects['portgroups']))
     # This is the starting point for metadata generation. We have three
     # buckets which contain lists of dicts. In this specific case, we
@@ -324,6 +336,39 @@ def generate_instance_network_data(task):
                 # as its entirely a possible situation in an entirely
                 # manually configured universe.
                 net_meta[key].extend(vif_net_meta[key])
+    # NOTE(TheJulia): This is done after the fact since our entire
+    # structure assembles everything together by vif and upon the basis
+    # of the vif attachments. We swap the values down to shorter values
+    # because it turns out some configdrive readers will try to use the
+    # id as a name value instead of operating relatively within the
+    # context of the running state of the node where the data applies.
+    link_count = 0
+    link_map = {}
+    # This is all modeled to modify in place so we minimally touch
+    # the rest of the data structure.
+    for link in net_meta['links']:
+        link_name = 'iface{}'.format(link_count)
+        link_map[link['id']] = link_name
+        net_meta['links'][link_count]['id'] = link_name
+        link_count += 1
+    # We have to re-iterate through all of the links to address
+    # bond links, since we should already have mapped names, and now
+    # we just need to correct them.
+    link_count = 0
+    for link in net_meta['links']:
+        bond_links = link.get('bond_links')
+        if bond_links:
+            new_links = [link_map[x] for x in bond_links]
+            net_meta['links'][link_count]['bond_links'] = new_links
+        link_count += 1
+    # We now need to re-associate networks to the new interface
+    # id values.
+    net_count = 0
+    for network in net_meta['networks']:
+        new_name = link_map[net_meta['networks'][net_count]['link']]
+        net_meta['networks'][net_count]['link'] = new_name
+        net_count += 1
     return net_meta
@@ -356,20 +401,7 @@ def generate_config_metadata(task):
     # future, and with that somewhat in mind, the name of the method
     # was left appropriately broad to set that stage.
-    # Logging first, because we know we're only being called if there
-    # is a problem. We can proceed from there.
-    LOG.error('An error has been detected in the supplied network '
-              'configuration metadata for the deployment of '
-              'node %s.', task.node.uuid)
     metadata = generate_instance_network_data(task)
-    if metadata is None:
-        # We were unable to generate metadata because the underlying
-        # interface configuration, i.e. no neutron support.
-        LOG.error('Failed to generate new network metadata for '
-                  'deployment of node %s as the underlying '
-                  'interfaces do not signify neutron support.',
-                  task.node.uuid)
-        raise exception.ConfigDriveRegenerationFailure()
     if is_invalid_network_metadata(metadata):
         # NOTE(TheJulia): This *should* never happen in the scenarios
         # known to Ironic, however *could* happen if there is some sort
@@ -411,13 +443,7 @@ def check_and_fix_configdrive(task, configdrive):
     # use this same method to also just supply network metadata injection if
     # no configuration drive is supplied.
     try:
-        if configdrive is None or configdrive == {}:
-            LOG.debug('Failed to locate configuration drive contents for '
-                      'node %s.', task.node.uuid)
-            # Nothing to see here, move along. Possible opportunity to
-            # improve UX by pre-emptively injecting a configuration drive.
-            return configdrive
-        elif isinstance(configdrive, str) and configdrive.startswith('http'):
+        if isinstance(configdrive, str) and configdrive.startswith('http'):
             # In this event, we've been given a URL. We don't support
             # trying to do anything else hwere.
             return configdrive


@@ -563,7 +563,7 @@ opts = [
                help=_('Option to disable operations which check and '
                       'potentially fix up configuration drive contents, '
                       'such as invalid network metadata values. When these '
-                      'issues are detected, and ironic is able to correct '
+                      'issues are detected, and Ironic is able to correct '
                       'the data, Ironic will do so transparently. Setting '
                       'this option to True will disable this '
                       'functionality.')),


@@ -16,8 +16,6 @@ from ironic.drivers import base
 class NoopNetwork(base.NetworkInterface):
     """Noop network interface."""

-    capabilities = []
-
     def port_changed(self, task, port_obj):
         """Handle any actions required when a port changes


@@ -75,6 +75,8 @@ class MetadataUtilsTestCase(db_base.DbTestCase):
     @mock.patch.object(cd_utils, 'generate_instance_network_data',
                        autospec=True)
     def test_generate_config_metadata_none(self, mock_gen, mock_valid):
+        # We don't expect None to ever be returned from
+        # generate_instance_network_data, but just in case!
         mock_gen.return_value = None
         with task_manager.acquire(self.context, self.node.id,
                                   shared=True) as task:
@@ -83,7 +85,7 @@ class MetadataUtilsTestCase(db_base.DbTestCase):
                               cd_utils.generate_config_metadata,
                               task)
             mock_gen.assert_called_once_with(task)
-            mock_valid.assert_not_called()
+            mock_valid.assert_called_once_with(None)

     @mock.patch.object(cd_utils, 'is_invalid_network_metadata',
                        autospec=True)
@@ -216,26 +218,26 @@ class MetadataUtilsTestCase(db_base.DbTestCase):
         expected = {
             'links': [
                 {'ethernet_mac_address': '52:54:00:cf:2d:31',
-                 'id': '86ffb399-6dd4-4e6c-8eb0-069d608bc5ca',
+                 'id': 'iface0',
                  'mtu': 1500,
                  'type': 'phy',
                  'vif_id': 'meep'},
                 {'ethernet_mac_address': '51:54:00:cf:2d:32',
-                 'id': '373960ab-131f-4a37-8329-8cda768ba722',
+                 'id': 'iface1',
                  'mtu': 1500,
                  'type': 'phy',
                  'vif_id': 'beep'}],
             'networks': [
                 {'id': 'boop',
                  'ip_address': '192.168.1.1',
-                 'link': '86ffb399-6dd4-4e6c-8eb0-069d608bc5ca',
+                 'link': 'iface0',
                  'netmask': '255.255.255.0',
                  'network_id': 'a8164a5e-ce7e-4ce4-b017-20c93a559f7c',
                  'routes': [],
                  'type': 'ipv4'},
                 {'id': 'boop',
                  'ip_address': '192.168.1.2',
-                 'link': '373960ab-131f-4a37-8329-8cda768ba722',
+                 'link': 'iface1',
                  'netmask': '255.255.255.0',
                  'network_id': 'a8164a5e-ce7e-4ce4-b017-20c93a559f7c',
                  'routes': [],
@@ -285,6 +287,15 @@ class MetadataUtilsTestCase(db_base.DbTestCase):
                  'type': 'phy',
                  'ethernet_mac_address': '01:02:03:04:05:06',
                  'vif_id': 'meep',
+                 'mtu': 1500,
+                 'bond_links': [port1.uuid, port2.uuid]},
+                {'id': port1.uuid,
+                 'type': 'phy',
+                 'ethernet_mac_address': port1.address,
+                 'mtu': 1500},
+                {'id': port2.uuid,
+                 'type': 'phy',
+                 'ethernet_mac_address': port2.address,
                  'mtu': 1500}],
             'networks': [
                 {'id': "boop",
@@ -296,14 +307,37 @@ class MetadataUtilsTestCase(db_base.DbTestCase):
                  'routes': []}],
             'services': []
         }
+        expected = {
+            'links': [
+                {'bond_links': ['iface1', 'iface2'],
+                 'ethernet_mac_address': '01:02:03:04:05:06',
+                 'id': 'iface0',
+                 'mtu': 1500,
+                 'type': 'phy',
+                 'vif_id': 'meep'},
+                {'ethernet_mac_address': '52:54:00:cf:2d:31',
+                 'id': 'iface1',
+                 'mtu': 1500,
+                 'type': 'phy'},
+                {'ethernet_mac_address': '51:54:00:cf:2d:32',
+                 'id': 'iface2',
+                 'mtu': 1500,
+                 'type': 'phy'}],
+            'networks': [{
+                'id': 'boop',
+                'ip_address': '192.168.1.1',
+                'link': 'iface0',
+                'netmask': '255.255.255.0',
+                'network_id': 'a8164a5e-ce7e-4ce4-b017-20c93a559f7c',
+                'routes': [],
+                'type': 'ipv4'}],
+            'services': []}
         mock_gnpd.return_value = gnpd
         with task_manager.acquire(self.context, self.node.id,
                                   shared=True) as task:
-            cd_utils.generate_instance_network_data(task)
-            # NOTE(TheJulia): Not really concerned with the result, the
-            # key aspect is to ensure we assembled the bond links correctly
-            # from the available data. Other tests ensure the fields
-            # get assembled together.
+            res = cd_utils.generate_instance_network_data(task)
+            self.assertEqual(expected, res)
         mock_gnpd.assert_called_once_with(
             pg.id, 'meep',
             mac_address=pg.address,
@@ -409,7 +443,8 @@ class MetadataUtilsTestCase(db_base.DbTestCase):
             cd_utils.check_and_patch_configdrive(task, 'foo')
         mock_regen.assert_called_once_with(
             mock.ANY, mock.ANY,
-            {'/openstack/latest/network_data.json': mock.ANY})
+            {'/openstack/latest/network_data.json': mock.ANY},
+            node_uuid=self.node.uuid)
         self.assertTrue(mock_remove.called)
         mock_temp.assert_called_once_with(dir=mock.ANY, mode='wb+')
         mock_mkstemp.assert_called_once_with(dir=mock.ANY)
@@ -450,7 +485,8 @@ class MetadataUtilsTestCase(db_base.DbTestCase):
             cd_utils.check_and_patch_configdrive(task, 'foo')
         mock_regen.assert_called_once_with(
             mock.ANY, mock.ANY,
-            {'/openstack/latest/network_data.json': mock.ANY})
+            {'/openstack/latest/network_data.json': mock.ANY},
+            node_uuid=self.node.uuid)
         self.assertTrue(mock_remove.called)
         mock_temp.assert_called_once_with(dir=mock.ANY, mode='wb+')
         mock_mkstemp.assert_called_once_with(dir=mock.ANY)
@@ -604,17 +640,6 @@ class PatchConfigDriveTestCase(db_base.DbTestCase):
             'services': [],
         }

-    def test__check_and_fix_configdrive_noop(self, mock_cdp, mock_gen,
-                                             mock_invalid):
-        with task_manager.acquire(self.context, self.node.uuid) as task:
-            self.assertIsNone(
-                cd_utils.check_and_fix_configdrive(task, None))
-            self.assertEqual(
-                {}, cd_utils.check_and_fix_configdrive(task, {}))
-            mock_invalid.assert_not_called()
-            mock_cdp.assert_not_called()
-            mock_gen.assert_not_called()
-
     def test_check_and_fix_configdrive_metadata(self, mock_cdp, mock_gen,
                                                 mock_invalid):
         mock_invalid.return_value = True


@@ -2,7 +2,7 @@
 fixes:
   - |
     Fixes an issue where a Nova, or other consumer attempting to send network
-    data to Ironic can send grossly invalid network metadata which needs to
+    data to Ironic can send invalid network metadata which needs to
     be replaced. Ironic now identifies the condition, and regenerates the network
     metadata utilizing the attached VIF records. This results in some minor
     data differences, such as Nova's internal VIF tap naming which is redundant,