[OVN] Delete a resource provider removed from the agent config

In ML2/OVN, the Placement information per compute node is stored in the
local OVS database, in the Open vSwitch register. Each time this
information changes, ML2/OVN receives this event and updates the OVN
agent child registers assigned to devices.

Starting with this patch, if a resource provider is removed, the
corresponding Placement resource provider register (and its inventory) is
removed too.

NOTE: if a resource provider has allocations, the delete operation will
fail with HTTP 409 (Conflict) and the Neutron API will log this error.
The error message will be:
  Unable to delete resource provider <UUID>: Resource provider has
  allocations.

Closes-Bug: #2101998
Change-Id: I78319cc3c94bf869383ecfaf14cb276b46086b8e
This commit is contained in:
Rodolfo Alonso Hernandez
2025-03-11 14:52:08 +00:00
committed by Rodolfo Alonso
parent 979385d8f9
commit de237bc92f
4 changed files with 109 additions and 10 deletions

View File

@@ -92,11 +92,14 @@ class PlacementState:
hypervisor_rps,
device_mappings,
supported_vnic_types,
client):
client,
rp_deleted=None,
):
self._rp_bandwidths = rp_bandwidths
self._rp_inventory_defaults = rp_inventory_defaults
self._rp_pp = rp_pkt_processing
self._rp_pp_inventory_defaults = rp_pkt_processing_inventory_defaults
self._rp_deleted = rp_deleted
self._driver_uuid_namespace = driver_uuid_namespace
self._agent_type = agent_type
self._hypervisor_rps = hypervisor_rps
@@ -176,14 +179,26 @@ class PlacementState:
'parent_provider_uuid': agent_rp_uuid}))
return rps
def _deferred_delete_device_rps(self):
rps = []
if not self._rp_deleted:
return rps
for device in self._rp_deleted:
hypervisor = self._hypervisor_rps[device]
rp_uuid = place_utils.device_resource_provider_uuid(
self._driver_uuid_namespace,
hypervisor['name'],
device)
rps.append(
DeferredCall(self._client.delete_resource_provider, rp_uuid))
return rps
def deferred_create_resource_providers(self):
    """Return the deferred calls that reconcile the resource providers.

    Combines, in order: the agent RP creations, the device RP creations
    and the deletions of device RPs removed from the agent configuration.

    :returns: list of ``DeferredCall`` objects to be executed later.
    """
    # NOTE: the diff rendering fused the old body (which returned the
    # agent + device RPs only) with the new one; the old early ``return``
    # made the deletion calls unreachable.  Only the new body is kept.
    agent_rps = self._deferred_create_agent_rps()
    device_rps = self._deferred_create_device_rps()
    deleted_rps = self._deferred_delete_device_rps()
    return agent_rps + device_rps + deleted_rps
def _deferred_update_agent_rp_traits(self, traits_):
agent_rp_traits = []

View File

@@ -143,7 +143,8 @@ class ChassisBandwidthConfigEvent(row_event.RowEvent):
def run(self, event, row, old):
name2uuid = self.placement_extension.name2uuid()
state = self.placement_extension.build_placement_state(row, name2uuid)
state = self.placement_extension.build_placement_state(row, name2uuid,
chassis_old=old)
if not state:
return
@@ -207,6 +208,8 @@ class OVNClientPlacementExtension:
name2uuid = self.name2uuid()
for ch in self._driver._sb_idl.chassis_list().execute(
check_error=True):
# TODO(ralonsoh): retrieve the OVN controller agent current RP
# information and delete any child RP not present in the chassis.
state = self.build_placement_state(ch, name2uuid)
if state:
chassis[ch.name] = state
@@ -244,9 +247,22 @@ class OVNClientPlacementExtension:
'(name:uuid):%s ', _name2uuid)
return _name2uuid
def build_placement_state(self, chassis, name2uuid):
def build_placement_state(self, chassis, name2uuid, chassis_old=None):
bridge_mappings = _parse_bridge_mappings(chassis)
cms_options = _parse_ovn_cms_options(chassis)
try:
cms_options_old = _parse_ovn_cms_options(chassis_old)
except AttributeError:
cms_options_old = {}
rp_new = set(cms_options.get(n_const.RP_BANDWIDTHS, {}).keys())
rp_old = set(cms_options_old.get(n_const.RP_BANDWIDTHS, {}).keys())
rp_deleted = rp_old - rp_new
rp_hyp_deleted = {
device: hyperv for device, hyperv in
cms_options_old.get(n_const.RP_HYPERVISORS, {}).items() if
device in rp_deleted}
LOG.debug('Building placement options for chassis %s: %s',
chassis.name, cms_options)
hypervisor_rps = {}
@@ -257,7 +273,10 @@ class OVNClientPlacementExtension:
# ovn-cms-options =
# resource_provider_bandwidths=br-ex:100:200;rp_tunnelled:300:400
# resource_provider_hypervisors=br-ex:host1,rp_tunnelled:host1
for device, hyperv in cms_options[n_const.RP_HYPERVISORS].items():
rp_hypervisors = itertools.chain(
cms_options[n_const.RP_HYPERVISORS].items(),
rp_hyp_deleted.items())
for device, hyperv in rp_hypervisors:
try:
hypervisor_rps[device] = {'name': hyperv,
'uuid': name2uuid[hyperv]}
@@ -294,4 +313,6 @@ class OVNClientPlacementExtension:
hypervisor_rps=hypervisor_rps,
device_mappings=bridge_mappings,
supported_vnic_types=self.supported_vnic_types,
client=self.placement_plugin._placement_client)
client=self.placement_plugin._placement_client,
rp_deleted=rp_deleted,
)

View File

@@ -15,6 +15,7 @@
from unittest import mock
from neutron_lib import constants as n_const
from oslo_utils import uuidutils
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.extensions \
import placement as p_extension
@@ -32,6 +33,9 @@ class TestOVNClientPlacementExtension(test_plugin.Ml2PluginV2TestCase):
self.plugin_driver = mock.Mock()
self.placement_driver = p_extension.OVNClientPlacementExtension(
self.plugin_driver)
# Ensure the ``OVNClientPlacementExtension`` object is new and that any
# previous instance has been deleted.
self.assertEqual(self.plugin_driver, self.placement_driver._driver)
self.placement_client = mock.Mock(
update_trait=mock.Mock(__name__='update_trait'),
ensure_resource_provider=mock.Mock(__name__='ensure_rp'),
@@ -46,6 +50,18 @@ class TestOVNClientPlacementExtension(test_plugin.Ml2PluginV2TestCase):
'resource_providers': [{'name': 'compute1', 'uuid': 'uuid1'},
{'name': 'compute2', 'uuid': 'uuid2'}]
}
self.name2uuid = self._gen_name2uuid(['compute1',
'compute2',
])
self.addCleanup(self._delete_placement_singleton_instance)
def _delete_placement_singleton_instance(self):
del self.placement_driver
@staticmethod
def _gen_name2uuid(hypervisor_list):
    """Map each hypervisor name to a freshly generated UUID."""
    name2uuid = {}
    for hypervisor in hypervisor_list:
        name2uuid[hypervisor] = uuidutils.generate_uuid()
    return name2uuid
def test_read_initial_chassis_config(self):
# Add two public networks, a RP per bridge and the correlation between
@@ -231,3 +247,43 @@ class TestOVNClientPlacementExtension(test_plugin.Ml2PluginV2TestCase):
n_const.RP_HYPERVISORS: {}
}}
_check_expected_config(init_conf, expected)
def test_build_placement_state_no_rp_deleted(self):
    """No RP is flagged as deleted when the device set is unchanged."""
    # Both chassis expose RPs for the same devices; only the bandwidth
    # figures differ, which must not trigger any RP deletion.
    current = fakes.FakeChassis.create(
        bridge_mappings=['public1:br-ext1', 'public2:br-ext2'],
        rp_bandwidths=['br-ext1:1000:2000', 'br-ext2:3000:4000'],
        rp_inventory_defaults={'allocation_ratio': 1.0, 'min_unit': 5},
        rp_hypervisors=['br-ext1:compute1', 'br-ext2:compute1'])
    previous = fakes.FakeChassis.create(
        bridge_mappings=['public1:br-ext1', 'public2:br-ext2'],
        rp_bandwidths=['br-ext1:1001:2002', 'br-ext2:3003:4004'],
        rp_inventory_defaults={'allocation_ratio': 1.0, 'min_unit': 5},
        rp_hypervisors=['br-ext1:compute1', 'br-ext2:compute1'])
    state = self.placement_driver.build_placement_state(
        current, self.name2uuid, chassis_old=previous)
    self.assertEqual(set(), state._rp_deleted)
def test_build_placement_state_rp_deleted(self):
    """A device dropped from the bandwidth config is reported as deleted."""
    # The new chassis no longer lists ``br-ext2`` in its bandwidth RPs,
    # so the extension must mark that device RP for deletion.
    current = fakes.FakeChassis.create(
        bridge_mappings=['public1:br-ext1', 'public2:br-ext2'],
        rp_bandwidths=['br-ext1:1000:2000'],
        rp_inventory_defaults={'allocation_ratio': 1.0, 'min_unit': 5},
        rp_hypervisors=['br-ext1:compute1', 'br-ext2:compute1'])
    previous = fakes.FakeChassis.create(
        bridge_mappings=['public1:br-ext1', 'public2:br-ext2'],
        rp_bandwidths=['br-ext1:1001:2002', 'br-ext2:3003:4004'],
        rp_inventory_defaults={'allocation_ratio': 1.0, 'min_unit': 5},
        rp_hypervisors=['br-ext1:compute1', 'br-ext2:compute1'])
    state = self.placement_driver.build_placement_state(
        current, self.name2uuid, chassis_old=previous)
    self.assertEqual({'br-ext2'}, state._rp_deleted)
def test_build_placement_state_no_old_chassis(self):
    """Without an old chassis register nothing can be flagged deleted."""
    # ``chassis_old`` defaults to None; the deleted-RP set must be empty.
    current = fakes.FakeChassis.create(
        bridge_mappings=['public1:br-ext1', 'public2:br-ext2'],
        rp_bandwidths=['br-ext1:1000:2000'],
        rp_inventory_defaults={'allocation_ratio': 1.0, 'min_unit': 5},
        rp_hypervisors=['br-ext1:compute1', 'br-ext2:compute1'])
    state = self.placement_driver.build_placement_state(
        current, self.name2uuid)
    self.assertEqual(set(), state._rp_deleted)

View File

@@ -0,0 +1,7 @@
---
other:
- |
The ML2/OVN Placement extension now removes any existing resource provider
that has been deleted from the updated local node configuration. If the
resource provider still has allocations, Placement rejects the request
(HTTP 409 Conflict) and the resource provider is not deleted.