Add SR-IOV support

This patch makes the necessary charm changes in order to expose
and attach SR-IOV VFs to OpenStack instances.

VFs that support hardware offloading will be handled by ovn,
the others will be processed by the Neutron SR-IOV nic agent.

neutron-k8s charm
-----------------

* enable sriovnicswitch ml2 mechanism driver

nova-k8s charm
--------------

* add pci-aliases setting
  * defines aliases for PCI devices, which can be requested through
    flavor extra specs
  * passed to openstack-hypervisor through the relation data
* enable PciPassthroughFilter and NUMATopologyFilter n-sch filters
* enable "filter_scheduler.pci_in_placement"

openstack-hypervisor charm
--------------------------

* add pci-device-specs setting
  * allows whitelisting PCI devices (including SR-IOV PF/VFs)

Change-Id: Ie118ac84b975275df84af5a378a95bbe49bfeea2
Signed-off-by: Lucian Petrut <lpetrut@cloudbasesolutions.com>
This commit is contained in:
Lucian Petrut
2025-06-24 13:55:24 +00:00
parent 09d6d4ab55
commit 43bfbcb69d
8 changed files with 111 additions and 35 deletions

View File

@@ -340,7 +340,7 @@ class OVNContext(sunbeam_ctxts.ConfigContext):
"extension_drivers": "port_security,qos,dns_domain_ports,port_forwarding,uplink_status_propagation",
"type_drivers": "geneve,vlan,flat",
"tenant_network_types": "geneve,vlan,flat",
"mechanism_drivers": "ovn",
"mechanism_drivers": "sriovnicswitch,ovn",
# Limiting defaults to 2**16 -1 even though geneve vni max is 2**24-1
# ml2_geneve_allocations will be populated with each vni range
# which will result in db timeouts if range is 1 - 2**24-1

View File

@@ -36,6 +36,13 @@ config:
default: RegionOne
description: Name of the OpenStack region
type: string
pci-aliases:
type: string
description: |
Sets the `[pci] alias` option in nova.conf, defining aliases for assignable
PCI devices that can be requested through flavor extra specs.
Example: [{"vendor_id": "8086", "product_id": "1563", "name": "intel-sr-iov"}]
containers:
nova-api:

View File

@@ -114,7 +114,7 @@ class NovaServiceProvides(Object):
self.on.config_request.emit(event.relation)
def set_config(
self, relation: Relation | None, nova_spiceproxy_url: str
self, relation: Relation | None, nova_spiceproxy_url: str, pci_aliases: str,
) -> None:
"""Set nova configuration on the relation."""
if not self.charm.unit.is_leader():
@@ -125,23 +125,23 @@ class NovaServiceProvides(Object):
# applications. This happens usually when config data is
# updated by provider and wants to send the data to all
# related applications
relation_data_updates = {
"spice-proxy-url": nova_spiceproxy_url,
"pci-aliases": pci_aliases,
}
if relation is None:
logging.debug(
"Sending config to all related applications of relation"
f"{self.relation_name}"
)
for relation in self.framework.model.relations[self.relation_name]:
relation.data[self.charm.app][
"spice-proxy-url"
] = nova_spiceproxy_url
relation.data[self.charm.app].update(relation_data_updates)
else:
logging.debug(
f"Sending config on relation {relation.app.name} "
f"{relation.name}/{relation.id}"
)
relation.data[self.charm.app][
"spice-proxy-url"
] = nova_spiceproxy_url
relation.data[self.charm.app].update(relation_data_updates)
class NovaConfigChangedEvent(RelationEvent):
@@ -208,3 +208,8 @@ class NovaServiceRequires(Object):
def nova_spiceproxy_url(self) -> str | None:
"""Return the nova_spiceproxy url."""
return self.get_remote_app_data("spice-proxy-url")
@property
def pci_aliases(self) -> str | None:
"""Return pci aliases."""
return self.get_remote_app_data("pci-aliases")

View File

@@ -18,6 +18,7 @@
This charm provide Nova services as part of an OpenStack deployment
"""
import json
import logging
import socket
import uuid
@@ -73,6 +74,23 @@ class WSGINovaMetadataConfigContext(sunbeam_ctxts.ConfigContext):
}
@sunbeam_tracing.trace_type
class NovaConfigContext(sunbeam_ctxts.ConfigContext):
    """Configuration context exposing Nova charm options to templates."""

    def context(self) -> dict:
        """Build the Nova template context.

        Parses the "pci-aliases" charm option (a JSON list of alias
        dicts; treated as empty when unset) and re-serialises each
        entry with sorted keys so the rendered nova.conf is stable
        regardless of key ordering in the user-supplied JSON.
        """
        raw = self.charm.model.config.get("pci-aliases") or "[]"
        canonical_aliases = [
            json.dumps(entry, sort_keys=True) for entry in json.loads(raw)
        ]
        return {"pci_aliases": canonical_aliases}
@sunbeam_tracing.trace_type
class NovaSchedulerPebbleHandler(sunbeam_chandlers.ServicePebbleHandler):
"""Pebble handler for Nova scheduler."""
@@ -551,7 +569,8 @@ class NovaOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
WSGINovaMetadataConfigContext(
self,
"wsgi_nova_metadata",
)
),
NovaConfigContext(self, "nova"),
]
)
return _cadapters
@@ -808,36 +827,34 @@ class NovaOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
metadata_secret = self.get_shared_metadatasecret()
if metadata_secret:
logger.debug("Found metadata secret in leader DB")
elif self.unit.is_leader():
logger.debug("Creating metadata secret")
self.set_shared_metadatasecret()
else:
if self.unit.is_leader():
logger.debug("Creating metadata secret")
self.set_shared_metadatasecret()
self.handle_traefik_ready(event)
self.set_config_on_update()
else:
logger.debug("Metadata secret not ready")
return
logger.debug("Metadata secret not ready")
return
if self.unit.is_leader():
self.handle_traefik_ready(event)
self.set_config_on_update()
super().configure_charm(event)
def set_config_from_event(self, event: ops.framework.EventBase) -> None:
"""Set config in relation data."""
if self.nova_spiceproxy_public_url:
self.config_svc.interface.set_config(
relation=event.relation,
nova_spiceproxy_url=self.nova_spiceproxy_public_url,
)
else:
logging.debug("Nova spiceproxy not yet set, not sending config")
self.config_svc.interface.set_config(
relation=event.relation,
nova_spiceproxy_url=self.nova_spiceproxy_public_url,
pci_aliases=self.model.config.get("pci-aliases"),
)
def set_config_on_update(self) -> None:
"""Set config on relation on update of local data."""
if self.nova_spiceproxy_public_url:
self.config_svc.interface.set_config(
relation=None,
nova_spiceproxy_url=self.nova_spiceproxy_public_url,
)
else:
logging.debug("Nova spiceproxy not yet set, not sending config")
self.config_svc.interface.set_config(
relation=None,
nova_spiceproxy_url=self.nova_spiceproxy_public_url,
pci_aliases=self.model.config.get("pci-aliases"),
)
if __name__ == "__main__": # pragma: nocover

View File

@@ -44,6 +44,16 @@ enable = False
[conductor]
workers = 4
[pci]
{% for alias in nova.pci_aliases -%}
alias = {{ alias }}
{% endfor -%}
[filter_scheduler]
enabled_filters = ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter, PciPassthroughFilter, NUMATopologyFilter
available_filters = nova.scheduler.filters.all_filters
pci_in_placement = True
[scheduler]
# NOTE(jamespage): perform automatic host cell mapping
# until we can orchestrate this better

View File

@@ -49,6 +49,35 @@ config:
hosts. This memory will be used for instances. The compute
usage report deducts this memory from the available
memory sent to the placement service.
pci-device-specs:
type: string
description: |
A list of device specs used to set the `pci.device_spec` option in
nova.conf, which allows PCI passthrough of specific devices to VMs.
Example applications: GPU processing, SR-IOV networking, etc.
NOTE: For PCI passthrough to work IOMMU must be enabled on the machine
deployed to. This can be accomplished by setting kernel parameters on
capable machines in MAAS, tagging them and using these tags as
constraints in the model.
Examples:
* specify the PF address, exposing all the corresponding VFs:
[{"physical_network": "physnet1", "address": "0000:1b:00.0"}]
* pick VFs individually:
[{"physical_network": "physnet2", "address": "0000:1b:10.0"},
{"physical_network": "physnet2", "address": "0000:1b:10.2"}]
* address wildcards:
[{"physical_network": "physnet1", "address": "*:1b:10.*"},
[{"physical_network": "physnet1", "address": ":1b:"}]
* address regex patterns:
[{"physical_network": "physnet1",
"address":
{"domain": ".*", "bus": "1b", "slot": "10", "function": "[0-4]"}}]
* vendor and product id:
[{"physical_network": "physnet1", "vendor_id": "8086", "product_id": "1563"}]
actions:
set-hypervisor-local-settings:

View File

@@ -579,6 +579,7 @@ class HypervisorOperatorCharm(sunbeam_charm.OSBaseOperatorCharm):
or config("ip-address")
or local_ip,
"compute.resume-on-boot": config("resume-on-boot"),
"compute.pci-device-specs": config("pci-device-specs"),
"credentials.ovn-metadata-proxy-shared-secret": self.metadata_secret(),
"identity.admin-role": contexts.identity_credentials.admin_role,
"identity.auth-url": contexts.identity_credentials.internal_endpoint,
@@ -670,15 +671,20 @@ class HypervisorOperatorCharm(sunbeam_charm.OSBaseOperatorCharm):
def _handle_nova_service(
self, contexts: sunbeam_core.OPSCharmContexts
) -> dict:
config = {}
try:
if contexts.nova_service.nova_spiceproxy_url:
return {
"compute.spice-proxy-url": contexts.nova_service.nova_spiceproxy_url,
}
config["compute.spice-proxy-url"] = (
contexts.nova_service.nova_spiceproxy_url
)
if contexts.nova_service.pci_aliases:
config["compute.pci-aliases"] = (
contexts.nova_service.pci_aliases
)
except AttributeError as e:
logger.debug(f"Nova service relation not integrated: {str(e)}")
return {}
return config
def _handle_masakari_service(
self, contexts: sunbeam_core.OPSCharmContexts

View File

@@ -157,6 +157,7 @@ class TestCharm(test_utils.CharmTestCase):
"compute.cert": certificate,
"compute.key": private_key,
"compute.migration-address": "10.0.0.10",
"compute.pci-device-specs": None,
"compute.resume-on-boot": True,
"compute.rbd-user": "nova",
"compute.rbd-secret-uuid": "ddd",
@@ -276,6 +277,7 @@ class TestCharm(test_utils.CharmTestCase):
"compute.key": private_key,
"compute.migration-address": "10.0.0.10",
"compute.resume-on-boot": True,
"compute.pci-device-specs": None,
"compute.rbd-user": "nova",
"compute.rbd-secret-uuid": "ddd",
"compute.rbd-key": "eee",