Offload network provisioning for AIO to openstack_hosts

For a couple of cycles we have included our systemd-related roles in
the openstack_hosts role, so that operators can configure their nodes
before any actual service or infra role kicks in.

This should also provide the ability to configure networking for LXC
and for the overall setup.

So by moving the systemd role includes used during the AIO setup into
variables that leverage the openstack_hosts behavior, we achieve
multiple goals at once:
- Reduce the amount of intersecting logic in the AIO bootstrap
- Add testing for the openstack_hosts section and ensure it is functional
- Improve transparency for deployers and their ability to see and change
  the network configuration, as it will be templated as a variables file
  instead of being applied directly (see the sketch below).
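
A minimal sketch of the mechanism, assembled from the new host vars and
user variables templates further down in this change: the bootstrap
templates aio_*-prefixed variables, and the openstack_hosts_* variables
pick them up via a varnames lookup.

  # /etc/openstack_deploy/host_vars/aio1/networks.yml (generated by bootstrap)
  aio_systemd_networkd_devices_bootstrap:
    - NetDev:
        Name: dummy-mgmt
        Kind: dummy
    - NetDev:
        Name: br-mgmt
        Kind: bridge

  # user variables consumed by the openstack_hosts role
  openstack_hosts_systemd_networkd_devices: "{{ query('vars', *query('varnames', '^aio_systemd_networkd_devices_')) | flatten(levels=1) }}"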

As a consequence, we need to access the host by its already established
IP address, rather than by one defined by us.
However, we already require having `bootstrap_host_public_address`
defined and rely on it heavily, so this approach should not introduce
many regressions.
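
For example, an aio1 entry in the generated conf.d templates
(identity_hosts shown; the same pattern repeats below) now looks
roughly like:

  identity_hosts:
    aio1:
      ip: {{ bootstrap_host_public_address }}
      management_ip: {{ bootstrap_host_management_address }}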

Depends-On: https://review.opendev.org/c/openstack/openstack-ansible-haproxy_server/+/953670
Depends-On: https://review.opendev.org/c/openstack/openstack-ansible/+/954316
Change-Id: Ib688a5a758ec5ebbd0ebafdcaf2b27aeecdd9aba
Signed-off-by: Dmitriy Rabotyagov <dmitriy.rabotyagov@cleura.com>

@@ -1,3 +1,4 @@
registration_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,3 +1,4 @@
metering-alarm_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,4 +1,5 @@
---
key-manager_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,4 +1,5 @@
---
reservation_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,9 +1,11 @@
# The compute host that the ceilometer compute agent will be running on.
metering-compute_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
# The infra nodes that the central agents will be running on
metering-infra_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,26 +1,31 @@
# The infra nodes where the Ceph mon services will run
ceph-mon_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
# The nodes that the Ceph OSD disks will be running on
ceph-osd_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
{% if 'manila' not in bootstrap_host_scenarios_expanded %}
# The nodes that the Ceph RadosGW object gateways will be running on
ceph-rgw_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
{% endif %}
{% if 'manila' in bootstrap_host_scenarios_expanded %}
ceph-mds_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
ceph-nfs_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
{% endif %}


@@ -1,7 +1,8 @@
---
storage-infra_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
container_vars:
cinder_qos_specs:
- name: low-iops
@@ -28,4 +29,5 @@ storage-infra_hosts:
storage_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,4 +1,5 @@
# The controller host that will be running the cloudkitty services
rating_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,4 +1,5 @@
---
dnsaas_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,4 +1,5 @@
---
image_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -15,4 +15,5 @@
metrics_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,4 +1,5 @@
---
load_balancer_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,4 +1,5 @@
---
orchestration_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,4 +1,5 @@
---
dashboard_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,14 +1,17 @@
---
ironic-infra_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
# Ironic compute hosts. These compute hosts will be used to
# facilitate ironic's interactions through nova.
ironic-compute_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
ironic-inspector_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,4 +1,5 @@
---
identity_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,3 +1,4 @@
magnum-infra_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,7 +1,9 @@
manila-infra_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
manila-data_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,8 +1,10 @@
---
masakari-infra_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
masakari-monitor_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,3 +1,4 @@
mistral-infra_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -3,14 +3,17 @@
# neutron-server, neutron-agents
network_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
{% if _neutron_plugin_driver == 'ml2.ovn' %}
network-gateway_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
network-northd_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
{% endif %}


@@ -1,8 +1,10 @@
---
compute-infra_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
compute_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,4 +1,5 @@
# The controller host that the octavia control plane will be run on
octavia-infra_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,3 +1,4 @@
placement-infra_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,3 +1,4 @@
skyline_dashboard_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -16,7 +16,8 @@ global_overrides:
default: True
swift-proxy_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
container_vars:
swift_proxy_vars:
limit_container_types: swift_proxy
@@ -25,7 +26,8 @@ swift-proxy_hosts:
write_affinity_node_count: "1 * replicas"
swift_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
container_vars:
swift_vars:
limit_container_types: swift


@@ -1,4 +1,5 @@
---
mano_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,3 +1,4 @@
trove-infra_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,3 +1,4 @@
unbound_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -1,7 +1,9 @@
---
zun-infra_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
zun-compute_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}


@@ -184,7 +184,8 @@ global_overrides:
# keystone
identity_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
{% if 'keystone' in bootstrap_host_scenarios or 'infra' in bootstrap_host_scenarios %}
# NOTE (jrosser) this ensures that we deploy 3 keystone containers
# during the os_keystone role test to validate ssh keys and fernet key sync
@@ -197,7 +198,8 @@ identity_hosts:
# galera, memcache, rabbitmq, utility
shared-infra_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
{% if 'infra' in bootstrap_host_scenarios_expanded %}
affinity:
galera_container: 3
@@ -215,12 +217,14 @@ repo-infra_hosts:
affinity:
repo_container: 3
{% endif %}
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
{% if 'zookeeper' in bootstrap_host_scenarios_expanded %}
coordination_hosts:
aio1:
ip: {{ bootstrap_host_management_address }}
ip: {{ bootstrap_host_public_address }}
management_ip: {{ bootstrap_host_management_address }}
{% if 'infra' in bootstrap_host_scenarios_expanded %}
affinity:
zookeeper_container: 3


@@ -32,7 +32,8 @@ def make_example_config(aio_config_file, configs_dir):
autoescape=jinja2.select_autoescape())
files = glob.glob(os.path.join(configs_dir, '*.aio'))
templated_variables = {
'bootstrap_host_management_address': '172.29.236.100'
'bootstrap_host_management_address': None,
'bootstrap_host_public_address': '172.29.236.100'
}
for file_name in files:
with open(file_name, 'r') as f:


@@ -0,0 +1,9 @@
---
other:
- |
The behavior of the AIO setup regarding network configuration has changed.
Instead of directly applying all configuration during the ``bootstrap-aio.sh``
script, the script will produce an additional variables file,
``user_variables_systemd.yml``, with the intended configuration.
The variables will be consumed and applied by the ``openstack_hosts`` role,
which is launched during the setup-hosts (setup-everything) playbook.


@@ -40,12 +40,3 @@
name: "{{ _ssh_service_name[ansible_facts['os_family'] | lower] }}"
state: started
enabled: true
post_tasks:
- name: Check that new network interfaces are up
ansible.builtin.assert:
that:
- ansible_facts['eth12']['active'] | bool
- ansible_facts['eth13']['active'] | bool
- ansible_facts['eth14']['active'] | bool
- name: Clear facts
ansible.builtin.meta: clear_facts


@@ -218,7 +218,13 @@
# Prepare the network interfaces
- name: Import prepare_networking tasks
ansible.builtin.import_tasks: prepare_networking.yml
ansible.builtin.include_tasks: prepare_networking.yml
args:
apply:
tags:
- prepare-networking
when:
- ansible_facts['os_family'] == 'RedHat'
tags:
- prepare-networking


@@ -117,6 +117,16 @@
when:
- ansible_facts['selinux']['status'] == "enabled"
- name: Create vars override folders if we need to test them
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: "0755"
with_items:
- /etc/openstack_deploy/group_vars
- /etc/openstack_deploy/host_vars
- /etc/openstack_deploy/host_vars/aio1
- name: Set the user_variables
openstack.config_template.config_template:
src: "{{ bootstrap_user_variables_template }}"
@@ -141,6 +151,9 @@
when:
- "item.condition | bool"
with_items:
- src: aio1_networks.yml.j2
dest: host_vars/aio1/networks.yml
condition: true
- src: user_variables_ceph.yml.j2
dest: user_variables_ceph.yml
condition: "{{ 'ceph' in bootstrap_host_scenarios_expanded }}"
@@ -198,16 +211,6 @@
when:
- "'ceph' in bootstrap_host_scenarios_expanded"
- name: Create vars override folders if we need to test them
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: "0755"
with_items:
- /etc/openstack_deploy/group_vars
- /etc/openstack_deploy/host_vars
when: "(lookup('env','ACTION') | default(false,true)) == 'varstest'"
- name: Create user-space overrides
ansible.builtin.lineinfile:
path: "{{ item.path }}"


@@ -18,7 +18,6 @@
register: firewalld_check
changed_when: false
failed_when: firewalld_check.rc > 1
when: ansible_facts['os_family'] == 'RedHat'
- name: Disable firewalld
become: true
@@ -29,7 +28,6 @@
with_items:
- firewalld
when:
- ansible_facts['os_family'] == 'RedHat'
- firewalld_check.rc == 0
- name: Disable NetworkManager
@@ -39,212 +37,3 @@
state: stopped
enabled: false
masked: true
when:
- ansible_facts['os_family'] == 'RedHat'
- name: Run the systemd-networkd role
ansible.builtin.include_role:
name: systemd_networkd
vars:
systemd_networkd_prefix: "osa_testing"
systemd_networkd_epel_mirror: "https://dl.fedoraproject.org/pub/epel"
systemd_interface_cleanup: true
systemd_run_networkd: true
systemd_netdevs:
- NetDev:
Name: dummy-squid
Kind: dummy
- NetDev:
Name: dummy-mgmt
Kind: dummy
- NetDev:
Name: dummy-vxlan
Kind: dummy
- NetDev:
Name: dummy-storage
Kind: dummy
- NetDev:
Name: dummy-vlan
Kind: dummy
- NetDev:
Name: dummy-dbaas
Kind: dummy
- NetDev:
Name: dummy-lbaas
Kind: dummy
- NetDev:
Name: dummy-bmaas
Kind: dummy
- NetDev:
Name: br-mgmt
Kind: bridge
- NetDev:
Name: br-vxlan
Kind: bridge
- NetDev:
Name: br-storage
Kind: bridge
- NetDev:
Name: br-vlan
Kind: bridge
- NetDev:
Name: br-dbaas
Kind: bridge
- NetDev:
Name: br-lbaas
Kind: bridge
- NetDev:
Name: br-bmaas
Kind: bridge
- NetDev:
Name: br-vlan-veth
Kind: veth
Peer:
Name: eth12
- NetDev:
Name: br-dbaas-veth
Kind: veth
Peer:
Name: eth13
- NetDev:
Name: br-lbaas-veth
Kind: veth
Peer:
Name: eth14
- NetDev:
Name: br-bmaas-veth
Kind: veth
Peer:
Name: eth15
systemd_networks:
- interface: "dummy-squid"
address: "{{ squid_network }}"
- interface: "dummy-mgmt"
bridge: "br-mgmt"
mtu: 9000
- interface: "br-mgmt"
config_overrides:
Network:
Address: "{{ {
(bootstrap_host_management_address ~ '/' ~ mgmt_network | ansible.utils.ipaddr('netmask')) | ansible.utils.ipaddr('host/prefix'): None,
(bootstrap_host_internal_address ~ '/' ~ mgmt_network | ansible.utils.ipaddr('netmask')) | ansible.utils.ipaddr('host/prefix'): None
} }}"
- interface: "dummy-storage"
bridge: "br-storage"
mtu: 9000
- interface: "br-storage"
address: "{{ storage_network | ansible.utils.nthhost('100') }}"
netmask: "{{ storage_network | ansible.utils.ipaddr('netmask') }}"
- interface: "dummy-dbaas"
bridge: "br-dbaas"
mtu: 9000
- interface: "br-dbaas"
address: "{{ dbaas_network | ansible.utils.nthhost('100') }}"
netmask: "{{ dbaas_network | ansible.utils.ipaddr('netmask') }}"
- interface: "br-dbaas-veth"
bridge: "br-dbaas"
mtu: 9000
- interface: "dummy-lbaas"
bridge: "br-lbaas"
mtu: 9000
- interface: "br-lbaas"
address: "{{ lbaas_network | ansible.utils.nthhost('100') }}"
netmask: "{{ lbaas_network | ansible.utils.ipaddr('netmask') }}"
- interface: "br-lbaas-veth"
bridge: "br-lbaas"
mtu: 9000
- interface: "dummy-bmaas"
bridge: "br-bmaas"
mtu: 1500
- interface: "br-bmaas"
address: "{{ bmaas_network | ansible.utils.nthhost('100') }}"
netmask: "{{ bmaas_network | ansible.utils.ipaddr('netmask') }}"
- interface: "br-bmaas-veth"
bridge: "br-bmaas"
mtu: 1500
- interface: "dummy-vxlan"
bridge: "br-vxlan"
mtu: 9000
- interface: "br-vxlan"
address: "{{ vxlan_network | ansible.utils.nthhost('100') }}"
netmask: "{{ vxlan_network | ansible.utils.ipaddr('netmask') }}"
- interface: "dummy-vlan"
bridge: "br-vlan"
mtu: 9000
- interface: "br-vlan"
config_overrides:
Network:
Address: "{{ {
vlan_network | ansible.utils.ipaddr('100'): None,
vlan_network | ansible.utils.ipaddr('1'): None
} }}"
- interface: "br-vlan-veth"
bridge: "br-vlan"
mtu: 9000
tags:
- network-config
# NOTE(jrosser) The systemd_networkd role uses a handler to restart the networking service
# This will normally not run until the end of the play, so we must force it here
- name: Force systemd_networkd hander to run
ansible.builtin.meta: flush_handlers
- name: Run the systemd service role
ansible.builtin.include_role:
name: systemd_service
vars:
systemd_services:
- service_name: "networking-post-up"
config_overrides:
Unit:
Description: networking-post-up
After: network-online.target
Wants: network-online.target
Service:
RemainAfterExit: true
service_type: oneshot
execstarts:
- "-{{ bootstrap_host_iptables_path }} -t nat -A POSTROUTING -o {{ bootstrap_host_public_interface }} -j MASQUERADE"
- "-{{ bootstrap_host_ethtool_path }} -K br-mgmt gso off sg off tso off tx off"
- "-{{ bootstrap_host_ethtool_path }} -K br-vxlan gso off sg off tso off tx off"
- "-{{ bootstrap_host_ethtool_path }} -K br-storage gso off sg off tso off tx off"
- "-{{ bootstrap_host_ethtool_path }} -K br-vlan gso off sg off tso off tx off"
- "-{{ bootstrap_host_ethtool_path }} -K br-dbaas gso off sg off tso off tx off"
- "-{{ bootstrap_host_ethtool_path }} -K br-lbaas gso off sg off tso off tx off"
- "-{{ bootstrap_host_ethtool_path }} -K br-bmaas gso off sg off tso off tx off"
- "-{{ bootstrap_host_ip_path }} link set eth12 up"
- "-{{ bootstrap_host_ip_path }} link set br-vlan-veth up"
- "-{{ bootstrap_host_ethtool_path }} -K eth12 gso off sg off tso off tx off"
- "-{{ bootstrap_host_ip_path }} link set eth13 up"
- "-{{ bootstrap_host_ip_path }} link set br-dbaas-veth up"
- "-{{ bootstrap_host_ethtool_path }} -K eth13 gso off sg off tso off tx off"
- "-{{ bootstrap_host_ip_path }} link set eth14 up"
- "-{{ bootstrap_host_ip_path }} link set br-lbaas-veth up"
- "-{{ bootstrap_host_ethtool_path }} -K eth14 gso off sg off tso off tx off"
- "-{{ bootstrap_host_ip_path }} link set eth15 up"
- "-{{ bootstrap_host_ip_path }} link set br-bmaas-veth up"
- "-{{ bootstrap_host_ethtool_path }} -K eth15 gso off sg off tso off tx off"
execstops:
- "{{ bootstrap_host_iptables_path }} -t nat -D POSTROUTING -o {{ bootstrap_host_public_interface }} -j MASQUERADE"
enabled: true
state: started
systemd_tempd_prefix: openstack
tags:
- network-config
- name: Updating the facts due to net changes
ansible.builtin.setup:
gather_subset: network
tags:
- networking


@@ -13,6 +13,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(noonedeadpunk): We cannot offload this to openstack_hosts, as Squid should already be
# operational for it.
- name: Prepare Squid interface
ansible.builtin.include_role:
name: systemd_networkd
vars:
systemd_networkd_prefix: "osa_proxy"
systemd_networkd_epel_mirror: "{{ (nodepool_vars is defined) | ternary(nodepool_vars.NODEPOOL_EPEL_MIRROR, 'http://download.fedoraproject.org/pub/epel') }}"
systemd_interface_cleanup: true
systemd_run_networkd: true
systemd_netdevs:
- NetDev:
Name: dummy-squid
Kind: dummy
systemd_networks:
- interface: "dummy-squid"
address: "{{ squid_network }}"
- name: Install squid packages
ansible.builtin.package:
name: squid


@@ -0,0 +1,186 @@
---
# Copyright 2025, Cleura AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
systemd_interface_cleanup: true
aio_systemd_networkd_devices_bootstrap:
- NetDev:
Name: dummy-mgmt
Kind: dummy
- NetDev:
Name: dummy-vxlan
Kind: dummy
- NetDev:
Name: dummy-storage
Kind: dummy
- NetDev:
Name: dummy-vlan
Kind: dummy
- NetDev:
Name: dummy-dbaas
Kind: dummy
- NetDev:
Name: dummy-lbaas
Kind: dummy
- NetDev:
Name: dummy-bmaas
Kind: dummy
- NetDev:
Name: br-mgmt
Kind: bridge
- NetDev:
Name: br-vxlan
Kind: bridge
- NetDev:
Name: br-storage
Kind: bridge
- NetDev:
Name: br-vlan
Kind: bridge
- NetDev:
Name: br-dbaas
Kind: bridge
- NetDev:
Name: br-lbaas
Kind: bridge
- NetDev:
Name: br-bmaas
Kind: bridge
- NetDev:
Name: br-vlan-veth
Kind: veth
Peer:
Name: eth12
- NetDev:
Name: br-dbaas-veth
Kind: veth
Peer:
Name: eth13
- NetDev:
Name: br-lbaas-veth
Kind: veth
Peer:
Name: eth14
- NetDev:
Name: br-bmaas-veth
Kind: veth
Peer:
Name: eth15
aio_systemd_networkd_networks_bootstrap:
- interface: "dummy-mgmt"
bridge: "br-mgmt"
mtu: 9000
- interface: "br-mgmt"
config_overrides:
Network:
Address:
? {{ [bootstrap_host_management_address, mgmt_network | ansible.utils.ipaddr('prefix')] | join('/') }}
? {{ [bootstrap_host_internal_address, mgmt_network | ansible.utils.ipaddr('prefix')] | join('/') }}
- interface: "dummy-storage"
bridge: "br-storage"
mtu: 9000
- interface: "br-storage"
address: "{{ storage_network | ansible.utils.nthhost('100') }}"
netmask: "{{ storage_network | ansible.utils.ipaddr('netmask') }}"
- interface: "dummy-dbaas"
bridge: "br-dbaas"
mtu: 9000
- interface: "br-dbaas"
address: "{{ dbaas_network | ansible.utils.nthhost('100') }}"
netmask: "{{ dbaas_network | ansible.utils.ipaddr('netmask') }}"
- interface: "br-dbaas-veth"
bridge: "br-dbaas"
mtu: 9000
- interface: "dummy-lbaas"
bridge: "br-lbaas"
mtu: 9000
- interface: "br-lbaas"
address: "{{ lbaas_network | ansible.utils.nthhost('100') }}"
netmask: "{{ lbaas_network | ansible.utils.ipaddr('netmask') }}"
- interface: "br-lbaas-veth"
bridge: "br-lbaas"
mtu: 9000
- interface: "dummy-bmaas"
bridge: "br-bmaas"
mtu: 1500
- interface: "br-bmaas"
address: "{{ bmaas_network | ansible.utils.nthhost('100') }}"
netmask: "{{ bmaas_network | ansible.utils.ipaddr('netmask') }}"
- interface: "br-bmaas-veth"
bridge: "br-bmaas"
mtu: 1500
- interface: "dummy-vxlan"
bridge: "br-vxlan"
mtu: 9000
- interface: "br-vxlan"
address: "{{ vxlan_network | ansible.utils.nthhost('100') }}"
netmask: "{{ vxlan_network | ansible.utils.ipaddr('netmask') }}"
- interface: "dummy-vlan"
bridge: "br-vlan"
mtu: 9000
- interface: "br-vlan"
config_overrides:
Network:
Address:
? {{ vlan_network | ansible.utils.ipaddr('100') }}
? {{ vlan_network | ansible.utils.ipaddr('1') }}
- interface: "br-vlan-veth"
bridge: "br-vlan"
mtu: 9000
aio_systemd_services_postnetwork:
- service_name: "networking-post-up"
config_overrides:
Unit:
Description: networking-post-up
After: network-online.target
Wants: network-online.target
Service:
RemainAfterExit: true
service_type: oneshot
execstarts:
- "-{{ bootstrap_host_iptables_path }} -t nat -A POSTROUTING -o {{ bootstrap_host_public_interface }} -j MASQUERADE"
- "-{{ bootstrap_host_ethtool_path }} -K br-mgmt gso off sg off tso off tx off"
- "-{{ bootstrap_host_ethtool_path }} -K br-vxlan gso off sg off tso off tx off"
- "-{{ bootstrap_host_ethtool_path }} -K br-storage gso off sg off tso off tx off"
- "-{{ bootstrap_host_ethtool_path }} -K br-vlan gso off sg off tso off tx off"
- "-{{ bootstrap_host_ethtool_path }} -K br-dbaas gso off sg off tso off tx off"
- "-{{ bootstrap_host_ethtool_path }} -K br-lbaas gso off sg off tso off tx off"
- "-{{ bootstrap_host_ethtool_path }} -K br-bmaas gso off sg off tso off tx off"
- "-{{ bootstrap_host_ip_path }} link set eth12 up"
- "-{{ bootstrap_host_ip_path }} link set br-vlan-veth up"
- "-{{ bootstrap_host_ethtool_path }} -K eth12 gso off sg off tso off tx off"
- "-{{ bootstrap_host_ip_path }} link set eth13 up"
- "-{{ bootstrap_host_ip_path }} link set br-dbaas-veth up"
- "-{{ bootstrap_host_ethtool_path }} -K eth13 gso off sg off tso off tx off"
- "-{{ bootstrap_host_ip_path }} link set eth14 up"
- "-{{ bootstrap_host_ip_path }} link set br-lbaas-veth up"
- "-{{ bootstrap_host_ethtool_path }} -K eth14 gso off sg off tso off tx off"
- "-{{ bootstrap_host_ip_path }} link set eth15 up"
- "-{{ bootstrap_host_ip_path }} link set br-bmaas-veth up"
- "-{{ bootstrap_host_ethtool_path }} -K eth15 gso off sg off tso off tx off"
execstops:
- "{{ bootstrap_host_iptables_path }} -t nat -D POSTROUTING -o {{ bootstrap_host_public_interface }} -j MASQUERADE"
enabled: true
state: started


@@ -44,6 +44,13 @@ openstack_hosts_journald_config:
SystemMaxFileSize: 100M
RuntimeMaxFileSize: 100M
{% raw %}
openstack_hosts_systemd_services: "{{ query('vars', *query('varnames', '^aio_systemd_services_')) | flatten(levels=1) }}"
openstack_hosts_systemd_networkd_networks: "{{ query('vars', *query('varnames', '^aio_systemd_networkd_networks_')) | flatten(levels=1) }}"
openstack_hosts_systemd_networkd_devices: "{{ query('vars', *query('varnames', '^aio_systemd_networkd_devices_')) | flatten(levels=1) }}"
openstack_hosts_systemd_networkd_prefix: "osa_testing"
{% endraw %}
## Galera settings
galera_monitoring_allowed_source: "0.0.0.0/0"
# TODO(noonedeadpunk): This should be enabled, once we will re-work SSL part
@@ -306,7 +313,7 @@ octavia_management_net_subnet_allocation_pools: {{ lbaas_network | ansible.utils
{% if 'metal' in bootstrap_host_scenarios %}
# TODO(mnaser): The Octavia role relies on gathering IPs of hosts in the
# LBaaS network and using those in the health manager pool
# IPs. We don't store those IPs when running metal so we
# IPs. We do not store those IPs when running metal so we
# have to override it manually. We should remove this and
# fix the role (or the inventory tool) eventually.
octavia_hm_hosts: {{ lbaas_network | ansible.utils.nthhost('100') }} # br-lbaas IP