Merge "[octavia] Add test case for load balancer"

This commit is contained in:
Zuul
2025-07-31 06:26:33 +00:00
committed by Gerrit Code Review
30 changed files with 1545 additions and 305 deletions

View File

@@ -0,0 +1,26 @@
#!/bin/bash
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

# Look up the Neutron port pre-created for this worker node
# (octavia-worker-port-<node>) and record its ID and MAC address in the
# pod-shared volume, where the nic-init container reads them.
set -ex

HOSTNAME=$(hostname -s)
PORTNAME="octavia-worker-port-${HOSTNAME}"

# Quote all expansions (SC2086) so an empty or unexpected value fails the
# CLI call loudly instead of silently mangling the argument list.
HM_PORT_ID=$(openstack port show "${PORTNAME}" -c id -f value)
HM_PORT_MAC=$(openstack port show "${PORTNAME}" -c mac_address -f value)

echo "${HM_PORT_ID}" > /tmp/pod-shared/HM_PORT_ID
echo "${HM_PORT_MAC}" > /tmp/pod-shared/HM_PORT_MAC

View File

@@ -0,0 +1,31 @@
#!/bin/bash
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

# Plug an internal OVS port (o-w0) into br-int so this worker node gains an
# interface on the Octavia management network. The Neutron port ID and MAC
# were written to the pod-shared volume by the get-port init container.
set -ex

HM_PORT_ID=$(cat /tmp/pod-shared/HM_PORT_ID)
HM_PORT_MAC=$(cat /tmp/pod-shared/HM_PORT_MAC)

# NOTE(review): presumably a cheap ovsdb reachability check before mutating
# the bridge — fails fast if vswitchd/ovsdb is not up yet.
ovs-vsctl --no-wait show

# Quote the ID/MAC expansions (SC2086) so an empty file from the previous
# init container aborts here instead of producing a malformed command.
ovs-vsctl --may-exist add-port br-int o-w0 \
  -- set Interface o-w0 type=internal \
  -- set Interface o-w0 external-ids:iface-status=active \
  -- set Interface o-w0 external-ids:attached-mac="${HM_PORT_MAC}" \
  -- set Interface o-w0 external-ids:iface-id="${HM_PORT_ID}" \
  -- set Interface o-w0 external-ids:skip_cleanup=true

# Give the kernel interface the Neutron port's MAC so traffic/DHCP for the
# port's fixed IP is delivered to this interface.
ip link set dev o-w0 address "${HM_PORT_MAC}"

View File

@@ -20,6 +20,13 @@ set -ex
COMMAND="${@:-start}"
function start () {
cat > /tmp/dhclient.conf <<EOF
request subnet-mask,broadcast-address,interface-mtu;
do-forward-updates false;
EOF
dhclient -v o-w0 -cf /tmp/dhclient.conf
exec octavia-worker \
--config-file /etc/octavia/octavia.conf
}

View File

@@ -59,6 +59,10 @@ data:
{{ tuple "bin/_octavia-housekeeping.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
octavia-worker.sh: |
{{ tuple "bin/_octavia-worker.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
octavia-worker-nic-init.sh: |
{{ tuple "bin/_octavia-worker-nic-init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
octavia-worker-get-port.sh: |
{{ tuple "bin/_octavia-worker-get-port.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
rabbit-init.sh: |
{{- include "helm-toolkit.scripts.rabbit_init" . | indent 4 }}
{{- end }}

View File

@@ -14,17 +14,19 @@ See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.manifests.deployment_worker }}
{{- $envAll := . }}
{{- define "octavia.worker.daemonset" }}
{{- $daemonset := index . 0 }}
{{- $configMapName := index . 1 }}
{{- $serviceAccountName := index . 2 }}
{{- $envAll := index . 3 }}
{{- with $envAll }}
{{- $mounts_octavia_worker := .Values.pod.mounts.octavia_worker.octavia_worker }}
{{- $mounts_octavia_worker_init := .Values.pod.mounts.octavia_worker.init_container }}
{{- $serviceAccountName := "octavia-worker" }}
{{ tuple $envAll "worker" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: apps/v1
kind: Deployment
kind: DaemonSet
metadata:
name: octavia-worker
annotations:
@@ -32,11 +34,10 @@ metadata:
labels:
{{ tuple $envAll "octavia" "worker" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
replicas: {{ .Values.pod.replicas.worker }}
selector:
matchLabels:
{{ tuple $envAll "octavia" "worker" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
{{ tuple $envAll | include "helm-toolkit.snippets.kubernetes_upgrades_deployment" | indent 2 }}
{{ tuple $envAll "worker" | include "helm-toolkit.snippets.kubernetes_upgrades_daemonset" | indent 2 }}
template:
metadata:
labels:
@@ -51,12 +52,42 @@ spec:
serviceAccountName: {{ $serviceAccountName }}
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
affinity:
{{ tuple $envAll "octavia" "worker" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
# hostPID: true
nodeSelector:
{{ .Values.labels.worker.node_selector_key }}: {{ .Values.labels.worker.node_selector_value }}
initContainers:
{{ tuple $envAll "worker" $mounts_octavia_worker_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
- name: octavia-worker-get-port
{{ tuple $envAll "octavia_worker_init" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.worker | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
{{- with $env := dict "ksUserSecret" ( index $envAll.Values.secrets.identity "admin" ) }}
{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 12 }}
{{- end }}
command:
- /tmp/octavia-worker-get-port.sh
volumeMounts:
- name: pod-shared
mountPath: /tmp/pod-shared
- name: octavia-bin
mountPath: /tmp/octavia-worker-get-port.sh
subPath: octavia-worker-get-port.sh
readOnly: true
- name: octavia-worker-nic-init
{{ tuple $envAll "openvswitch_vswitchd" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.worker | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "octavia_worker" "container" "octavia_worker_nic_init" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
command:
- /tmp/octavia-worker-nic-init.sh
volumeMounts:
- name: pod-shared
mountPath: /tmp/pod-shared
- name: octavia-bin
mountPath: /tmp/octavia-worker-nic-init.sh
subPath: octavia-worker-nic-init.sh
readOnly: true
- name: run
mountPath: /run
containers:
- name: octavia-worker
{{ tuple $envAll "octavia_worker" | include "helm-toolkit.snippets.image" | indent 10 }}
@@ -98,7 +129,28 @@ spec:
defaultMode: 0555
- name: octavia-etc
secret:
secretName: octavia-etc
secretName: {{ $configMapName }}
defaultMode: 0444
- name: pod-shared
emptyDir: {}
- name: run
hostPath:
path: /run
{{ if $mounts_octavia_worker.volumes }}{{ toYaml $mounts_octavia_worker.volumes | indent 8 }}{{ end }}
{{- end }}
{{- end }}
{{- if .Values.manifests.daemonset_worker }}
{{- $envAll := . }}
{{- $daemonset := "worker" }}
{{- $configMapName := "octavia-etc" }}
{{- $serviceAccountName := "octavia-worker" }}
{{- $dependencyOpts := dict "envAll" $envAll "dependencyMixinParam" $envAll.Values.network.backend "dependencyKey" "worker" -}}
{{- $_ := include "helm-toolkit.utils.dependency_resolver" $dependencyOpts | toString | fromYaml }}
{{ tuple $envAll "worker" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include "octavia.worker.daemonset" | toString | fromYaml }}
{{- $configmap_yaml := "octavia.configmap.etc" }}
{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include "helm-toolkit.utils.daemonset_overrides" }}
{{- end }}

View File

@@ -33,7 +33,6 @@ httpGet:
{{- $envAll := . }}
{{- $mounts_octavia_api := .Values.pod.mounts.octavia_api.octavia_api }}
{{- $mounts_octavia_driver_agent := .Values.pod.mounts.octavia_api.octavia_driver_agent }}
{{- $mounts_octavia_api_init := .Values.pod.mounts.octavia_api.init_container }}
{{- $serviceAccountName := "octavia-api" }}
@@ -92,8 +91,6 @@ spec:
{{ dict "envAll" $envAll "component" "api" "container" "octavia-api" "type" "readiness" "probeTemplate" (include "octaviaApiReadinessProbeTemplate" $envAll | fromYaml) | include "helm-toolkit.snippets.kubernetes_probe" | indent 10 }}
{{ dict "envAll" $envAll "component" "api" "container" "octavia-api" "type" "liveness" "probeTemplate" (include "octaviaApiLivenessProbeTemplate" $envAll | fromYaml) | include "helm-toolkit.snippets.kubernetes_probe" | indent 10 }}
volumeMounts:
- name: run-openvswitch
mountPath: /var/run/ovn
- name: pod-etc-octavia
mountPath: /etc/octavia
- name: octavia-bin
@@ -114,54 +111,9 @@ spec:
subPath: {{ base .Values.conf.octavia.DEFAULT.log_config_append }}
readOnly: true
{{- end }}
- name: octavia-driver-agents
mountPath: /var/run/octavia
{{ if $mounts_octavia_api.volumeMounts }}{{ toYaml $mounts_octavia_api.volumeMounts | indent 12 }}{{ end }}
- name: octavia-driver-agent
{{ tuple $envAll "octavia_driver_agent" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.driver_agent | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "octavia_api" "container" "octavia_driver_agent" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
command:
- /tmp/octavia-driver-agent.sh
- start
lifecycle:
preStop:
exec:
command:
- /tmp/octavia-driver-agent.sh
- stop
volumeMounts:
- name: pod-etc-octavia
mountPath: /etc/octavia
readOnly: true
- name: octavia-bin
mountPath: /tmp/octavia-driver-agent.sh
subPath: octavia-driver-agent.sh
readOnly: true
- name: octavia-etc
mountPath: /etc/octavia/octavia.conf
subPath: octavia.conf
readOnly: true
{{- if .Values.conf.octavia.DEFAULT.log_config_append }}
- name: octavia-etc
mountPath: {{ .Values.conf.octavia.DEFAULT.log_config_append }}
subPath: {{ base .Values.conf.octavia.DEFAULT.log_config_append }}
readOnly: true
{{- end }}
- name: octavia-driver-agents
mountPath: /var/run/octavia
- name: run-openvswitch
mountPath: /var/run/ovn
{{ if $mounts_octavia_driver_agent.volumeMounts }}{{ toYaml $mounts_octavia_driver_agent.volumeMounts | indent 12 }}{{ end }}
volumes:
- name: pod-etc-octavia
emptyDir: {}
- name: run-openvswitch
hostPath:
path: /run/openvswitch
type: DirectoryOrCreate
- name: octavia-driver-agents
emptyDir: {}
- name: octavia-bin
configMap:
name: octavia-bin

View File

@@ -0,0 +1,117 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- define "octaviaDriverAgentLivenessProbeTemplate" }}
{{- end }}

{{- define "octaviaDriverAgentReadinessProbeTemplate" }}
{{- end }}

{{- if .Values.manifests.deployment_driver_agent }}
{{- $envAll := . }}

{{- $mounts_octavia_driver_agent := .Values.pod.mounts.octavia_driver_agent.octavia_driver_agent }}
{{/* NOTE: fixed "dirver" -> "driver" typo in the local variable name */}}
{{- $mounts_octavia_driver_agent_init := .Values.pod.mounts.octavia_driver_agent.init_container }}

{{- $serviceAccountName := "octavia-driver-agent" }}
{{ tuple $envAll "driver_agent" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: octavia-driver-agent
  annotations:
    {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
  labels:
{{ tuple $envAll "octavia" "driver_agent" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
  replicas: {{ .Values.pod.replicas.driver_agent }}
  selector:
    matchLabels:
{{ tuple $envAll "octavia" "driver_agent" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
{{ tuple $envAll | include "helm-toolkit.snippets.kubernetes_upgrades_deployment" | indent 2 }}
  template:
    metadata:
      labels:
{{ tuple $envAll "octavia" "driver_agent" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
        configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
        configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
{{ tuple "octavia_driver_agent" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
    spec:
{{ tuple "octavia_driver_agent" . | include "helm-toolkit.snippets.kubernetes_pod_priority_class" | indent 6 }}
{{ tuple "octavia_driver_agent" . | include "helm-toolkit.snippets.kubernetes_pod_runtime_class" | indent 6 }}
      serviceAccountName: {{ $serviceAccountName }}
      affinity:
{{ tuple $envAll "octavia" "driver_agent" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
      nodeSelector:
        {{ .Values.labels.driver_agent.node_selector_key }}: {{ .Values.labels.driver_agent.node_selector_value }}
      initContainers:
{{ tuple $envAll "driver_agent" $mounts_octavia_driver_agent_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
      containers:
        - name: octavia-driver-agent
{{ tuple $envAll "octavia_driver_agent" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.driver_agent | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "octavia_driver_agent" "container" "octavia_driver_agent" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          command:
            - /tmp/octavia-driver-agent.sh
            - start
          lifecycle:
            preStop:
              exec:
                command:
                  - /tmp/octavia-driver-agent.sh
                  - stop
          volumeMounts:
            - name: pod-etc-octavia
              mountPath: /etc/octavia
            - name: octavia-bin
              mountPath: /tmp/octavia-driver-agent.sh
              subPath: octavia-driver-agent.sh
              readOnly: true
            - name: octavia-etc
              mountPath: /etc/octavia/octavia.conf
              subPath: octavia.conf
              readOnly: true
{{- if .Values.conf.octavia.DEFAULT.log_config_append }}
            - name: octavia-etc
              mountPath: {{ .Values.conf.octavia.DEFAULT.log_config_append }}
              subPath: {{ base .Values.conf.octavia.DEFAULT.log_config_append }}
              readOnly: true
{{- end }}
            - name: octavia-driver-agents
              mountPath: /var/run/octavia
            - name: run-openvswitch
              mountPath: /var/run/ovn
{{ if $mounts_octavia_driver_agent.volumeMounts }}{{ toYaml $mounts_octavia_driver_agent.volumeMounts | indent 12 }}{{ end }}
      volumes:
        - name: pod-etc-octavia
          emptyDir: {}
        - name: run-openvswitch
          hostPath:
            path: /run/openvswitch
            type: DirectoryOrCreate
        - name: octavia-driver-agents
          emptyDir: {}
        - name: octavia-bin
          configMap:
            name: octavia-bin
            defaultMode: 0555
        - name: octavia-etc
          secret:
            secretName: octavia-etc
            defaultMode: 0444
{{ if $mounts_octavia_driver_agent.volumes }}{{ toYaml $mounts_octavia_driver_agent.volumes | indent 8 }}{{ end }}
{{- end }}

View File

@@ -24,8 +24,11 @@ labels:
api:
node_selector_key: openstack-control-plane
node_selector_value: enabled
driver_agent:
node_selector_key: openstack-network-node
node_selector_value: enabled
worker:
node_selector_key: openstack-control-plane
node_selector_key: openstack-network-node
node_selector_value: enabled
housekeeping:
node_selector_key: openstack-control-plane
@@ -40,23 +43,24 @@ labels:
images:
tags:
test: docker.io/xrally/xrally-openstack:2.0.0
bootstrap: quay.io/airshipit/heat:2024.1-ubuntu_jammy
db_init: quay.io/airshipit/heat:2024.1-ubuntu_jammy
octavia_db_sync: quay.io/airshipit/octavia:master-ubuntu
db_drop: quay.io/airshipit/heat:2024.1-ubuntu_jammy
bootstrap: quay.io/airshipit/heat:2025.1-ubuntu_jammy
db_init: quay.io/airshipit/heat:2025.1-ubuntu_jammy
octavia_db_sync: quay.io/airshipit/octavia:2025.1-ubuntu_jammy
db_drop: quay.io/airshipit/heat:2025.1-ubuntu_jammy
rabbit_init: docker.io/rabbitmq:3.13-management
ks_user: quay.io/airshipit/heat:2024.1-ubuntu_jammy
ks_service: quay.io/airshipit/heat:2024.1-ubuntu_jammy
ks_endpoints: quay.io/airshipit/heat:2024.1-ubuntu_jammy
ks_user: quay.io/airshipit/heat:2025.1-ubuntu_jammy
ks_service: quay.io/airshipit/heat:2025.1-ubuntu_jammy
ks_endpoints: quay.io/airshipit/heat:2025.1-ubuntu_jammy
dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal
image_repo_sync: docker.io/docker:17.07.0
octavia_api: quay.io/airshipit/octavia:2024.1-ubuntu_jammy
octavia_driver_agent: quay.io/airshipit/octavia:2024.1-ubuntu_jammy
octavia_worker: quay.io/airshipit/octavia:2024.1-ubuntu_jammy
octavia_housekeeping: quay.io/airshipit/octavia:2024.1-ubuntu_jammy
octavia_health_manager: quay.io/airshipit/octavia:2024.1-ubuntu_jammy
octavia_health_manager_init: quay.io/airshipit/openstack-client:2024.1-ubuntu_jammy
openvswitch_vswitchd: docker.io/openstackhelm/openvswitch:latest-ubuntu_focal
octavia_api: quay.io/airshipit/octavia:2025.1-ubuntu_jammy
octavia_driver_agent: quay.io/airshipit/octavia:2025.1-ubuntu_jammy
octavia_worker: quay.io/airshipit/octavia:2025.1-ubuntu_jammy
octavia_worker_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy
octavia_housekeeping: quay.io/airshipit/octavia:2025.1-ubuntu_jammy
octavia_health_manager: quay.io/airshipit/octavia:2025.1-ubuntu_jammy
octavia_health_manager_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy
openvswitch_vswitchd: docker.io/openstackhelm/openvswitch:latest-ubuntu_jammy
pull_policy: "IfNotPresent"
local_registry:
active: false
@@ -117,6 +121,27 @@ dependencies:
service: oslo_cache
- endpoint: internal
service: network
driver_agent:
jobs:
- octavia-db-sync
- octavia-ks-user
- octavia-ks-endpoints
- octavia-rabbit-init
services:
- endpoint: internal
service: oslo_db
- endpoint: internal
service: oslo_db_persistence
- endpoint: internal
service: identity
- endpoint: internal
service: oslo_messaging
- endpoint: internal
service: oslo_cache
- endpoint: internal
service: network
- endpoint: internal
service: load_balancer
worker:
jobs:
- octavia-db-sync
@@ -222,8 +247,8 @@ conf:
DEFAULT:
log_config_append: /etc/octavia/logging.conf
ovn:
ovn_nb_connection: unix:/run/ovn/ovnnb_db.sock
ovn_sb_connection: unix:/run/ovn/ovnsb_db.sock
ovn_nb_connection: unix:/var/run/ovn/ovnnb_db.sock
ovn_sb_connection: unix:/var/run/ovn/ovnsb_db.sock
api_settings:
api_handler: queue_producer
bind_host: 0.0.0.0
@@ -233,7 +258,7 @@ conf:
health_manager:
bind_port: 5555
bind_ip: 0.0.0.0
controller_ip_port_list: 0.0.0.0:5555
controller_ip_port_list: null
heartbeat_key: insecure
keystone_authtoken:
auth_type: password
@@ -241,12 +266,12 @@ conf:
memcache_security_strategy: ENCRYPT
service_type: load-balancer
certificates:
ca_private_key_passphrase: foobar
ca_private_key: /etc/octavia/certs/private/cakey.pem
ca_certificate: /etc/octavia/certs/ca_01.pem
ca_private_key_passphrase: not-secure-passphrase
ca_private_key: /etc/octavia/certs/private/server_ca.key.pem
ca_certificate: /etc/octavia/certs/server_ca.cert.pem
haproxy_amphora:
server_ca: /etc/octavia/certs/ca_01.pem
client_cert: /etc/octavia/certs/client.pem
server_ca: /etc/octavia/certs/server_ca-chain.cert.pem
client_cert: /etc/octavia/certs/private/client.cert-and-key.pem
base_path: /var/lib/octavia
base_cert_dir: /var/lib/octavia/certs
controller_worker:
@@ -263,6 +288,7 @@ conf:
amp_active_retries: 100
amp_active_wait_sec: 2
loadbalancer_topology: SINGLE
client_ca: /etc/octavia/certs/client_ca.cert.pem
oslo_messaging:
topic: octavia_prov
rpc_thread_pool_size: 2
@@ -293,10 +319,10 @@ conf:
- context
- default
logger_root:
level: WARNING
handlers: 'null'
level: INFO
handlers: stdout
logger_octavia:
level: WARNING
level: INFO
handlers:
- stdout
qualname: octavia
@@ -589,14 +615,27 @@ pod:
capabilities:
add:
- SYS_NICE
octavia_driver_agent:
container:
octavia_driver_agent:
capabilities:
add:
- SYS_NICE
runAsUser: 42424
octavia_worker:
container:
octavia_worker_nic_init:
runAsUser: 0
capabilities:
add:
- NET_ADMIN
- NET_RAW
- NET_BIND_SERVICE
octavia_worker:
runAsUser: 42424
runAsUser: 0
capabilities:
add:
- NET_ADMIN
octavia_housekeeping:
container:
octavia_housekeeping:
@@ -627,6 +666,8 @@ pod:
octavia_api:
volumeMounts:
volumes:
octavia_driver_agent:
init_container: null
octavia_driver_agent:
volumeMounts:
volumes:
@@ -652,7 +693,7 @@ pod:
volumes:
replicas:
api: 1
worker: 1
driver_agent: 1
housekeeping: 1
lifecycle:
upgrades:
@@ -792,8 +833,9 @@ manifests:
configmap_bin: true
configmap_etc: true
daemonset_health_manager: true
daemonset_worker: true
deployment_api: true
deployment_worker: true
deployment_driver_agent: true
deployment_housekeeping: true
ingress_api: true
job_bootstrap: true

View File

@@ -0,0 +1,8 @@
---
octavia:
- Run driver agent as a separate deployment on network nodes
- Run worker as a daemonset instead of deployment on network nodes
- |
    The worker daemonset creates an interface attached to the
    Octavia management network so the workers can reach the amphora instances
...

View File

@@ -8,5 +8,5 @@ spec:
# we need Calico to skip these interfaces while discovering
# network changes on the host, to prevent announcing unnecessary networks.
- name: IP_AUTODETECTION_METHOD
value: "skip-interface=br-ex|provider.*|client.*"
value: "skip-interface=br-ex|provider.*|client.*|o-hm.*|o-w.*"
...

View File

@@ -0,0 +1,31 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Persist a few facts about the Ansible inventory to /tmp on the target node
# so later (non-Ansible) test scripts can read them from flat files instead
# of re-querying the inventory:
#   /tmp/inventory_default_dev.txt       - device carrying the default IPv4 route
#   /tmp/inventory_k8s_control_plane.txt - hostnames of control-plane nodes
#   /tmp/inventory_k8s_nodes.txt         - hostnames of worker nodes
- name: Set cluster device
  set_fact:
    default_dev: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['interface'] }}"

- name: Stats
  shell: |
    echo {{ default_dev }} > /tmp/inventory_default_dev.txt
    # `echo -n >` truncates the file; groups default to [] so a missing
    # group yields an empty file rather than a template error.
    echo -n > /tmp/inventory_k8s_control_plane.txt
    {% for host in (groups['k8s_control_plane'] | default([])) %}
    echo {{ hostvars[host].ansible_hostname }} >> /tmp/inventory_k8s_control_plane.txt
    {% endfor %}
    echo -n > /tmp/inventory_k8s_nodes.txt
    {% for host in (groups['k8s_nodes'] | default([])) %}
    echo {{ hostvars[host].ansible_hostname }} >> /tmp/inventory_k8s_nodes.txt
    {% endfor %}
...

View File

@@ -108,4 +108,10 @@
when:
- ingress_setup
- inventory_hostname in (groups['primary'] | default([]))
- name: Include env inventory tasks
include_tasks:
file: env_inventory.yaml
when:
- inventory_hostname in (groups['primary'] | default([]))
...

View File

@@ -26,6 +26,10 @@
shell: |
iptables -t nat -A POSTROUTING -o {{ cluster_default_dev }} -s {{ openstack_provider_network_cidr }} -j MASQUERADE
- name: Set up FORWARD for packets going from VMs
shell: |
iptables -t filter -I FORWARD -s {{ openstack_provider_network_cidr }} -j ACCEPT
# We use tcp proxy to forward traffic to make it possible to connect
# to the Openstack public endpoint (managed by Metallb) from VMs.
- name: Setup TCP proxy

View File

@@ -35,6 +35,7 @@
- git
- git-review
- gnupg2
- htop
- iptables
- ipvsadm
- jq

View File

@@ -0,0 +1,24 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Append the CI job's deployment settings to ~/.bashrc so interactive debug
# sessions on the node see the same environment the job used. The unquoted
# heredoc delimiter is deliberate: OPENSTACK_RELEASE etc. are expanded NOW,
# freezing the values into .bashrc.
set -xe

# Quote the redirection target (SC2086) so a HOME containing whitespace
# cannot word-split the path.
cat >> "${HOME}/.bashrc" <<EOF
export RUN_HELM_TESTS=no
export OS_CLOUD="openstack_helm"
export OPENSTACK_RELEASE="${OPENSTACK_RELEASE}"
export CONTAINER_DISTRO_NAME="${CONTAINER_DISTRO_NAME}"
export CONTAINER_DISTRO_VERSION="${CONTAINER_DISTRO_VERSION}"
export FEATURES="${FEATURES}"
EOF

View File

@@ -1,84 +0,0 @@
#!/bin/bash
# Copyright 2019 Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
export OS_CLOUD=openstack_helm
: ${OSH_LB_SUBNET:="172.31.0.0/24"}
: ${OSH_LB_SUBNET_START:="172.31.0.2"}
: ${OSH_LB_SUBNET_END="172.31.0.200"}
: ${OSH_LB_AMPHORA_IMAGE_NAME:="amphora-x64-haproxy"}
: ${OSH_AMPHORA_IMAGE_FILE_PATH:=""}
sudo pip3 install python-octaviaclient==1.6.0
# NOTE(hagun.kim): These resources are required to use Octavia service.
# Create Octavia management network and its security group
openstack network create lb-mgmt-net -f value -c id
openstack subnet create --subnet-range $OSH_LB_SUBNET --allocation-pool start=$OSH_LB_SUBNET_START,end=$OSH_LB_SUBNET_END --network lb-mgmt-net lb-mgmt-subnet -f value -c id
openstack security group create lb-mgmt-sec-grp
openstack security group rule create --protocol icmp lb-mgmt-sec-grp
openstack security group rule create --protocol tcp --dst-port 22 lb-mgmt-sec-grp
openstack security group rule create --protocol tcp --dst-port 9443 lb-mgmt-sec-grp
# Create security group for Octavia health manager
openstack security group create lb-health-mgr-sec-grp
openstack security group rule create --protocol udp --dst-port 5555 lb-health-mgr-sec-grp
# Create ports for health manager (octavia-health-manager-port-{KUBE_NODE_NAME})
# octavia-health-manager pod will be run on each controller node as daemonset.
# The pod will create o-hm0 NIC to each controller node.
# Each o-hm0 NIC uses the IP of these ports.
CONTROLLER_IP_PORT_LIST=''
CTRLS=$(kubectl get nodes -l openstack-control-plane=enabled -o name | awk -F"/" '{print $2}')
for node in $CTRLS
do
PORTNAME=octavia-health-manager-port-$node
openstack port create --security-group lb-health-mgr-sec-grp --device-owner Octavia:health-mgr --host=$node -c id -f value --network lb-mgmt-net $PORTNAME
IP=$(openstack port show $PORTNAME -c fixed_ips -f value | awk -F',' '{print $1}' | awk -F'=' '{print $2}' | tr -d \')
if [ -z $CONTROLLER_IP_PORT_LIST ]; then
CONTROLLER_IP_PORT_LIST=$IP:5555
else
CONTROLLER_IP_PORT_LIST=$CONTROLLER_IP_PORT_LIST,$IP:5555
fi
done
# Each health manager information should be passed into octavia configuration.
echo $CONTROLLER_IP_PORT_LIST > /tmp/octavia_hm_controller_ip_port_list
# Create a flavor for amphora instance
openstack flavor create --id auto --ram 1024 --disk 2 --vcpus 1 --private m1.amphora
# Create key pair to connect amphora instance via management network
ssh-keygen -b 2048 -t rsa -N '' -f ~/.ssh/octavia_ssh_key
openstack keypair create --public-key ~/.ssh/octavia_ssh_key.pub octavia_ssh_key
# Create amphora image from file. Default is https://tarballs.openstack.org/octavia/test-images/
if [ "$OSH_AMPHORA_IMAGE_FILE_PATH" == "" ]; then
curl https://tarballs.openstack.org/octavia/test-images/test-only-amphora-x64-haproxy-ubuntu-xenial.qcow2 \
-o /tmp/test-only-amphora-x64-haproxy-ubuntu-xenial.qcow2
OSH_AMPHORA_IMAGE_FILE_PATH=/tmp/test-only-amphora-x64-haproxy-ubuntu-xenial.qcow2
fi
OSH_AMPHORA_IMAGE_ID=$(openstack image create -f value -c id \
--public \
--container-format=bare \
--disk-format qcow2 < $OSH_AMPHORA_IMAGE_FILE_PATH \
$OSH_LB_AMPHORA_IMAGE_NAME)
openstack image set --tag amphora $OSH_AMPHORA_IMAGE_ID

View File

@@ -1,121 +0,0 @@
#!/bin/bash
# Copyright 2019 Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
export OS_CLOUD=openstack_helm
: ${OSH_LB_AMPHORA_IMAGE_NAME:="amphora-x64-haproxy"}
: ${OSH_LB_HM_HOST_PORT:="5555"}
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
tee /tmp/octavia.yaml <<EOF
pod:
mounts:
octavia_api:
octavia_api:
volumeMounts:
- name: octavia-certs
mountPath: /etc/octavia/certs/private/cakey.pem
subPath: cakey.pem
- name: octavia-certs
mountPath: /etc/octavia/certs/ca_01.pem
subPath: ca_01.pem
- name: octavia-certs
mountPath: /etc/octavia/certs/client.pem
subPath: client.pem
volumes:
- name: octavia-certs
secret:
secretName: octavia-certs
defaultMode: 0644
octavia_worker:
octavia_worker:
volumeMounts:
- name: octavia-certs
mountPath: /etc/octavia/certs/private/cakey.pem
subPath: cakey.pem
- name: octavia-certs
mountPath: /etc/octavia/certs/ca_01.pem
subPath: ca_01.pem
- name: octavia-certs
mountPath: /etc/octavia/certs/client.pem
subPath: client.pem
volumes:
- name: octavia-certs
secret:
secretName: octavia-certs
defaultMode: 0644
octavia_housekeeping:
octavia_housekeeping:
volumeMounts:
- name: octavia-certs
mountPath: /etc/octavia/certs/private/cakey.pem
subPath: cakey.pem
- name: octavia-certs
mountPath: /etc/octavia/certs/ca_01.pem
subPath: ca_01.pem
- name: octavia-certs
mountPath: /etc/octavia/certs/client.pem
subPath: client.pem
volumes:
- name: octavia-certs
secret:
secretName: octavia-certs
defaultMode: 0644
octavia_health_manager:
octavia_health_manager:
volumeMounts:
- name: octavia-certs
mountPath: /etc/octavia/certs/private/cakey.pem
subPath: cakey.pem
- name: octavia-certs
mountPath: /etc/octavia/certs/ca_01.pem
subPath: ca_01.pem
- name: octavia-certs
mountPath: /etc/octavia/certs/client.pem
subPath: client.pem
volumes:
- name: octavia-certs
secret:
secretName: octavia-certs
defaultMode: 0644
conf:
octavia:
controller_worker:
amp_image_owner_id: $(openstack image show $OSH_LB_AMPHORA_IMAGE_NAME -f value -c owner)
amp_secgroup_list: $(openstack security group list -f value | grep lb-mgmt-sec-grp | awk '{print $1}')
amp_flavor_id: $(openstack flavor show m1.amphora -f value -c id)
amp_boot_network_list: $(openstack network list --name lb-mgmt-net -f value -c ID)
health_manager:
bind_port: $OSH_LB_HM_HOST_PORT
bind_ip: 0.0.0.0
controller_ip_port_list: $(cat /tmp/octavia_hm_controller_ip_port_list)
EOF
helm upgrade --install octavia ./octavia \
--namespace=openstack \
--values=/tmp/octavia.yaml \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_OCTAVIA}
#NOTE: Wait for deploy
helm osh wait-for-pods openstack
#NOTE: Validate Deployment info
export OS_CLOUD=openstack_helm
openstack service list
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx

View File

@@ -0,0 +1,157 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Generates a throw-away dual-CA PKI for Octavia TLS testing: two separate
# CA trees (client and server), each with a root CA and an intermediate CA.
# The files Octavia consumes are staged under ./dual_ca/etc/octavia/certs.
echo "!!!!!!!!!!!!!!!Do not use this script for deployments!!!!!!!!!!!!!"
echo "Please use the Octavia Certificate Configuration guide:"
echo "https://docs.openstack.org/octavia/latest/admin/guides/certificates.html"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
# This script produces weak security PKI to save resources in the test gates.
# It should be modified to use stronger encryption (aes256), better pass
# phrases, and longer keys (4096).
# Please see the Octavia Certificate Configuration guide:
# https://docs.openstack.org/octavia/latest/admin/guides/certificates.html
set -x -e
# openssl.cnf is expected to sit next to this script.
OPENSSL_CONF="$(readlink -f "$(dirname "$0")")"/openssl.cnf
CA_PATH=dual_ca
# Start from a clean output tree on every run.
rm -rf $CA_PATH
mkdir $CA_PATH
chmod 700 $CA_PATH
cd $CA_PATH
mkdir -p etc/octavia/certs
chmod 700 etc/octavia/certs
###### Client Root CA
mkdir client_ca
cd client_ca
# Standard `openssl ca` database layout, matching the [CA_default] section
# of openssl.cnf: certs/, crl/, newcerts/, private/, index.txt and serial.
mkdir certs crl newcerts private
chmod 700 private
touch index.txt
echo 1000 > serial
# Create the client CA private key
openssl genpkey -algorithm RSA -out private/ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase
chmod 400 private/ca.key.pem
# Create the client CA root certificate (self-signed, 7300 days = 20 years)
openssl req -config ${OPENSSL_CONF} -key private/ca.key.pem -new -x509 -sha256 -extensions v3_ca -days 7300 -out certs/ca.cert.pem -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ClientRootCA" -passin pass:not-secure-passphrase
###### Client Intermediate CA
mkdir intermediate_ca
mkdir intermediate_ca/certs intermediate_ca/crl intermediate_ca/newcerts intermediate_ca/private
chmod 700 intermediate_ca/private
touch intermediate_ca/index.txt
echo 1000 > intermediate_ca/serial
# Create the client intermediate CA private key
openssl genpkey -algorithm RSA -out intermediate_ca/private/intermediate.ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase
chmod 400 intermediate_ca/private/intermediate.ca.key.pem
# Create the client intermediate CA certificate signing request
openssl req -config ${OPENSSL_CONF} -key intermediate_ca/private/intermediate.ca.key.pem -new -sha256 -out intermediate_ca/client_intermediate.csr -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ClientIntermediateCA" -passin pass:not-secure-passphrase
# Create the client intermediate CA certificate, signed via the
# [CA_intermediate] section of openssl.cnf (3650 days = 10 years)
openssl ca -config ${OPENSSL_CONF} -name CA_intermediate -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in intermediate_ca/client_intermediate.csr -out intermediate_ca/certs/intermediate.cert.pem -passin pass:not-secure-passphrase -batch
# Create the client CA certificate chain (intermediate first, then root)
cat intermediate_ca/certs/intermediate.cert.pem certs/ca.cert.pem > intermediate_ca/ca-chain.cert.pem
###### Create the client key and certificate
openssl genpkey -algorithm RSA -out intermediate_ca/private/controller.key.pem -aes-128-cbc -pass pass:not-secure-passphrase
chmod 400 intermediate_ca/private/controller.key.pem
# Create the client controller certificate signing request
openssl req -config ${OPENSSL_CONF} -key intermediate_ca/private/controller.key.pem -new -sha256 -out intermediate_ca/controller.csr -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=OctaviaController" -passin pass:not-secure-passphrase
# Create the client controller certificate (1825 days = 5 years)
openssl ca -config ${OPENSSL_CONF} -name CA_intermediate -extensions usr_cert -days 1825 -notext -md sha256 -in intermediate_ca/controller.csr -out intermediate_ca/certs/controller.cert.pem -passin pass:not-secure-passphrase -batch
# Build the concatenated client cert and key; `openssl rsa` first strips the
# passphrase so the key can be read unattended, then the cert is appended.
openssl rsa -in intermediate_ca/private/controller.key.pem -out intermediate_ca/private/client.cert-and-key.pem -passin pass:not-secure-passphrase
cat intermediate_ca/certs/controller.cert.pem >> intermediate_ca/private/client.cert-and-key.pem
# We are done with the client CA
cd ..
###### Stash the octavia default client CA cert files
cp client_ca/intermediate_ca/ca-chain.cert.pem etc/octavia/certs/client_ca.cert.pem
chmod 444 etc/octavia/certs/client_ca.cert.pem
cp client_ca/intermediate_ca/private/client.cert-and-key.pem etc/octavia/certs/client.cert-and-key.pem
chmod 600 etc/octavia/certs/client.cert-and-key.pem
###### Server Root CA
mkdir server_ca
cd server_ca
mkdir certs crl newcerts private
chmod 700 private
touch index.txt
echo 1000 > serial
# Create the server CA private key
openssl genpkey -algorithm RSA -out private/ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase
chmod 400 private/ca.key.pem
# Create the server CA root certificate
openssl req -config ${OPENSSL_CONF} -key private/ca.key.pem -new -x509 -sha256 -extensions v3_ca -days 7300 -out certs/ca.cert.pem -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ServerRootCA" -passin pass:not-secure-passphrase
###### Server Intermediate CA
mkdir intermediate_ca
mkdir intermediate_ca/certs intermediate_ca/crl intermediate_ca/newcerts intermediate_ca/private
chmod 700 intermediate_ca/private
touch intermediate_ca/index.txt
echo 1000 > intermediate_ca/serial
# Create the server intermediate CA private key
openssl genpkey -algorithm RSA -out intermediate_ca/private/intermediate.ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase
chmod 400 intermediate_ca/private/intermediate.ca.key.pem
# Create the server intermediate CA certificate signing request
openssl req -config ${OPENSSL_CONF} -key intermediate_ca/private/intermediate.ca.key.pem -new -sha256 -out intermediate_ca/server_intermediate.csr -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ServerIntermediateCA" -passin pass:not-secure-passphrase
# Create the server intermediate CA certificate
openssl ca -config ${OPENSSL_CONF} -name CA_intermediate -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in intermediate_ca/server_intermediate.csr -out intermediate_ca/certs/intermediate.cert.pem -passin pass:not-secure-passphrase -batch
# Create the server CA certificate chain
cat intermediate_ca/certs/intermediate.cert.pem certs/ca.cert.pem > intermediate_ca/ca-chain.cert.pem
# We are done with the server CA
cd ..
###### Stash the octavia default server CA cert files
cp server_ca/intermediate_ca/ca-chain.cert.pem etc/octavia/certs/server_ca-chain.cert.pem
chmod 444 etc/octavia/certs/server_ca-chain.cert.pem
# NOTE(review): the intermediate cert/key pair below is presumably the
# signing CA Octavia uses for per-amphora certificates — confirm against the
# chart's certificates config before changing names or modes.
cp server_ca/intermediate_ca/certs/intermediate.cert.pem etc/octavia/certs/server_ca.cert.pem
chmod 400 etc/octavia/certs/server_ca.cert.pem
cp server_ca/intermediate_ca/private/intermediate.ca.key.pem etc/octavia/certs/server_ca.key.pem
chmod 400 etc/octavia/certs/server_ca.key.pem
##### Validate the Octavia PKI files
# Drop tracing so the verification results are readable in the gate logs.
set +x
echo "################# Verifying the Octavia files ###########################"
openssl verify -CAfile etc/octavia/certs/client_ca.cert.pem etc/octavia/certs/client.cert-and-key.pem
openssl verify -CAfile etc/octavia/certs/server_ca-chain.cert.pem etc/octavia/certs/server_ca.cert.pem
# We are done, stop enforcing shell errexit
set +e
echo "!!!!!!!!!!!!!!!Do not use this script for deployments!!!!!!!!!!!!!"
echo "Please use the Octavia Certificate Configuration guide:"
echo "https://docs.openstack.org/octavia/latest/admin/guides/certificates.html"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"

View File

@@ -0,0 +1,358 @@
---
heat_template_version: 2021-04-16
parameters:
public_network_name:
type: string
default: public
public_physical_network_name:
type: string
default: public
public_subnet_name:
type: string
default: public
public_subnet_cidr:
type: string
default: 172.24.4.0/24
public_subnet_gateway:
type: string
default: 172.24.4.1
public_allocation_pool_start:
type: string
default: 172.24.4.10
public_allocation_pool_end:
type: string
default: 172.24.4.254
private_subnet_cidr:
type: string
default: 192.168.128.0/24
dns_nameserver:
type: string
default: 172.24.4.1
image_name:
type: string
default: Ubuntu Jammy
image_url:
type: string
default: "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
ssh_key:
type: string
default: octavia-key
compute_flavor_id:
type: string
az_1:
type: string
az_2:
type: string
resources:
public_net:
type: OS::Neutron::ProviderNet
properties:
name:
get_param: public_network_name
router_external: true
physical_network:
get_param: public_physical_network_name
network_type: flat
public_subnet:
type: OS::Neutron::Subnet
properties:
name:
get_param: public_subnet_name
network:
get_resource: public_net
cidr:
get_param: public_subnet_cidr
gateway_ip:
get_param: public_subnet_gateway
enable_dhcp: false
dns_nameservers:
- get_param: public_subnet_gateway
allocation_pools:
- start: {get_param: public_allocation_pool_start}
end: {get_param: public_allocation_pool_end}
private_net:
type: OS::Neutron::Net
private_subnet:
type: OS::Neutron::Subnet
properties:
network:
get_resource: private_net
cidr:
get_param: private_subnet_cidr
dns_nameservers:
- get_param: dns_nameserver
image:
type: OS::Glance::WebImage
properties:
name:
get_param: image_name
location:
get_param: image_url
container_format: bare
disk_format: qcow2
min_disk: 3
visibility: public
flavor_vm:
type: OS::Nova::Flavor
properties:
name: m1.test
disk: 3
ram: 1024
vcpus: 2
wait_handle_1:
type: OS::Heat::WaitConditionHandle
wait_handle_2:
type: OS::Heat::WaitConditionHandle
server_1:
type: OS::Nova::Server
properties:
image:
get_resource: image
flavor:
get_resource: flavor_vm
key_name:
get_param: ssh_key
networks:
- port:
get_resource: server_port_1
user_data_format: RAW
user_data:
str_replace:
template: |
#!/bin/bash
echo "nameserver $nameserver" > /etc/resolv.conf
echo "127.0.0.1 $(hostname)" >> /etc/hosts
systemctl stop systemd-resolved
systemctl disable systemd-resolved
mkdir -p /var/www/html/
echo "Hello from server_1: $(hostname)" > /var/www/html/index.html
nohup python3 -m http.server 8000 --directory /var/www/html > /dev/null 2>&1 &
$wc_notify --data-binary '{ "status": "SUCCESS" }'
params:
$nameserver: {get_param: dns_nameserver}
$wc_notify: {get_attr: ['wait_handle_1', 'curl_cli']}
availability_zone: {get_param: az_1}
wait_server_1:
type: OS::Heat::WaitCondition
properties:
handle: {get_resource: wait_handle_1}
timeout: 1200
server_2:
type: OS::Nova::Server
properties:
image:
get_resource: image
flavor:
get_resource: flavor_vm
key_name:
get_param: ssh_key
networks:
- port:
get_resource: server_port_2
user_data_format: RAW
user_data:
str_replace:
template: |
#!/bin/bash
echo "nameserver $nameserver" > /etc/resolv.conf
echo "127.0.0.1 $(hostname)" >> /etc/hosts
systemctl stop systemd-resolved
systemctl disable systemd-resolved
mkdir -p /var/www/html/
echo "Hello from server_2: $(hostname)" > /var/www/html/index.html
nohup python3 -m http.server 8000 --directory /var/www/html > /dev/null 2>&1 &
$wc_notify --data-binary '{ "status": "SUCCESS" }'
params:
$nameserver: {get_param: dns_nameserver}
$wc_notify: {get_attr: ['wait_handle_2', 'curl_cli']}
availability_zone: {get_param: az_2}
wait_server_2:
type: OS::Heat::WaitCondition
properties:
handle: {get_resource: wait_handle_2}
timeout: 1200
security_group:
type: OS::Neutron::SecurityGroup
properties:
name: default_port_security_group
rules:
- remote_ip_prefix: 0.0.0.0/0
protocol: tcp
port_range_min: 22
port_range_max: 22
- remote_ip_prefix: 0.0.0.0/0
protocol: tcp
port_range_min: 8000
port_range_max: 8000
- remote_ip_prefix: 0.0.0.0/0
protocol: icmp
server_port_1:
type: OS::Neutron::Port
properties:
network:
get_resource: private_net
fixed_ips:
- subnet:
get_resource: private_subnet
security_groups:
- get_resource: security_group
server_floating_ip_1:
type: OS::Neutron::FloatingIP
properties:
floating_network:
get_resource: public_net
port_id:
get_resource: server_port_1
server_port_2:
type: OS::Neutron::Port
properties:
network:
get_resource: private_net
fixed_ips:
- subnet:
get_resource: private_subnet
security_groups:
- get_resource: security_group
server_floating_ip_2:
type: OS::Neutron::FloatingIP
properties:
floating_network:
get_resource: public_net
port_id:
get_resource: server_port_2
router:
type: OS::Neutron::Router
properties:
external_gateway_info:
network:
get_resource: public_net
router_interface:
type: OS::Neutron::RouterInterface
properties:
router_id:
get_resource: router
subnet_id:
get_resource: private_subnet
flavor_profile:
type: "OS::Octavia::FlavorProfile"
properties:
provider_name: amphora
flavor_data:
str_replace:
template: |
{
"loadbalancer_topology": "SINGLE",
"compute_flavor": "%compute_flavor%"
}
params:
"%compute_flavor%": {get_param: compute_flavor_id}
flavor:
type: "OS::Octavia::Flavor"
properties:
flavor_profile:
get_resource: flavor_profile
loadbalancer:
type: "OS::Octavia::LoadBalancer"
properties:
name: osh
provider: amphora
vip_subnet:
get_resource: private_subnet
flavor:
get_resource: flavor
floating_ip:
type: OS::Neutron::FloatingIP
properties:
floating_network: {get_resource: public_net}
port_id: {get_attr: [loadbalancer, vip_port_id]}
listener:
type: "OS::Octavia::Listener"
properties:
protocol_port: 80
protocol: "HTTP"
loadbalancer:
get_resource: loadbalancer
pool:
type: "OS::Octavia::Pool"
properties:
lb_algorithm: "ROUND_ROBIN"
listener:
get_resource: listener
protocol: "HTTP"
monitor:
type: "OS::Octavia::HealthMonitor"
properties:
delay: 3
max_retries: 9
timeout: 3
type: "PING"
pool:
get_resource: pool
pool_member_1:
type: "OS::Octavia::PoolMember"
properties:
subnet:
get_resource: private_subnet
protocol_port: 8000
pool:
get_resource: pool
address:
get_attr:
- "server_1"
- "first_address"
pool_member_2:
type: "OS::Octavia::PoolMember"
properties:
subnet:
get_resource: private_subnet
protocol_port: 8000
pool:
get_resource: pool
address:
get_attr:
- "server_2"
- "first_address"
...

View File

@@ -0,0 +1,186 @@
#!/bin/bash
# Copyright 2019 Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Deploys the Octavia chart: looks up the IDs of the amphora image, flavor,
# management network and security group created by the resources step,
# renders the chart values into /tmp/octavia.yaml and installs the chart.
set -xe
#NOTE: Define variables
: ${OSH_HELM_REPO:="../openstack-helm"}
: ${OSH_VALUES_OVERRIDES_PATH:="../openstack-helm/values_overrides"}
: ${OSH_EXTRA_HELM_ARGS_OCTAVIA:="$(helm osh get-values-overrides ${DOWNLOAD_OVERRIDES:-} -p ${OSH_VALUES_OVERRIDES_PATH} -c octavia ${FEATURES})"}
export OS_CLOUD=openstack_helm
# Resolve the OpenStack resource IDs that get substituted into the
# controller_worker section of the values file below.
OSH_AMPHORA_IMAGE_NAME="amphora-x64-haproxy-ubuntu-jammy"
OSH_AMPHORA_IMAGE_OWNER_ID=$(openstack image show "${OSH_AMPHORA_IMAGE_NAME}" -f value -c owner)
OSH_AMPHORA_SECGROUP_LIST=$(openstack security group list -f value | grep lb-mgmt-sec-grp | awk '{print $1}')
OSH_AMPHORA_FLAVOR_ID=$(openstack flavor show m1.amphora -f value -c id)
OSH_AMPHORA_BOOT_NETWORK_LIST=$(openstack network list --name lb-mgmt-net -f value -c ID)
# Test nodes are quite small (usually 8Gb RAM) and for testing Octavia
# we need two worker VM instances and one amphora VM instance.
# We are going to run them all on different K8s nodes.
# The /tmp/inventory_k8s_nodes.txt file is created by the deploy-env role and contains the list
# of all K8s nodes. Amphora instance is run on the first K8s node from the list.
OSH_AMPHORA_TARGET_HOSTNAME=$(sed -n '1p' /tmp/inventory_k8s_nodes.txt)
# Comma-separated ip:port list written out by the resources-prep step.
CONTROLLER_IP_PORT_LIST=$(cat /tmp/octavia_hm_controller_ip_port_list)
# Render the chart values. The heredoc is unquoted on purpose: the
# ${...} references above are expanded when /tmp/octavia.yaml is written.
# Each controller component mounts the dual-CA files from the
# octavia-certs secret created by the certificate step.
tee /tmp/octavia.yaml <<EOF
pod:
  mounts:
    octavia_api:
      octavia_api:
        volumeMounts:
          - name: octavia-certs
            mountPath: /etc/octavia/certs/server_ca.cert.pem
            subPath: server_ca.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/server_ca-chain.cert.pem
            subPath: server_ca-chain.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/private/server_ca.key.pem
            subPath: server_ca.key.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/client_ca.cert.pem
            subPath: client_ca.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/private/client.cert-and-key.pem
            subPath: client.cert-and-key.pem
        volumes:
          - name: octavia-certs
            secret:
              secretName: octavia-certs
              defaultMode: 0644
    octavia_worker:
      octavia_worker:
        volumeMounts:
          - name: octavia-certs
            mountPath: /etc/octavia/certs/server_ca.cert.pem
            subPath: server_ca.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/server_ca-chain.cert.pem
            subPath: server_ca-chain.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/private/server_ca.key.pem
            subPath: server_ca.key.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/client_ca.cert.pem
            subPath: client_ca.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/private/client.cert-and-key.pem
            subPath: client.cert-and-key.pem
        volumes:
          - name: octavia-certs
            secret:
              secretName: octavia-certs
              defaultMode: 0644
    octavia_housekeeping:
      octavia_housekeeping:
        volumeMounts:
          - name: octavia-certs
            mountPath: /etc/octavia/certs/server_ca.cert.pem
            subPath: server_ca.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/server_ca-chain.cert.pem
            subPath: server_ca-chain.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/private/server_ca.key.pem
            subPath: server_ca.key.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/client_ca.cert.pem
            subPath: client_ca.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/private/client.cert-and-key.pem
            subPath: client.cert-and-key.pem
        volumes:
          - name: octavia-certs
            secret:
              secretName: octavia-certs
              defaultMode: 0644
    octavia_health_manager:
      octavia_health_manager:
        volumeMounts:
          - name: octavia-certs
            mountPath: /etc/octavia/certs/server_ca.cert.pem
            subPath: server_ca.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/server_ca-chain.cert.pem
            subPath: server_ca-chain.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/private/server_ca.key.pem
            subPath: server_ca.key.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/client_ca.cert.pem
            subPath: client_ca.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/private/client.cert-and-key.pem
            subPath: client.cert-and-key.pem
        volumes:
          - name: octavia-certs
            secret:
              secretName: octavia-certs
              defaultMode: 0644
    octavia_driver_agent:
      octavia_driver_agent:
        volumeMounts:
          - name: octavia-certs
            mountPath: /etc/octavia/certs/server_ca.cert.pem
            subPath: server_ca.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/server_ca-chain.cert.pem
            subPath: server_ca-chain.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/private/server_ca.key.pem
            subPath: server_ca.key.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/client_ca.cert.pem
            subPath: client_ca.cert.pem
          - name: octavia-certs
            mountPath: /etc/octavia/certs/private/client.cert-and-key.pem
            subPath: client.cert-and-key.pem
        volumes:
          - name: octavia-certs
            secret:
              secretName: octavia-certs
              defaultMode: 0644
conf:
  octavia:
    controller_worker:
      amp_image_owner_id: ${OSH_AMPHORA_IMAGE_OWNER_ID}
      amp_secgroup_list: ${OSH_AMPHORA_SECGROUP_LIST}
      amp_flavor_id: ${OSH_AMPHORA_FLAVOR_ID}
      amp_boot_network_list: ${OSH_AMPHORA_BOOT_NETWORK_LIST}
      amp_image_tag: amphora
      amp_ssh_key_name: octavia-key
    health_manager:
      bind_port: 5555
      bind_ip: 0.0.0.0
      controller_ip_port_list: ${CONTROLLER_IP_PORT_LIST}
    task_flow:
      jobboard_enabled: false
    nova:
      availability_zone: nova:${OSH_AMPHORA_TARGET_HOSTNAME}
EOF
# Install (or upgrade) the chart with the generated values plus any
# feature-specific overrides collected by 'helm osh get-values-overrides'.
helm upgrade --install octavia ${OSH_HELM_REPO}/octavia \
  --namespace=openstack \
  --values=/tmp/octavia.yaml \
  ${OSH_EXTRA_HELM_ARGS:=} \
  ${OSH_EXTRA_HELM_ARGS_OCTAVIA}
#NOTE: Wait for deploy
helm osh wait-for-pods openstack
#NOTE: Validate Deployment info
openstack service list

View File

@@ -31,18 +31,17 @@ metadata:
name: octavia-certs
type: Opaque
data:
ca_01.pem: $(trim_data /tmp/octavia_certs/ca_01.pem)
cakey.pem: $(trim_data /tmp/octavia_certs/private/cakey.pem)
client.pem: $(trim_data /tmp/octavia_certs/client.pem)
server_ca.cert.pem: $(trim_data dual_ca/etc/octavia/certs/server_ca.cert.pem)
server_ca-chain.cert.pem: $(trim_data dual_ca/etc/octavia/certs/server_ca-chain.cert.pem)
server_ca.key.pem: $(trim_data dual_ca/etc/octavia/certs/server_ca.key.pem)
client_ca.cert.pem: $(trim_data dual_ca/etc/octavia/certs/client_ca.cert.pem)
client.cert-and-key.pem: $(trim_data dual_ca/etc/octavia/certs/client.cert-and-key.pem)
EOF
}| kubectl apply --namespace openstack -f -
}
rm -rf /tmp/octavia
git clone -b stable/stein https://github.com/openstack/octavia.git /tmp/octavia
cd /tmp/octavia/bin
rm -rf /tmp/octavia_certs
./create_certificates.sh /tmp/octavia_certs /tmp/octavia/etc/certificates/openssl.cnf
create_secret
(
cd "$(dirname "$0")";
./create_dual_intermediate_CA.sh
create_secret
)

View File

@@ -0,0 +1,116 @@
#!/bin/bash
# Copyright 2019 Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Prepares the OpenStack resources Octavia needs before the chart is
# deployed: the lb-mgmt network/subnet, security groups, per-node health
# manager and worker ports, the amphora flavor, keypair and glance image.
# All create calls are guarded by 'show || create' so reruns are idempotent.
set -xe
export OS_CLOUD=openstack_helm
SSH_DIR="${HOME}/.ssh"
# Make the keypair directory and /tmp (image cache) visible inside the
# openstack-client container.
OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS="${OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS} -v ${SSH_DIR}:${SSH_DIR} -v /tmp:/tmp"
export OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS
: ${OSH_LB_SUBNET:="172.31.0.0/24"}
: ${OSH_LB_SUBNET_START:="172.31.0.2"}
# NOTE: ':=' (not '='), consistent with the other defaults, so a variable
# that is set but empty also receives the default value.
: ${OSH_LB_SUBNET_END:="172.31.0.200"}
: ${OSH_AMPHORA_IMAGE_NAME:="amphora-x64-haproxy-ubuntu-jammy"}
: ${OSH_AMPHORA_IMAGE_FILE:="test-only-amphora-x64-haproxy-ubuntu-jammy.qcow2"}
: ${OSH_AMPHORA_IMAGE_URL:="https://tarballs.opendev.org/openstack/octavia/test-images/test-only-amphora-x64-haproxy-ubuntu-jammy.qcow2"}
# # This is for debugging, to be able to connect via ssh to the amphora instance from the cluster node
# # and make the amphora able to connect to Internet.
# # The /tmp/inventory_default_dev.txt file is created by the deploy-env role and contains
# # the name of the default interface on a node.
# sudo iptables -t nat -I POSTROUTING -o $(cat /tmp/inventory_default_dev.txt) -s ${OSH_LB_SUBNET} -j MASQUERADE
# sudo iptables -t filter -I FORWARD -s ${OSH_LB_SUBNET} -j ACCEPT
# Create Octavia management network and its security group
openstack network show lb-mgmt-net || \
  openstack network create lb-mgmt-net -f value -c id
openstack subnet show lb-mgmt-subnet || \
  openstack subnet create --subnet-range "${OSH_LB_SUBNET}" --allocation-pool "start=${OSH_LB_SUBNET_START},end=${OSH_LB_SUBNET_END}" --network lb-mgmt-net lb-mgmt-subnet -f value -c id
openstack security group show lb-mgmt-sec-grp || \
  { openstack security group create lb-mgmt-sec-grp; \
    openstack security group rule create --protocol icmp lb-mgmt-sec-grp; \
    openstack security group rule create --protocol tcp --dst-port 22 lb-mgmt-sec-grp; \
    openstack security group rule create --protocol tcp --dst-port 9443 lb-mgmt-sec-grp; }
# Create security group for Octavia health manager (amphorae send UDP
# heartbeats to port 5555)
openstack security group show lb-health-mgr-sec-grp || \
  { openstack security group create lb-health-mgr-sec-grp; \
    openstack security group rule create --protocol udp --dst-port 5555 lb-health-mgr-sec-grp; }
# Create security group for Octavia worker
openstack security group show lb-worker-sec-grp || \
  { openstack security group create lb-worker-sec-grp; }
# Create ports for health manager (octavia-health-manager-port-{KUBE_NODE_NAME})
# and the same for worker (octavia-worker-port-{KUBE_NODE_NAME})
# octavia-health-manager and octavia-worker pods will be run on each network node as daemonsets.
# The pods will create NICs on each network node attached to lb-mgmt-net.
CONTROLLER_IP_PORT_LIST=''
CTRLS=$(kubectl get nodes -l openstack-network-node=enabled -o name | awk -F"/" '{print $2}')
for node in $CTRLS
do
  PORTNAME="octavia-health-manager-port-${node}"
  openstack port show "${PORTNAME}" || \
    openstack port create --security-group lb-health-mgr-sec-grp --device-owner Octavia:health-mgr --host="${node}" -c id -f value --network lb-mgmt-net "${PORTNAME}"
  IP=$(openstack port show "${PORTNAME}" -f json | jq -r '.fixed_ips[0].ip_address')
  # Accumulate a comma-separated ip:port list for the health_manager config.
  if [ -z "${CONTROLLER_IP_PORT_LIST}" ]; then
    CONTROLLER_IP_PORT_LIST="${IP}:5555"
  else
    CONTROLLER_IP_PORT_LIST="${CONTROLLER_IP_PORT_LIST},${IP}:5555"
  fi
  WORKER_PORTNAME="octavia-worker-port-${node}"
  openstack port show "${WORKER_PORTNAME}" || \
    openstack port create --security-group lb-worker-sec-grp --device-owner Octavia:worker --host="${node}" -c id -f value --network lb-mgmt-net "${WORKER_PORTNAME}"
  openstack port show "${WORKER_PORTNAME}" -f json | jq -r '.fixed_ips[0].ip_address'
done
# Each health manager information should be passed into octavia configuration.
echo "${CONTROLLER_IP_PORT_LIST}" > /tmp/octavia_hm_controller_ip_port_list
# Create a flavor for amphora instance
openstack flavor show m1.amphora || \
  openstack flavor create --ram 1024 --disk 3 --vcpus 1 m1.amphora
# Create key pair to connect amphora instance via management network
mkdir -p "${SSH_DIR}"
openstack keypair show octavia-key || \
  openstack keypair create --private-key "${SSH_DIR}/octavia_key" octavia-key
sudo chown "$(id -un)" "${SSH_DIR}/octavia_key"
chmod 600 "${SSH_DIR}/octavia_key"
# accept diffie-hellman-group1-sha1 algo for SSH (for compatibility with older images)
sudo tee -a /etc/ssh/ssh_config <<EOF
KexAlgorithms +diffie-hellman-group1-sha1
HostKeyAlgorithms +ssh-rsa
PubkeyAcceptedKeyTypes +ssh-rsa
EOF
# Download the test amphora image once and cache it under /tmp.
if [ ! -f "/tmp/${OSH_AMPHORA_IMAGE_FILE}" ]; then
  curl --fail -sSL "${OSH_AMPHORA_IMAGE_URL}" -o "/tmp/${OSH_AMPHORA_IMAGE_FILE}"
fi
openstack image show "${OSH_AMPHORA_IMAGE_NAME}" || \
  openstack image create -f value -c id \
    --public \
    --container-format=bare \
    --disk-format qcow2 \
    --min-disk 2 \
    --file "/tmp/${OSH_AMPHORA_IMAGE_FILE}" \
    "${OSH_AMPHORA_IMAGE_NAME}"
# Tag the image so Octavia's amp_image_tag selector ('amphora') can find it.
OSH_AMPHORA_IMAGE_ID=$(openstack image show "${OSH_AMPHORA_IMAGE_NAME}" -f value -c id)
openstack image set --tag amphora "${OSH_AMPHORA_IMAGE_ID}"

View File

@@ -0,0 +1,48 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# End-to-end load balancer check: deploy the octavia-env Heat stack (two
# backend web servers behind an amphora LB) and verify both backends answer
# through the LB's floating IP.
set -xe
export OS_CLOUD=openstack_helm
HEAT_DIR="$(readlink -f ./tools/deployment/component/octavia)"
SSH_DIR="${HOME}/.ssh"
# Expose the Heat template directory and the ssh keypair to the
# openstack-client container.
OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS="${OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS} -v ${HEAT_DIR}:${HEAT_DIR} -v ${SSH_DIR}:${SSH_DIR}"
export OPENSTACK_CLIENT_CONTAINER_EXTRA_ARGS
COMPUTE_FLAVOR_ID=$(openstack flavor show -f value -c id m1.amphora)
# The /tmp/inventory_k8s_nodes.txt file is created by the deploy-env role and contains the list
# of all K8s nodes. Amphora instance is run on the first K8s node from the list.
# Worker VM instances are run on the rest of the nodes.
TARGET_HOST_1=$(sed -n '2p' /tmp/inventory_k8s_nodes.txt)
TARGET_HOST_2=$(sed -n '3p' /tmp/inventory_k8s_nodes.txt)
# Create the stack only if it does not exist yet (idempotent reruns).
if ! openstack stack show "octavia-env"; then
  openstack stack create --wait \
    --parameter compute_flavor_id=${COMPUTE_FLAVOR_ID} \
    --parameter az_1="nova:${TARGET_HOST_1}" \
    --parameter az_2="nova:${TARGET_HOST_2}" \
    -t ${HEAT_DIR}/heat_octavia_env.yaml \
    octavia-env
fi
# Give the listener/members a moment to become operational.
sleep 30
VIP_PORT_ID=$(openstack loadbalancer show osh -c vip_port_id -f value)
LB_FLOATING_IP=$(openstack floating ip list --port ${VIP_PORT_ID} -f value -c "Floating IP Address" | head -n1)
# Hit the LB twice; ROUND_ROBIN should alternate between the two members,
# so both backend greetings must appear in the combined output.
: > /tmp/curl.txt
for _ in 1 2; do
  curl http://${LB_FLOATING_IP} >> /tmp/curl.txt
done
grep "Hello from server_1" /tmp/curl.txt
grep "Hello from server_2" /tmp/curl.txt

View File

@@ -0,0 +1,144 @@
# OpenSSL root CA configuration file.
[ ca ]
# `man ca`
default_ca = CA_default
[ CA_default ]
# Directory and file locations.
dir = ./
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
database = $dir/index.txt
serial = $dir/serial
RANDFILE = $dir/private/.rand
# The root key and root certificate.
private_key = $dir/private/ca.key.pem
certificate = $dir/certs/ca.cert.pem
# For certificate revocation lists.
crlnumber = $dir/crlnumber
crl = $dir/crl/ca.crl.pem
crl_extensions = crl_ext
default_crl_days = 30
# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256
name_opt = ca_default
cert_opt = ca_default
# 10 years
default_days = 7300
preserve = no
policy = policy_strict
[ CA_intermediate ]
# Directory and file locations.
dir = ./intermediate_ca
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
database = $dir/index.txt
serial = $dir/serial
RANDFILE = $dir/private/.rand
# The root key and root certificate.
private_key = ./private/ca.key.pem
certificate = ./certs/ca.cert.pem
# For certificate revocation lists.
crlnumber = $dir/crlnumber
crl = $dir/crl/ca.crl.pem
crl_extensions = crl_ext
default_crl_days = 30
# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256
name_opt = ca_default
cert_opt = ca_default
# 5 years
default_days = 3650
preserve = no
policy = policy_strict
[ policy_strict ]
# The root CA should only sign intermediate certificates that match.
# See the POLICY FORMAT section of `man ca`.
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
# Options for the `req` tool (`man req`).
default_bits = 2048
distinguished_name = req_distinguished_name
string_mask = utf8only
# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256
# Extension to add when the -x509 option is used.
x509_extensions = v3_ca
[ req_distinguished_name ]
# See <https://en.wikipedia.org/wiki/Certificate_signing_request>.
countryName = Country Name (2 letter code)
stateOrProvinceName = State or Province Name
localityName = Locality Name
0.organizationName = Organization Name
organizationalUnitName = Organizational Unit Name
commonName = Common Name
emailAddress = Email Address
# Optionally, specify some defaults.
countryName_default = US
stateOrProvinceName_default = Oregon
localityName_default = Corvallis
0.organizationName_default = OpenStack
organizationalUnitName_default = Octavia
emailAddress_default =
commonName_default = example.org
[ v3_ca ]
# Extensions for a typical CA (`man x509v3_config`).
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true
keyUsage = critical, digitalSignature, cRLSign, keyCertSign
[ v3_intermediate_ca ]
# Extensions for a typical intermediate CA (`man x509v3_config`).
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true, pathlen:0
keyUsage = critical, digitalSignature, cRLSign, keyCertSign
[ usr_cert ]
# Extensions for client certificates (`man x509v3_config`).
basicConstraints = CA:FALSE
nsCertType = client, email
nsComment = "OpenSSL Generated Client Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, emailProtection
[ server_cert ]
# Extensions for server certificates (`man x509v3_config`).
basicConstraints = CA:FALSE
nsCertType = server
nsComment = "OpenSSL Generated Server Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer:always
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
[ crl_ext ]
# Extension for CRLs (`man x509v3_config`).
authorityKeyIdentifier=keyid:always

View File

@@ -0,0 +1,23 @@
---
images:
tags:
test: docker.io/xrally/xrally-openstack:2.0.0
bootstrap: quay.io/airshipit/heat:2025.1-ubuntu_jammy
db_init: quay.io/airshipit/heat:2025.1-ubuntu_jammy
octavia_db_sync: quay.io/airshipit/octavia:2025.1-ubuntu_jammy
db_drop: quay.io/airshipit/heat:2025.1-ubuntu_jammy
rabbit_init: docker.io/rabbitmq:3.13-management
ks_user: quay.io/airshipit/heat:2025.1-ubuntu_jammy
ks_service: quay.io/airshipit/heat:2025.1-ubuntu_jammy
ks_endpoints: quay.io/airshipit/heat:2025.1-ubuntu_jammy
dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy
image_repo_sync: docker.io/docker:17.07.0
octavia_api: quay.io/airshipit/octavia:2025.1-ubuntu_jammy
octavia_driver_agent: quay.io/airshipit/octavia:2025.1-ubuntu_jammy
octavia_worker: quay.io/airshipit/octavia:2025.1-ubuntu_jammy
octavia_worker_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy
octavia_housekeeping: quay.io/airshipit/octavia:2025.1-ubuntu_jammy
octavia_health_manager: quay.io/airshipit/octavia:2025.1-ubuntu_jammy
octavia_health_manager_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_jammy
openvswitch_vswitchd: docker.io/openstackhelm/openvswitch:latest-ubuntu_jammy
...

View File

@@ -0,0 +1,23 @@
---
images:
tags:
test: docker.io/xrally/xrally-openstack:2.0.0
bootstrap: quay.io/airshipit/heat:2025.1-ubuntu_noble
db_init: quay.io/airshipit/heat:2025.1-ubuntu_noble
octavia_db_sync: quay.io/airshipit/octavia:2025.1-ubuntu_noble
db_drop: quay.io/airshipit/heat:2025.1-ubuntu_noble
rabbit_init: docker.io/rabbitmq:3.13-management
ks_user: quay.io/airshipit/heat:2025.1-ubuntu_noble
ks_service: quay.io/airshipit/heat:2025.1-ubuntu_noble
ks_endpoints: quay.io/airshipit/heat:2025.1-ubuntu_noble
dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_noble
image_repo_sync: docker.io/docker:17.07.0
octavia_api: quay.io/airshipit/octavia:2025.1-ubuntu_noble
octavia_driver_agent: quay.io/airshipit/octavia:2025.1-ubuntu_noble
octavia_worker: quay.io/airshipit/octavia:2025.1-ubuntu_noble
octavia_worker_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble
octavia_housekeeping: quay.io/airshipit/octavia:2025.1-ubuntu_noble
octavia_health_manager: quay.io/airshipit/octavia:2025.1-ubuntu_noble
octavia_health_manager_init: quay.io/airshipit/openstack-client:2025.1-ubuntu_noble
openvswitch_vswitchd: docker.io/openstackhelm/openvswitch:latest-ubuntu_noble
...

View File

@@ -63,4 +63,15 @@
openstack_release: "2025.1"
container_distro_name: ubuntu
container_distro_version: noble
- job:
name: openstack-helm-octavia-2025-1-ubuntu_jammy
parent: openstack-helm-octavia
nodeset: openstack-helm-4nodes-ubuntu_jammy
timeout: 10800
vars:
osh_params:
openstack_release: "2025.1"
container_distro_name: ubuntu
container_distro_version: jammy
...

View File

@@ -130,6 +130,7 @@
abstract: true
vars:
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
@@ -155,6 +156,7 @@
abstract: true
vars:
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
@@ -181,6 +183,7 @@
abstract: true
vars:
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
@@ -206,6 +209,7 @@
vars:
osh_helm_repo: openstack-helm
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/prepare-helm-repos-local.sh
@@ -234,6 +238,7 @@
osh_helm_repo: openstack-helm
download_overrides: "-d"
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/prepare-helm-repos-public.sh
@@ -260,6 +265,7 @@
abstract: true
vars:
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
@@ -289,6 +295,7 @@
- ^zuul\.d/.*$
vars:
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
@@ -309,6 +316,7 @@
- ^tools/deployment/component/cinder/.*$
vars:
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
@@ -333,6 +341,7 @@
- ^tools/deployment/ceph/.*$
vars:
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
@@ -368,6 +377,7 @@
vars:
run_helm_tests: "yes"
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
@@ -388,6 +398,7 @@
- ^tools/deployment/component/horizon/.*$
vars:
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
@@ -407,6 +418,7 @@
voting: false
vars:
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
@@ -438,6 +450,7 @@
timeout: 7200
vars:
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
@@ -457,10 +470,11 @@
- job:
name: openstack-helm-skyline
parent: openstack-helm-compute-kit
parent: openstack-helm-deploy
timeout: 10800
vars:
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
@@ -475,4 +489,29 @@
- ./tools/deployment/component/compute-kit/compute-kit.sh
- ./tools/deployment/component/skyline/skyline.sh
- ./tools/gate/selenium/skyline-selenium.sh
- job:
name: openstack-helm-octavia
parent: openstack-helm-deploy
timeout: 10800
vars:
gate_scripts:
- ./tools/deployment/common/prepare-bashrc.sh
- ./tools/deployment/common/prepare-k8s.sh
- ./tools/deployment/common/prepare-charts.sh
- ./tools/deployment/common/setup-client.sh
- export VOLUME_HELM_ARGS="--set volume.enabled=false"; ./tools/deployment/component/common/rabbitmq.sh
- ./tools/deployment/db/mariadb.sh
- ./tools/deployment/component/common/memcached.sh
- ./tools/deployment/component/keystone/keystone.sh
- ./tools/deployment/component/heat/heat.sh
- export GLANCE_BACKEND=local; ./tools/deployment/component/glance/glance.sh
- ./tools/deployment/component/compute-kit/openvswitch.sh
- ./tools/deployment/component/compute-kit/libvirt.sh
- ./tools/deployment/component/compute-kit/compute-kit.sh
- ./tools/deployment/component/barbican/barbican.sh
- ./tools/deployment/component/octavia/octavia_resources.sh
- ./tools/deployment/component/octavia/octavia_certs.sh
- ./tools/deployment/component/octavia/octavia.sh
- ./tools/deployment/component/octavia/octavia_test.sh
...

View File

@@ -193,6 +193,41 @@
nodes:
- primary
- nodeset:
name: openstack-helm-4nodes-ubuntu_jammy
nodes:
- name: primary
label: ubuntu-jammy
- name: node-1
label: ubuntu-jammy
- name: node-2
label: ubuntu-jammy
- name: node-3
label: ubuntu-jammy
groups:
- name: primary
nodes:
- primary
- name: nodes
nodes:
- node-1
- node-2
- node-3
- name: k8s_cluster
nodes:
- primary
- node-1
- node-2
- node-3
- name: k8s_control_plane
nodes:
- primary
- name: k8s_nodes
nodes:
- node-1
- node-2
- node-3
- nodeset:
name: openstack-helm-5nodes-ubuntu_jammy
nodes:

View File

@@ -39,6 +39,7 @@
- openstack-helm-cinder-2025-1-ubuntu_jammy # 3 nodes rook
- openstack-helm-compute-kit-2025-1-ubuntu_jammy # 1 node + 3 nodes
- openstack-helm-skyline-2025-1-ubuntu_jammy # 3 nodes
- openstack-helm-octavia-2025-1-ubuntu_jammy # 4 nodes
# 2025.1 Ubuntu Noble
- openstack-helm-cinder-2025-1-ubuntu_noble # 5 nodes rook
- openstack-helm-compute-kit-2025-1-ubuntu_noble # 1 node + 3 nodes