Refactor deploy-env role

- Make the role less monolithic. Each task file
  deploys one feature.
- Deploy Metallb
- Deploy Openstack provider network gateway

Change-Id: I41f0353b286f817cb562b3bd59992e4baa473568
This commit is contained in:
Vladimir Kozhukalov 2024-03-24 22:23:49 -05:00
parent a957d8e12c
commit 5b1879aa09
22 changed files with 687 additions and 235 deletions

View File

@ -1,8 +1,9 @@
This role is used to deploy test environment which includes This role is used to deploy test environment which includes
- install necessary prerequisites including Helm - install necessary prerequisites including Helm
- deploy Containerd and a container runtime for Kubernetes - deploy Containerd and a container runtime for Kubernetes
- deploy Kubernetes using Kubeadm with a single control plain node - deploy Kubernetes using Kubeadm with a single control plane node
- install Calico as a Kubernetes networking - install Calico as a Kubernetes networking
- establish tunnel between primary node and K8s control plane node
The role works both for singlenode and multinode inventories and The role works both for singlenode and multinode inventories and
assumes the inventory has the node called `primary` and the group called `nodes`. assumes the inventory has the node called `primary` and the group called `nodes`.
@ -11,27 +12,32 @@ See for example:
```yaml ```yaml
all: all:
vars:
ansible_port: 22
ansible_user: ubuntu
ansible_ssh_private_key_file: /home/ubuntu/.ssh/id_rsa
ansible_ssh_extra_args: -o StrictHostKeyChecking=no
children: children:
ungrouped: primary:
hosts: hosts:
primary: primary:
ansible_port: 22
ansible_host: 10.10.10.10 ansible_host: 10.10.10.10
ansible_user: ubuntu k8s_cluster:
ansible_ssh_private_key_file: /home/ubuntu/.ssh/id_rsa
ansible_ssh_extra_args: -o StrictHostKeyChecking=no
nodes:
hosts: hosts:
node-1: node-1:
ansible_port: 22
ansible_host: 10.10.10.11 ansible_host: 10.10.10.11
ansible_user: ubuntu
ansible_ssh_private_key_file: /home/ubuntu/.ssh/id_rsa
ansible_ssh_extra_args: -o StrictHostKeyChecking=no
node-2: node-2:
ansible_port: 22
ansible_host: 10.10.10.12 ansible_host: 10.10.10.12
ansible_user: ubuntu node-3:
ansible_ssh_private_key_file: /home/ubuntu/.ssh/id_rsa ansible_host: 10.10.10.13
ansible_ssh_extra_args: -o StrictHostKeyChecking=no k8s_control-plane:
hosts:
node-1:
ansible_host: 10.10.10.11
k8s_nodes:
hosts:
node-2:
ansible_host: 10.10.10.12
node-3:
ansible_host: 10.10.10.13
``` ```

View File

@ -35,4 +35,21 @@ loopback_setup: false
loopback_device: /dev/loop100 loopback_device: /dev/loop100
loopback_image: /var/lib/openstack-helm/ceph-loop.img loopback_image: /var/lib/openstack-helm/ceph-loop.img
loopback_image_size: 12G loopback_image_size: 12G
metallb_setup: false
metallb_pool_cidr: "172.24.128.0/24"
metallb_openstack_endpoint_cidr: "172.24.128.100/24"
client_ssh_user: zuul
client_ssh_key_file: /home/zuul/.ssh/id_rsa
cluster_ssh_user: zuul
openstack_provider_gateway_setup: false
openstack_provider_network_cidr: "172.24.4.0/24"
openstack_provider_gateway_cidr: "172.24.4.1/24"
tunnel_network_cidr: "172.24.5.0/24"
tunnel_client_cidr: "172.24.5.2/24"
tunnel_cluster_cidr: "172.24.5.1/24"
... ...

View File

@ -15,9 +15,8 @@ spec:
value: "9091" value: "9091"
- name: FELIX_IGNORELOOSERPF - name: FELIX_IGNORELOOSERPF
value: "true" value: "true"
# We assign IP on br-ex interface while testing the deployed Openstack cluster and
# we need Calico to skip this interface while discovering the # we need Calico to skip this interface while discovering the
# network changes on the host to prevent announcing unnecessary networks. # network changes on the host to prevent announcing unnecessary networks.
- name: IP_AUTODETECTION_METHOD - name: IP_AUTODETECTION_METHOD
value: "skip-interface=br-ex" value: "skip-interface=br-ex|provider.*|client.*"
... ...

View File

@ -0,0 +1 @@
nameserver 10.96.0.10

View File

@ -2,6 +2,8 @@
apiVersion: kubeproxy.config.k8s.io/v1alpha1 apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration kind: KubeProxyConfiguration
mode: ipvs mode: ipvs
ipvs:
strictARP: true
... ...
--- ---
apiVersion: kubeadm.k8s.io/v1beta3 apiVersion: kubeadm.k8s.io/v1beta3

View File

@ -0,0 +1,25 @@
# Nginx stream (L4) proxy config template rendered by Ansible.
# Forwards HTTP/HTTPS traffic arriving on the Openstack provider gateway
# address to the MetalLB-managed Openstack public endpoint address.
user nginx;
worker_processes auto;
error_log /dev/stdout warn;
pid /var/run/nginx.pid;
events {
    worker_connections 1024;
}
stream {
    access_log off;
    server {
        listen {{ openstack_provider_gateway_cidr | ipaddr('address') }}:80;
        proxy_pass {{ metallb_openstack_endpoint_cidr | ipaddr('address') }}:80;
        # "transparent" binds outgoing connections to a non-local source
        # address, so the backend sees the gateway address; NOTE(review):
        # this requires NET_ADMIN/NET_RAW on the container running nginx
        proxy_bind {{ openstack_provider_gateway_cidr | ipaddr('address') }} transparent;
    }
    server {
        listen {{ openstack_provider_gateway_cidr | ipaddr('address') }}:443;
        proxy_pass {{ metallb_openstack_endpoint_cidr | ipaddr('address') }}:443;
        proxy_bind {{ openstack_provider_gateway_cidr | ipaddr('address') }} transparent;
    }
}

View File

@ -1,4 +1 @@
nameserver 8.8.8.8 nameserver {{ nameserver_ip }}
nameserver 8.8.4.4
search svc.cluster.local cluster.local
options ndots:5 timeout:1 attempts:1

View File

@ -0,0 +1,55 @@
---
# Deploys the Calico CNI: pre-pulls images on every cluster node, then
# applies the manifest from the primary node and patches the daemonset.
# We download Calico manifest on all nodes because we then want to download
# Calico images BEFORE deploying it, so that `kubectl wait` timeout
# for `k8s-app=kube-dns` isn't reached by slow download speeds
- name: Download Calico manifest
  when: inventory_hostname in (groups['k8s_cluster'] | default([]))
  shell: |
    curl -LSs {{ calico_manifest_url }} -o /tmp/calico.yaml
    sed -i -e 's#docker.io/calico/#quay.io/calico/#g' /tmp/calico.yaml
    export CONTAINER_RUNTIME_ENDPOINT=unix:///run/containerd/containerd.sock
    export IMAGE_SERVICE_ENDPOINT=unix:///run/containerd/containerd.sock
    awk '/image:/ { print $2 }' /tmp/calico.yaml | xargs -I{} crictl pull {}
  args:
    executable: /bin/bash
- name: Deploy Calico
  become: false
  when: inventory_hostname in (groups['primary'] | default([]))
  block:
    # The primary node may not belong to k8s_cluster, in which case the
    # manifest was not downloaded by the task above (hence the file check).
    - name: Download Calico manifest
      shell: |
        if [[ ! -f /tmp/calico.yaml ]]; then
          curl -LSs {{ calico_manifest_url }} -o /tmp/calico.yaml
          sed -i -e 's#docker.io/calico/#quay.io/calico/#g' /tmp/calico.yaml
        fi
      args:
        executable: /bin/bash
    - name: Deploy Calico
      command: kubectl apply -f /tmp/calico.yaml
    # Give the daemonset time to create pods before polling their status
    - name: Sleep before trying to check Calico pods
      pause:
        seconds: 30
    # Poll with short 20s waits, retried up to 10 times, instead of one
    # long wait, so a transient not-yet-created pod does not fail the task
    - name: Wait for Calico pods ready
      command: kubectl -n kube-system wait --timeout=20s --for=condition=Ready pods -l k8s-app=calico-node
      register: calico_pods_wait
      until: calico_pods_wait is succeeded
      retries: 10
    - name: Prepare Calico patch
      copy:
        src: files/calico_patch.yaml
        dest: /tmp/calico_patch.yaml
    - name: Patch Calico
      command: kubectl -n kube-system patch daemonset calico-node --patch-file /tmp/calico_patch.yaml
    # Patching restarts the calico-node pods, so wait for readiness again
    - name: Wait for Calico pods ready (after patch)
      command: kubectl -n kube-system wait --timeout=20s --for=condition=Ready pods -l k8s-app=calico-node
      register: calico_pods_wait
      until: calico_pods_wait is succeeded
      retries: 10
...

View File

@ -0,0 +1,73 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Establishes a WireGuard tunnel between the primary (client) node and the
# K8s control-plane node so the client can reach the MetalLB pool and the
# Openstack provider network. Only runs when the primary node is not itself
# a control-plane node.
- name: Set cluster IP
  set_fact:
    cluster_default_ip: "{{ (groups['k8s_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']))[0] }}"
- name: Set client IP
  set_fact:
    client_default_ip: "{{ (groups['primary'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']))[0] }}"
- name: Setup wireguard tunnel between primary and cluster control-plane node
  when: (groups['primary'] | difference(groups['k8s_control_plane']) | length > 0)
  block:
    # Each side generates its own key pair ...
    - name: Generate wireguard key pair
      shell: |
        wg genkey | tee /root/wg-private-key | wg pubkey > /root/wg-public-key
        chmod 600 /root/wg-private-key
      when: (inventory_hostname in (groups['primary'] | default([]))) or (inventory_hostname in (groups['k8s_control_plane'] | default([])))
    - name: Register public wireguard key variable
      command: cat /root/wg-public-key
      register: wg_public_key
      when: (inventory_hostname in (groups['primary'] | default([]))) or (inventory_hostname in (groups['k8s_control_plane'] | default([])))
    # ... and reads the peer's public key from the peer's hostvars.
    - name: Set primary wireguard public key
      set_fact:
        client_wg_public_key: "{{ (groups['primary'] | map('extract', hostvars, ['wg_public_key', 'stdout']))[0] }}"
      when: inventory_hostname in (groups['k8s_control_plane'] | default([]))
    - name: Set cluster wireguard public key
      set_fact:
        cluster_wg_public_key: "{{ (groups['k8s_control_plane'] | map('extract', hostvars, ['wg_public_key', 'stdout']))[0] }}"
      when: inventory_hostname in (groups['primary'] | default([]))
    # NOTE(review): the FORWARD rule below references "client-gw", but the
    # interface created here is "client-wg" -- looks like a typo; it is
    # masked by the preceding `-P FORWARD ACCEPT`. Verify intended name.
    - name: Set up wireguard tunnel on cluster control-plane node
      shell: |
        cat > /tmp/configure_cluster_tunnel.sh <<EOF
        ip link add client-wg type wireguard
        ip addr add {{ tunnel_cluster_cidr }} dev client-wg
        wg set client-wg listen-port 51820 private-key /root/wg-private-key peer {{ client_wg_public_key }} allowed-ips {{ tunnel_network_cidr }} endpoint {{ client_default_ip }}:51820
        ip link set client-wg up
        iptables -t filter -P FORWARD ACCEPT
        iptables -t filter -I FORWARD -o client-gw -j ACCEPT
        EOF
        chmod +x /tmp/configure_cluster_tunnel.sh
        /tmp/configure_cluster_tunnel.sh
      when: inventory_hostname in (groups['k8s_control_plane'] | default([]))
    # The client side routes the MetalLB pool and the provider network
    # through the tunnel towards the control-plane node.
    - name: Set up wireguard tunnel on primary node
      shell: |
        cat > /tmp/configure_client_tunnel.sh <<EOF
        ip link add client-wg type wireguard
        ip addr add {{ tunnel_client_cidr }} dev client-wg
        wg set client-wg listen-port 51820 private-key /root/wg-private-key peer {{ cluster_wg_public_key }} allowed-ips {{ tunnel_network_cidr }},{{ openstack_provider_network_cidr }},{{ metallb_pool_cidr }} endpoint {{ cluster_default_ip }}:51820
        ip link set client-wg up
        ip route add {{ metallb_pool_cidr }} via {{ tunnel_cluster_cidr | ipaddr('address') }} dev client-wg
        ip route add {{ openstack_provider_network_cidr }} via {{ tunnel_cluster_cidr | ipaddr('address') }} dev client-wg
        EOF
        chmod +x /tmp/configure_client_tunnel.sh
        /tmp/configure_client_tunnel.sh
      when: inventory_hostname in (groups['primary'] | default([]))
...

View File

@ -22,14 +22,6 @@
- runc - runc
state: absent state: absent
- name: Ensure dependencies are installed
apt:
name:
- apt-transport-https
- ca-certificates
- gnupg2
state: present
- name: Add Docker apt repository key - name: Add Docker apt repository key
apt_key: apt_key:
url: https://download.docker.com/linux/ubuntu/gpg url: https://download.docker.com/linux/ubuntu/gpg
@ -57,14 +49,6 @@
state: present state: present
update_cache: true update_cache: true
- name: Install Crictl
shell: |
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/{{crictl_version}}/crictl-{{crictl_version}}-linux-amd64.tar.gz
sudo tar zxvf crictl-{{crictl_version}}-linux-amd64.tar.gz -C /usr/local/bin
rm -f crictl-{{crictl_version}}-linux-amd64.tar.gz
args:
executable: /bin/bash
- name: Configure Docker daemon - name: Configure Docker daemon
template: template:
src: files/daemon.json src: files/daemon.json
@ -83,6 +67,14 @@
- name: Reset ssh connection to apply user changes. - name: Reset ssh connection to apply user changes.
meta: reset_connection meta: reset_connection
- name: Install Crictl
shell: |
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/{{crictl_version}}/crictl-{{crictl_version}}-linux-amd64.tar.gz
sudo tar zxvf crictl-{{crictl_version}}-linux-amd64.tar.gz -C /usr/local/bin
rm -f crictl-{{crictl_version}}-linux-amd64.tar.gz
args:
executable: /bin/bash
- name: Set mirror_fqdn fact - name: Set mirror_fqdn fact
when: when:
- registry_mirror is not defined - registry_mirror is not defined

View File

@ -1,84 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
- name: Mount tmpfs to /var/lib/etcd
mount:
path: /var/lib/etcd
src: tmpfs
fstype: tmpfs
opts: size=1g
state: mounted
- name: Prepare kubeadm config
template:
src: files/kubeadm_config.yaml
dest: /tmp/kubeadm_config.yaml
- name: Initialize the Kubernetes cluster using kubeadm
command: kubeadm init --config /tmp/kubeadm_config.yaml
- name: "Setup kubeconfig for {{ kubectl.user }} user"
shell: |
mkdir -p /home/{{ kubectl.user }}/.kube
cp -i /etc/kubernetes/admin.conf /home/{{ kubectl.user }}/.kube/config
chown -R {{ kubectl.user }}:{{ kubectl.group }} /home/{{ kubectl.user }}/.kube
args:
executable: /bin/bash
- name: Deploy Calico
become: false
command: kubectl apply -f /tmp/calico.yaml
- name: Sleep before trying to check Calico pods
pause:
seconds: 20
- name: Wait for Calico pods ready
become: false
command: kubectl -n kube-system wait --timeout=240s --for=condition=Ready pods -l k8s-app=calico-node
- name: Prepare Calico patch
copy:
src: files/calico_patch.yaml
dest: /tmp/calico_patch.yaml
- name: Patch Calico
become: false
command: kubectl -n kube-system patch daemonset calico-node --patch-file /tmp/calico_patch.yaml
- name: Wait for Calico pods ready
become: false
command: kubectl -n kube-system wait --timeout=240s --for=condition=Ready pods -l k8s-app=calico-node
- name: Generate join command
command: kubeadm token create --print-join-command
register: join_command
- name: Untaint Kubernetes control plane node
become: false
command: kubectl taint nodes -l 'node-role.kubernetes.io/control-plane' node-role.kubernetes.io/control-plane-
- name: Enable recursive queries for coredns
become: false
shell: |
PATCH=$(mktemp)
kubectl get configmap coredns -n kube-system -o json | jq -r "{data: .data}" | sed 's/ready\\n/header \{\\n response set ra\\n \}\\n ready\\n/g' > "${PATCH}"
kubectl patch configmap coredns -n kube-system --patch-file "${PATCH}"
kubectl set image deployment coredns -n kube-system "coredns=registry.k8s.io/coredns/coredns:v1.9.4"
kubectl rollout restart -n kube-system deployment/coredns
sleep 10
kubectl -n kube-system wait --timeout=240s --for=condition=Ready pods -l k8s-app=kube-dns
rm -f "${PATCH}"
args:
executable: /bin/bash
...

View File

@ -0,0 +1,37 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Patches coredns to answer recursive queries and makes it the default
# DNS resolver on all cluster nodes.
- name: Enable recursive queries for coredns
  become: false
  shell: |
    PATCH=$(mktemp)
    kubectl get configmap coredns -n kube-system -o json | jq -r "{data: .data}" | sed 's/ready\\n/header \{\\n response set ra\\n \}\\n ready\\n/g' > "${PATCH}"
    kubectl patch configmap coredns -n kube-system --patch-file "${PATCH}"
    kubectl set image deployment coredns -n kube-system "coredns=registry.k8s.io/coredns/coredns:v1.9.4"
    kubectl rollout restart -n kube-system deployment/coredns
    sleep 30
    kubectl -n kube-system wait --timeout=240s --for=condition=Ready pods -l k8s-app=kube-dns
    rm -f "${PATCH}"
  args:
    executable: /bin/bash
  when: inventory_hostname in (groups['primary'] | default([]))
# files/cluster_resolv.conf points at the coredns cluster IP (10.96.0.10)
# NOTE(review): unquoted 0644 parses as octal int; Ansible accepts it, but
# the quoted string "0644" is the safer convention
- name: Use coredns as default DNS resolver
  copy:
    src: files/cluster_resolv.conf
    dest: /etc/resolv.conf
    owner: root
    group: root
    mode: 0644
  when: inventory_hostname in (groups['k8s_cluster'] | default([]))
...

View File

@ -0,0 +1,54 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Prepares the client (primary) node: installs kubectl and Helm, installs
# the admin kubeconfig fetched from the control-plane node, and untaints
# the control-plane so workloads can be scheduled on it.
- name: Install Kubectl
  apt:
    state: present
    update_cache: true
    allow_downgrade: true
    pkg:
      - "kubectl={{ kube_version }}"
- name: "Setup kubeconfig directory for {{ kubectl.user }} user"
  shell: |
    mkdir -p /home/{{ kubectl.user }}/.kube
# /tmp/kube_config was pulled from the control-plane node by the
# control-plane task file
- name: "Copy kube_config file for {{ kubectl.user }} user"
  synchronize:
    src: /tmp/kube_config
    dest: /home/{{ kubectl.user }}/.kube/config
- name: "Set kubeconfig file ownership for {{ kubectl.user }} user"
  shell: |
    chown -R {{ kubectl.user }}:{{ kubectl.group }} /home/{{ kubectl.user }}/.kube
- name: Deploy Helm
  block:
    - name: Install Helm
      shell: |
        TMP_DIR=$(mktemp -d)
        curl -sSL https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz | tar -zxv --strip-components=1 -C ${TMP_DIR}
        mv "${TMP_DIR}"/helm /usr/local/bin/helm
        rm -rf "${TMP_DIR}"
      args:
        executable: /bin/bash
    # This is to improve build time
    - name: Remove stable Helm repo
      command: helm repo remove stable
      ignore_errors: true
# Allow scheduling workloads on the control-plane node (small test clusters)
- name: Untaint Kubernetes control plane node
  become: false
  command: kubectl taint nodes -l 'node-role.kubernetes.io/control-plane' node-role.kubernetes.io/control-plane-
...

View File

@ -43,6 +43,16 @@
state: present state: present
ignore_errors: true ignore_errors: true
- name: Disable reverse path filtering (rp_filter)
sysctl:
name: "{{ item }}"
value: "0"
state: present
loop:
- net.ipv4.conf.all.rp_filter
- net.ipv4.conf.default.rp_filter
ignore_errors: true
- name: Remove swapfile from /etc/fstab - name: Remove swapfile from /etc/fstab
mount: mount:
name: "{{ item }}" name: "{{ item }}"
@ -56,27 +66,6 @@
command: swapoff -a command: swapoff -a
when: ansible_swaptotal_mb > 0 when: ansible_swaptotal_mb > 0
- name: Ensure dependencies are installed
apt:
name:
- apt-transport-https
- ca-certificates
- gnupg2
- ipvsadm
- jq
state: present
- name: Add Kubernetes apt repository key
apt_key:
url: "https://pkgs.k8s.io/core:/stable:/{{ kube_version_repo }}/deb/Release.key"
state: present
- name: Add Kubernetes apt repository
apt_repository:
repo: "deb https://pkgs.k8s.io/core:/stable:/{{ kube_version_repo }}/deb/ /"
state: present
filename: kubernetes.list
- name: Install Kubernetes binaries - name: Install Kubernetes binaries
apt: apt:
state: present state: present
@ -93,36 +82,27 @@
daemon_reload: yes daemon_reload: yes
state: restarted state: restarted
- name: Configure resolv.conf
template:
src: files/resolv.conf
dest: /etc/resolv.conf
owner: root
group: root
mode: 0644
vars:
nameserver_ip: "8.8.8.8"
- name: Disable systemd-resolved - name: Disable systemd-resolved
service: service:
name: systemd-resolved name: systemd-resolved
enabled: false enabled: false
state: stopped state: stopped
ignore_errors: true
- name: Configure resolv.conf - name: Disable unbound
copy: service:
src: files/resolv.conf name: unbound
dest: "{{ item }}" enabled: false
loop: state: stopped
- /etc/resolv.conf ignore_errors: true
- /run/systemd/resolve/resolv.conf
# We download Calico manifest on all nodes because we then want to download
# Calico images BEFORE deploying it
- name: Download Calico manifest
shell: |
curl -LSs {{ calico_manifest_url }} -o /tmp/calico.yaml
sed -i -e 's#docker.io/calico/#quay.io/calico/#g' /tmp/calico.yaml
args:
executable: /bin/bash
# Download images needed for calico before applying manifests, so that `kubectl wait` timeout
# for `k8s-app=kube-dns` isn't reached by slow download speeds
- name: Download Calico images
shell: |
export CONTAINER_RUNTIME_ENDPOINT=unix:///run/containerd/containerd.sock
export IMAGE_SERVICE_ENDPOINT=unix:///run/containerd/containerd.sock
awk '/image:/ { print $2 }' /tmp/calico.yaml | xargs -I{} crictl pull {}
args:
executable: /bin/bash
... ...

View File

@ -0,0 +1,39 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Bootstraps the Kubernetes control plane with kubeadm and exports the
# join command and admin kubeconfig for use by the other task files.
# etcd on tmpfs speeds up the test cluster and avoids disk I/O flakiness
- name: Mount tmpfs to /var/lib/etcd
  mount:
    path: /var/lib/etcd
    src: tmpfs
    fstype: tmpfs
    opts: size=1g
    state: mounted
- name: Prepare kubeadm config
  template:
    src: files/kubeadm_config.yaml
    dest: /tmp/kubeadm_config.yaml
- name: Initialize the Kubernetes cluster using kubeadm
  command: kubeadm init --config /tmp/kubeadm_config.yaml
# join_command is read later from this host's hostvars to join the
# worker nodes to the cluster
- name: Generate join command
  command: kubeadm token create --print-join-command
  register: join_command
# Pulled to the Ansible controller; later pushed to the client node
- name: "Copy kube config to localhost"
  synchronize:
    mode: pull
    src: /etc/kubernetes/admin.conf
    dest: /tmp/kube_config
...

View File

@ -20,42 +20,63 @@
src: files/hosts src: files/hosts
dest: /etc/hosts dest: /etc/hosts
- name: Loop devices
include_tasks:
file: loopback_devices.yaml
when: loopback_setup and inventory_hostname in (groups['k8s_cluster'] | default([]))
- name: Deploy Containerd - name: Deploy Containerd
include_tasks: include_tasks:
file: containerd.yaml file: containerd.yaml
- name: Common K8s tasks - name: Include K8s common tasks
include_tasks: include_tasks:
file: common_k8s.yaml file: k8s_common.yaml
when: inventory_hostname in (groups['k8s_cluster'] | default([]))
- name: Include control-plane tasks - name: Include K8s control-plane tasks
include_tasks: include_tasks:
file: control-plane.yaml file: k8s_control_plane.yaml
when: inventory_hostname == 'primary' when: inventory_hostname in (groups['k8s_control_plane'] | default([]))
- name: Join workload nodes to cluster - name: Join workload nodes to cluster
command: "{{ hostvars['primary']['join_command'].stdout_lines[0] }}" command: "{{ (groups['k8s_control_plane'] | map('extract', hostvars, ['join_command', 'stdout_lines', 0]))[0] }}"
when: inventory_hostname in (groups['nodes'] | default([])) when: inventory_hostname in (groups['k8s_nodes'] | default([]))
- name: Wait for cluster is ready - name: Include K8s client tasks
become: false
block:
- name: Sleep 10 before checking calico nodes
pause:
seconds: 10
- name: Wait for Calico pods ready
command: kubectl -n kube-system wait --timeout=240s --for=condition=Ready pods -l k8s-app=calico-node
when: inventory_hostname == 'primary'
- name: Add coredns to /etc/resolv.conf
lineinfile:
line: nameserver 10.96.0.10
path: /etc/resolv.conf
state: present
insertbefore: "BOF"
- name: Loop devices
include_tasks: include_tasks:
file: loopback_devices.yaml file: k8s_client.yaml
when: loopback_setup when: inventory_hostname in (groups['primary'] | default([]))
- name: Include K8s Calico tasks
include_tasks:
file: calico.yaml
- name: Include coredns resolver tasks
include_tasks:
file: coredns_resolver.yaml
- name: Include Openstack provider gateway tasks
include_tasks:
file: openstack_provider_gateway.yaml
when:
- openstack_provider_gateway_setup
- inventory_hostname in (groups['k8s_control_plane'] | default([]))
- name: Include Metallb tasks
include_tasks:
file: metallb.yaml
when: metallb_setup
- name: Include Openstack Metallb endpoint tasks
include_tasks:
file: openstack_metallb_endpoint.yaml
when:
- metallb_setup
- inventory_hostname in (groups['primary'] | default([]))
- name: Include client-to-cluster tunnel tasks
include_tasks:
file: client_cluster_tunnel.yaml
when: (groups['primary'] | difference(groups['k8s_control_plane']) | length > 0)
... ...

View File

@ -0,0 +1,64 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Deploys MetalLB via its Helm chart and configures an L2-advertised
# address pool from which LoadBalancer services get their external IPs.
- name: Deploy MetalLB
  become: false
  when: inventory_hostname in (groups['primary'] | default([]))
  block:
    - name: Add MetalLB chart repo
      kubernetes.core.helm_repository:
        name: metallb
        repo_url: "https://metallb.github.io/metallb"
    - name: Install MetalLB
      kubernetes.core.helm:
        name: metallb
        chart_ref: metallb/metallb
        namespace: metallb-system
        create_namespace: true
    # Give the controller/speaker pods time to be created before waiting
    - name: Sleep before trying to check MetalLB pods
      pause:
        seconds: 30
    - name: Wait for MetalLB pods ready
      command: kubectl -n metallb-system wait --timeout=240s --for=condition=Ready pods -l 'app.kubernetes.io/name=metallb'
    # Creates the IPAddressPool and matching L2Advertisement custom
    # resources by rendering manifests to /tmp and applying with kubectl
    - name: Create MetalLB address pool
      shell: |
        tee > /tmp/metallb_ipaddresspool.yaml <<EOF
        ---
        apiVersion: metallb.io/v1beta1
        kind: IPAddressPool
        metadata:
          name: public
          namespace: metallb-system
        spec:
          addresses:
            - "{{ metallb_pool_cidr }}"
        EOF
        kubectl apply -f /tmp/metallb_ipaddresspool.yaml
        tee > /tmp/metallb_l2advertisement.yaml <<EOF
        ---
        apiVersion: metallb.io/v1beta1
        kind: L2Advertisement
        metadata:
          name: public
          namespace: metallb-system
        spec:
          ipAddressPools:
            - public
        EOF
        kubectl apply -f /tmp/metallb_l2advertisement.yaml
...

View File

@ -0,0 +1,77 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Creates a MetalLB-backed LoadBalancer service for the Openstack ingress
# and runs a dnsmasq container on the primary node resolving
# openstack.svc.cluster.local (and subdomains) to that service IP.
- name: Create openstack ingress service
  become: false
  shell: |
    tee > /tmp/openstack_endpoint_service.yaml <<EOF
    ---
    apiVersion: v1
    kind: Namespace
    metadata:
      labels:
        kubernetes.io/metadata.name: openstack
        name: openstack
      name: openstack
    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: public-openstack
      namespace: openstack
      annotations:
        metallb.universe.tf/loadBalancerIPs: "{{ metallb_openstack_endpoint_cidr | ipaddr('address') }}"
    spec:
      externalTrafficPolicy: Cluster
      type: LoadBalancer
      selector:
        app: ingress-api
      ports:
        - name: http
          port: 80
        - name: https
          port: 443
    EOF
    kubectl apply -f /tmp/openstack_endpoint_service.yaml
# dnsmasq listens on the primary node's default IP
- name: Set dnsmasq listen ip
  set_fact:
    nameserver_ip: "{{ (groups['primary'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']))[0] }}"
- name: Start dnsmasq
  docker_container:
    name: endpoint_dnsmasq
    image: docker.io/openstackhelm/neutron:2023.2-ubuntu_jammy
    network_mode: host
    capabilities:
      - NET_ADMIN
    entrypoint: dnsmasq
    command: |
      --keep-in-foreground
      --no-hosts
      --bind-interfaces
      --address="/openstack.svc.cluster.local/{{ metallb_openstack_endpoint_cidr | ipaddr('address') }}"
      --listen-address="{{ nameserver_ip }}"
      --no-resolv
      --server=8.8.8.8
    state: started
    recreate: yes
# files/resolv.conf renders "nameserver {{ nameserver_ip }}", pointing the
# node at the dnsmasq container started above
- name: Configure /etc/resolv.conf
  template:
    src: files/resolv.conf
    dest: /etc/resolv.conf
    owner: root
    group: root
    mode: 0644
...

View File

@ -0,0 +1,78 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Turns the control-plane node into a gateway for the Openstack provider
# network: a TAP interface holding the gateway address, SNAT towards the
# outside world, an nginx TCP proxy to the MetalLB endpoint, and a dnsmasq
# instance serving DNS on the provider network.
- name: Set cluster device
  set_fact:
    # cluster_default_dev: "{{ (groups['k8s_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'interface']))[0] }}"
    cluster_default_dev: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['interface'] }}"
- name: Set up TAP interface on cluster control-plane node
  shell: |
    ip tuntap add name provider1 mode tap
    ip link set provider1 up
    ip addr add {{ openstack_provider_gateway_cidr }} dev provider1
- name: Set up SNAT for packets going outside the cluster
  shell: |
    iptables -t nat -A POSTROUTING -o {{ cluster_default_dev }} -s {{ openstack_provider_network_cidr }} -j MASQUERADE
# We use tcp proxy to forward traffic to make it possible to connect
# to the Openstack public endpoint (managed by Metallb) from VMs.
- name: Setup TCP proxy
  when: metallb_setup
  block:
    - name: Prepare nginx tcp proxy config
      template:
        src: files/nginx_tcp_proxy.conf
        dest: /tmp/nginx_tcp_proxy.conf
        owner: root
        group: root
        mode: 0644
    # NET_ADMIN/NET_RAW are needed for nginx's transparent proxy_bind
    - name: Start provider network tcp proxy
      docker_container:
        name: nginx_tcp_proxy
        image: docker.io/nginx:alpine3.18
        network_mode: host
        capabilities:
          - NET_ADMIN
          - NET_RAW
        mounts:
          - source: /tmp/nginx_tcp_proxy.conf
            target: /etc/nginx/nginx.conf
            type: bind
        entrypoint: nginx
        command: |
          -g 'daemon off;'
        state: started
        recreate: yes
# Resolves openstack.svc.cluster.local to the gateway address and forwards
# everything else to 8.8.8.8
- name: Start provider network dnsmasq
  docker_container:
    name: provider_dnsmasq
    image: docker.io/openstackhelm/neutron:2023.2-ubuntu_jammy
    network_mode: host
    capabilities:
      - NET_ADMIN
    entrypoint: dnsmasq
    command: |
      --keep-in-foreground
      --no-hosts
      --bind-interfaces
      --address="/openstack.svc.cluster.local/{{ openstack_provider_gateway_cidr | ipaddr('address') }}"
      --listen-address="{{ openstack_provider_gateway_cidr | ipaddr('address') }}"
      --no-resolv
      --server=8.8.8.8
    state: started
    recreate: yes
...

View File

@ -22,52 +22,49 @@
state: present state: present
filename: ceph.list filename: ceph.list
- name: Add Kubernetes apt repository key
apt_key:
url: "https://pkgs.k8s.io/core:/stable:/{{ kube_version_repo }}/deb/Release.key"
state: present
- name: Add Kubernetes apt repository
apt_repository:
repo: "deb https://pkgs.k8s.io/core:/stable:/{{ kube_version_repo }}/deb/ /"
state: present
filename: kubernetes.list
- name: Install necessary packages - name: Install necessary packages
apt: apt:
pkg: pkg:
- socat - apt-transport-https
- jq
- util-linux
- bridge-utils
- iptables
- conntrack
- libffi-dev
- ipvsadm
- make
- bc - bc
- git-review - bridge-utils
- notary
- ceph-common
- rbd-nbd
- nfs-common
- ethtool
- python3-dev
- ca-certificates - ca-certificates
- git - ceph-common
- nmap - conntrack
- curl - curl
- uuid-runtime - ethtool
- net-tools - git
- git-review
- gnupg2
- iptables
- ipvsadm
- jq
- less - less
- telnet - libffi-dev
- tcpdump
- vim
- lvm2 - lvm2
- make
- name: Deploy Helm - net-tools
when: inventory_hostname == 'primary' - nfs-common
block: - nmap
- name: Install Helm - notary
shell: | - python3-dev
TMP_DIR=$(mktemp -d) - rbd-nbd
curl -sSL https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz | tar -zxv --strip-components=1 -C ${TMP_DIR} - socat
mv "${TMP_DIR}"/helm /usr/local/bin/helm - tcpdump
rm -rf "${TMP_DIR}" - telnet
args: - util-linux
executable: /bin/bash - uuid-runtime
- vim
# This is to improve build time - wireguard
- name: Remove stable Helm repo
command: helm repo remove stable
ignore_errors: true
... ...

View File

@ -308,4 +308,23 @@
parent: openstack-helm-compute-kit-ovn-2023-2-ubuntu_jammy parent: openstack-helm-compute-kit-ovn-2023-2-ubuntu_jammy
files: files:
- ^ovn/.* - ^ovn/.*
- ^roles/deploy-env.*
- job:
name: openstack-helm-infra-compute-kit-2023-2-ubuntu_jammy
parent: openstack-helm-compute-kit-2023-2-ubuntu_jammy
files:
- ^roles/deploy-env.*
- job:
name: openstack-helm-infra-cinder-2023-2-ubuntu_jammy
parent: openstack-helm-cinder-2023-2-ubuntu_jammy
files:
- ^roles/deploy-env.*
- job:
name: openstack-helm-infra-tls-2023-1-ubuntu_focal
parent: openstack-helm-tls-2023-1-ubuntu_focal
files:
- ^roles/deploy-env.*
... ...

View File

@ -31,6 +31,9 @@
- openstack-helm-infra-mariadb-operator - openstack-helm-infra-mariadb-operator
- openstack-helm-compute-kit-dpdk-ubuntu_jammy - openstack-helm-compute-kit-dpdk-ubuntu_jammy
- openstack-helm-infra-compute-kit-ovn-2023-2-ubuntu_jammy - openstack-helm-infra-compute-kit-ovn-2023-2-ubuntu_jammy
- openstack-helm-infra-compute-kit-2023-2-ubuntu_jammy
- openstack-helm-infra-cinder-2023-2-ubuntu_jammy
- openstack-helm-infra-tls-2023-1-ubuntu_focal
gate: gate:
jobs: jobs:
- openstack-helm-lint - openstack-helm-lint