Update to CAPO 0.8.0 + v1alpha7 (#110)

Matt Pryor 2023-09-22 15:29:36 +01:00 committed by GitHub
parent 2a132eb122
commit 11c17bbac2
18 changed files with 412 additions and 272 deletions

.github/actions/setup/action.yml

@@ -0,0 +1,115 @@
name: Set up test environment
description: >-
Sets up a Cluster API management cluster for a test.
inputs:
dependencies-path:
description: Path to the dependencies file to use.
default: dependencies.json
runs:
using: "composite"
steps:
- name: Read dependencies
id: deps
shell: bash
run: |
echo "addon-provider=$(jq -r '.["addon-provider"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
echo "cluster-api=$(jq -r '.["cluster-api"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
echo "cluster-api-janitor-openstack=$(jq -r '.["cluster-api-janitor-openstack"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
echo "cluster-api-provider-openstack=$(jq -r '.["cluster-api-provider-openstack"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
echo "cert-manager=$(jq -r '.["cert-manager"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
echo "helm=$(jq -r '.["helm"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
echo "sonobuoy=$(jq -r '.["sonobuoy"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
env:
DEPENDENCIES_PATH: ${{ inputs.dependencies-path }}
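For reference, a minimal sketch of what this step resolves to, using the dependencies.json shown at the bottom of this diff:

  jq -r '.["cluster-api"]' dependencies.json                     # -> v1.5.1
  jq -r '.["cluster-api-provider-openstack"]' dependencies.json  # -> v0.8.0
  # Appending key=value lines to $GITHUB_OUTPUT exposes the values to
  # later steps as ${{ steps.deps.outputs.<key> }}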
- name: Install tools
shell: bash
run: sudo apt install -y zip unzip
- name: Install sonobuoy
shell: bash
run: >
wget https://github.com/vmware-tanzu/sonobuoy/releases/download/${SONOBUOY_VERSION}/sonobuoy_${SONOBUOY_VERSION:1}_linux_amd64.tar.gz &&
tar -xf sonobuoy_${SONOBUOY_VERSION:1}_linux_amd64.tar.gz &&
sudo install -o root -g root -m 0755 sonobuoy /usr/local/bin/sonobuoy &&
sonobuoy version
env:
SONOBUOY_VERSION: ${{ steps.deps.outputs.sonobuoy }}
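The ${SONOBUOY_VERSION:1} expansion strips the leading "v" from the tag, since the release tarballs are named without it. A quick illustration with the pinned version:

  SONOBUOY_VERSION=v0.56.16     # value from dependencies.json
  echo "${SONOBUOY_VERSION:1}"  # -> 0.56.16
  # so the URL resolves to .../v0.56.16/sonobuoy_0.56.16_linux_amd64.tar.gz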
- uses: actions/setup-python@v4
with:
python-version: '3.9'
check-latest: true
- name: Set up Helm
uses: azure/setup-helm@v3
with:
version: ${{ steps.deps.outputs.helm }}
- name: Install cert-manager
shell: bash
run: |-
helm upgrade cert-manager cert-manager \
--repo https://charts.jetstack.io \
--version ${{ steps.deps.outputs.cert-manager }} \
--namespace cert-manager \
--create-namespace \
--install \
--set installCRDs=true \
--wait \
--timeout 10m
- name: Install clusterctl
shell: bash
run: >
curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_VERSION}/clusterctl-linux-amd64 -o clusterctl &&
sudo install -o root -g root -m 0755 clusterctl /usr/local/bin/clusterctl &&
clusterctl version
env:
CAPI_VERSION: ${{ steps.deps.outputs.cluster-api }}
- name: Check if Cluster API is already installed
id: capi-check
shell: bash
run: kubectl get provider -n capi-system cluster-api
continue-on-error: true
- name: Install or upgrade Cluster API controllers
shell: bash
run: >
clusterctl ${{ steps.capi-check.outcome == 'success' && 'upgrade apply' || 'init' }} \
--core cluster-api:${CAPI_VERSION} \
--control-plane kubeadm:${CAPI_VERSION} \
--bootstrap kubeadm:${CAPI_VERSION} \
--infrastructure openstack:${CAPO_VERSION} \
--wait-providers
env:
CAPI_VERSION: ${{ steps.deps.outputs.cluster-api }}
CAPO_VERSION: ${{ steps.deps.outputs.cluster-api-provider-openstack }}
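GitHub Actions expressions have no ternary operator, so the outcome == 'success' && 'upgrade apply' || 'init' pattern stands in for one (safe here because both branches are non-empty strings). Because the check step has continue-on-error: true, the job proceeds either way, and the command resolves to one of the following (versions taken from dependencies.json, remaining flags elided):

  # fresh management cluster (capi-check failed)
  clusterctl init --core cluster-api:v1.5.1 --infrastructure openstack:v0.8.0 ...
  # providers already installed (capi-check succeeded)
  clusterctl upgrade apply --core cluster-api:v1.5.1 --infrastructure openstack:v0.8.0 ...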
- name: Install Cluster API add-on provider
shell: bash
run: |-
helm upgrade cluster-api-addon-provider cluster-api-addon-provider \
--repo https://stackhpc.github.io/cluster-api-addon-provider \
--version ${{ steps.deps.outputs.addon-provider }} \
--namespace capi-addon-system \
--create-namespace \
--install \
--wait \
--timeout 10m
- name: Install Cluster API janitor
shell: bash
run: |-
helm upgrade cluster-api-janitor-openstack cluster-api-janitor-openstack \
--repo https://stackhpc.github.io/cluster-api-janitor-openstack \
--version ${{ steps.deps.outputs.cluster-api-janitor-openstack }} \
--namespace capi-janitor-system \
--create-namespace \
--install \
--wait \
--timeout 10m
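A minimal sketch of how a job consumes this composite action, mirroring the test workflow later in this diff (the action assumes kubectl already points at a management cluster, here provided by kind):

  steps:
    - name: Checkout
      uses: actions/checkout@v3
    - name: Create kind cluster
      uses: helm/kind-action@v1.8.0
    - name: Set up test environment
      uses: ./.github/actions/setup
      with:
        dependencies-path: dependencies.json  # the default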


@@ -11,21 +11,19 @@ inputs:
os-client-config-file:
description: The path of the OpenStack clouds file
required: true
default: ./clouds.yml
default: ./clouds.yaml
os-cloud:
description: The name of the cloud within the OpenStack clouds file
required: true
default: openstack
chart-version:
description: >-
Use the specified chart version from the repo if given. If not,
use the chart from the directory in the current checkout.
chart-directory:
description: The directory containing the chart
required: true
default: ""
values-common-path:
description: The path to a file containing common values
default: charts/openstack-cluster
values-path:
description: The path to a file containing Helm values
required: true
default: ./values-common.yaml
default: ./values.yaml
kubernetes-version:
description: The Kubernetes version in the image
required: true
@@ -33,43 +31,37 @@ inputs:
description: The ID of the image to use
required: true
sonobuoy-mode:
description: Specify "full" to do a full Sonobuoy run, anything else runs a smoke test only
description: |
The mode for the Sonobuoy run.
One of certified-conformance, conformance-lite, non-disruptive-conformance, quick.
required: true
default: smoke
default: quick
sonobuoy-upload:
description: Specify "yes" to upload the Sonobuoy run as an artifact
required: true
default: "no"
skip-workload-status:
description: Specify "yes" to skip the workload status check
required: true
default: "no"
runs:
using: "composite"
steps:
- name: Update dependencies for chart
shell: bash
run: helm dependency update ${{ inputs.chart-directory }}
- name: Install or upgrade cluster from directory
shell: bash
run: |-
helm upgrade ${{ inputs.name }} ./charts/openstack-cluster \
helm upgrade ${{ inputs.name }} ${{ inputs.chart-directory }} \
--install \
--dependency-update \
--values ${{ inputs.os-client-config-file }} \
--values ${{ inputs.values-common-path }} \
--values ${{ inputs.values-path }} \
--set cloudName=${{ inputs.os-cloud }} \
--set kubernetesVersion=${{ inputs.kubernetes-version }} \
--set machineImageId=${{ inputs.image-id }}
if: "${{ inputs.chart-version == '' }}"
- name: Install or upgrade cluster from repository
shell: bash
run: |-
helm upgrade ${{ inputs.name }} openstack-cluster \
--repo https://stackhpc.github.io/capi-helm-charts \
--version ${{ inputs.chart-version }} \
--install \
--values ${{ inputs.os-client-config-file }} \
--values ${{ inputs.values-common-path }} \
--set cloudName=${{ inputs.os-cloud }} \
--set kubernetesVersion=${{ inputs.kubernetes-version }} \
--set machineImageId=${{ inputs.image-id }}
if: "${{ inputs.chart-version != '' }}"
# Wait for any upgrade to start before checking if it is complete
# This is to make sure the controller has actioned the update before
@@ -121,19 +113,28 @@ runs:
-o go-template='{{ .data.value | base64decode }}' \
> kubeconfig
- name: Run sonobuoy [smoke]
- name: Wait for all workload rollouts to complete
shell: bash
run: sonobuoy run --mode quick --wait
run: |-
set -e
NAMESPACES=$(kubectl get ns --no-headers --output jsonpath='{.items[*].metadata.name}')
for ns in $NAMESPACES; do
echo "namespace: $ns"
kubectl rollout status \
--namespace "$ns" \
--watch \
--timeout 20m \
deployments,statefulsets,daemonsets
done
env:
KUBECONFIG: ./kubeconfig
if: "${{ inputs.sonobuoy-mode != 'full' }}"
if: "${{ inputs.skip-workload-status != 'yes' }}"
- name: Run sonobuoy [full]
- name: Run sonobuoy
shell: bash
run: sonobuoy run --wait
run: sonobuoy run --mode ${{ inputs.sonobuoy-mode }} --wait
env:
KUBECONFIG: ./kubeconfig
if: "${{ inputs.sonobuoy-mode == 'full' }}"
- name: Retrieve sonobuoy results
shell: bash

.github/actions/upload-logs/action.yml

@@ -0,0 +1,27 @@
name: Upload logs
description: >-
Upload Cluster API controller logs as a workflow artifact.
inputs:
name-suffix:
description: The suffix to add to the controller logs artifact name.
required: true
runs:
using: "composite"
steps:
- name: Output controller logs
shell: bash
run: |
kubectl -n capi-system logs deploy/capi-controller-manager > capi-logs.txt
kubectl -n capi-kubeadm-control-plane-system logs deploy/capi-kubeadm-control-plane-controller-manager > capi-kubeadm-control-plane-logs.txt
kubectl -n capi-kubeadm-bootstrap-system logs deploy/capi-kubeadm-bootstrap-controller-manager > capi-kubeadm-bootstrap-logs.txt
kubectl -n capo-system logs deploy/capo-controller-manager > capo-logs.txt
kubectl -n capi-addon-system logs deploy/cluster-api-addon-provider > capi-addon-provider-logs.txt
- name: Upload controller log artifacts
uses: actions/upload-artifact@v3
with:
name: cluster-api-controller-logs-${{ inputs.name-suffix }}
path: ./*-logs.txt


@@ -40,10 +40,12 @@ jobs:
include:
- name: kube-1-26
image: ${{ fromJSON(needs.image_manifest.outputs.manifest).kubernetes-1-26-focal }}
skip: ${{ github.event.pull_request.draft }}
skip: false
# skip: ${{ github.event.pull_request.draft }}
- name: kube-1-27
image: ${{ fromJSON(needs.image_manifest.outputs.manifest).kubernetes-1-27-focal }}
skip: ${{ github.event.pull_request.draft }}
skip: false
# skip: ${{ github.event.pull_request.draft }}
- name: kube-1-28
image: ${{ fromJSON(needs.image_manifest.outputs.manifest).kubernetes-1-28-focal }}
skip: false
@@ -79,7 +81,7 @@ jobs:
if: ${{ !matrix.skip }}
- name: Write matrix outputs
uses: cloudposse/github-action-matrix-outputs-write@main
uses: cloudposse/github-action-matrix-outputs-write@0.4.2
with:
matrix-step-name: ${{ github.job }}
matrix-key: ${{ matrix.name }}
@@ -101,6 +103,6 @@ jobs:
steps:
- name: Read matrix outputs
id: matrix-outputs
uses: cloudposse/github-action-matrix-outputs-read@main
uses: cloudposse/github-action-matrix-outputs-read@0.1.1
with:
matrix-step-name: ensure_image


@@ -23,13 +23,12 @@ jobs:
- name: Set up chart-testing
uses: scrungus/chart-testing-action@v3.7.3
- name: Dummy values
uses: DamianReeves/write-file-action@master
with:
path: values.yml
write-mode: overwrite
contents: |
- name: Create dummy values
run: >
echo "$VALUES" > values.yaml
env:
VALUES: |
clouds:
openstack:
auth:
@@ -57,4 +56,4 @@ jobs:
--target-branch ${{ github.event.repository.default_branch }} \
--all \
--validate-maintainers=false \
--extra-values values.yml
--extra-values values.yaml


@@ -24,5 +24,5 @@ jobs:
with:
# Pass the images as JSON
images: ${{ toJSON(needs.ensure_capi_images.outputs) }}
# Only run the smoke tests on main
# Only run the sanity check on main
tests-full: false


@@ -32,7 +32,7 @@ jobs:
with:
# Pass the images as JSON
images: ${{ toJSON(needs.ensure_capi_images.outputs) }}
# If the PR is in draft, just run smoke tests
# If the PR is in draft, just run a sanity check
# If the PR is in review, run the full test suite
tests-full: ${{ !github.event.pull_request.draft }}
if: github.repository == 'stackhpc/capi-helm-charts'


@@ -8,153 +8,36 @@ on:
required: true
tests-full:
type: boolean
description: Indicates whether to run the full test suite or just a smoke test
description: Indicates whether to run the full test suite or just a sanity check
required: true
default: false
jobs:
test-chart:
# This job tests a clean deployment against the latest version
# It is the only job that runs when tests-full=false
# For tests-full=true it creates an internal network + router and runs Sonobuoy in conformance mode
# For tests-full=false it uses a pre-existing internal network and runs Sonobuoy in quick mode
latest:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Read dependencies
id: deps
run: |
echo "addon-provider=$(jq -r '.["addon-provider"]' ./dependencies.json)" >> $GITHUB_OUTPUT
echo "cluster-api=$(jq -r '.["cluster-api"]' ./dependencies.json)" >> $GITHUB_OUTPUT
echo "cluster-api-provider-openstack=$(jq -r '.["cluster-api-provider-openstack"]' ./dependencies.json)" >> $GITHUB_OUTPUT
echo "cert-manager=$(jq -r '.["cert-manager"]' ./dependencies.json)" >> $GITHUB_OUTPUT
echo "helm=$(jq -r '.["helm"]' ./dependencies.json)" >> $GITHUB_OUTPUT
echo "sonobuoy=$(jq -r '.["sonobuoy"]' ./dependencies.json)" >> $GITHUB_OUTPUT
- name: Create kind cluster
uses: helm/kind-action@v1.8.0
- name: Install tools
run: sudo apt install -y zip unzip
- name: Install sonobuoy
run: >
wget https://github.com/vmware-tanzu/sonobuoy/releases/download/${SONOBUOY_VERSION}/sonobuoy_${SONOBUOY_VERSION:1}_linux_amd64.tar.gz &&
tar -xf sonobuoy_${SONOBUOY_VERSION:1}_linux_amd64.tar.gz &&
sudo mv -n sonobuoy /usr/bin/
env:
SONOBUOY_VERSION: ${{ steps.deps.outputs.sonobuoy }}
- uses: actions/setup-python@v4
with:
python-version: '3.9'
check-latest: true
- name: Set up Helm
uses: azure/setup-helm@v3
with:
version: ${{ steps.deps.outputs.helm }}
- name: Create k8s Kind Cluster
uses: helm/kind-action@v1.5.0
- name: Install cert-manager
run: |-
helm upgrade cert-manager cert-manager \
--repo https://charts.jetstack.io \
--version ${{ steps.deps.outputs.cert-manager }} \
--namespace cert-manager \
--create-namespace \
--install \
--set installCRDs=true \
--wait \
--timeout 10m
- name: Ensure Cluster API kustomization directory exists
run: mkdir -p clusterapi
# From here: https://github.com/stackhpc/ansible-collection-azimuth-ops/blob/main/roles/clusterapi/defaults/main.yml
- name: Write Cluster API kustomization file
uses: DamianReeves/write-file-action@master
with:
path: clusterapi/kustomization.yaml
write-mode: overwrite
contents: |
resources:
- https://github.com/kubernetes-sigs/cluster-api/releases/download/${{ steps.deps.outputs.cluster-api }}/cluster-api-components.yaml
- https://github.com/kubernetes-sigs/cluster-api-provider-openstack/releases/download/${{ steps.deps.outputs.cluster-api-provider-openstack }}/infrastructure-components.yaml
patches:
- patch: |-
- op: replace
path: /spec/template/spec/containers/0/args
value:
- --leader-elect
- --metrics-bind-addr=localhost:8080
target:
kind: Deployment
namespace: capi-system
name: capi-controller-manager
- patch: |-
- op: replace
path: /spec/template/spec/containers/0/args
value:
- --leader-elect
- --metrics-bind-addr=localhost:8080
target:
kind: Deployment
namespace: capi-kubeadm-bootstrap-system
name: capi-kubeadm-bootstrap-controller-manager
- patch: |-
- op: replace
path: /spec/template/spec/containers/0/args
value:
- --leader-elect
- --metrics-bind-addr=localhost:8080
target:
kind: Deployment
namespace: capi-kubeadm-control-plane-system
name: capi-kubeadm-control-plane-controller-manager
- name: Install Cluster API resources
run: kubectl apply -k clusterapi/
- name: Wait for Cluster API controllers to become ready
run: |-
kubectl rollout status deployment/capi-controller-manager \
--namespace capi-system \
--timeout 5m \
&& \
kubectl rollout status deployment/capi-kubeadm-bootstrap-controller-manager \
--namespace capi-kubeadm-bootstrap-system \
--timeout 5m \
&& \
kubectl rollout status deployment/capi-kubeadm-control-plane-controller-manager \
--namespace capi-kubeadm-control-plane-system \
--timeout 5m \
&& \
kubectl rollout status deployment/capo-controller-manager \
--namespace capo-system \
--timeout 10m
- name: Install Cluster API add-on provider
run: |-
helm upgrade cluster-api-addon-provider cluster-api-addon-provider \
--repo https://stackhpc.github.io/cluster-api-addon-provider \
--version ${{ steps.deps.outputs.addon-provider }} \
--namespace capi-addon-system \
--create-namespace \
--install \
--wait \
--timeout 10m
- name: Set up test environment
uses: ./.github/actions/setup
- name: Write cloud credential
run: >
echo "$CLOUD" > clouds.yml
shell: bash
run: echo "$CLOUD" > ./clouds.yaml
env:
CLOUD: ${{ secrets.CLOUD }}
- name: Write common Helm values
uses: DamianReeves/write-file-action@master
with:
path: values-common.yaml
write-mode: overwrite
contents: |
- name: Write Helm values
run: echo "$VALUES" > ./values.yaml
env:
VALUES: |
clouds:
openstack:
auth:
@@ -171,26 +54,123 @@ jobs:
machineFlavor: ${{ secrets.NODE_GROUP_FLAVOR }}
machineCount: 2
#####
# For the smoke test, we do a clean deployment of the latest supported version
#####
- name: Apply network configuration
run: echo "$NETWORKING" >> ./values.yaml
env:
NETWORKING: |
clusterNetworking:
internalNetwork:
networkFilter:
tags: capi-helm-chart-ci
if: ${{ !inputs.tests-full }}
- name: Test clean Kubernetes 1.28 deployment
uses: ./.github/actions/upgrade-and-test
with:
name: ci-${{ github.run_id }}-kube-latest
name: ci-${{ github.run_id }}-${{ github.job }}
kubernetes-version: ${{ fromJson(inputs.images).kube-1-28-version }}
image-id: ${{ fromJson(inputs.images).kube-1-28-image }}
sonobuoy-upload: "yes"
if: ${{ !inputs.tests-full }}
sonobuoy-mode: ${{ inputs.tests-full && 'certified-conformance' || 'quick' }}
sonobuoy-upload: ${{ inputs.tests-full && 'yes' || 'no' }}
- name: Delete Kubernetes 1.28 deployment
run: helm delete ci-${{ github.run_id }}-kube-latest --wait
if: ${{ !inputs.tests-full && always() }}
run: helm delete ci-${{ github.run_id }}-${{ github.job }} --wait
if: ${{ always() }}
#####
# On a full test, first test that we can upgrade from a cluster deployed using the latest tag
#####
- name: Upload logs
uses: ./.github/actions/upload-logs
with:
name-suffix: ${{ github.job }}
if: ${{ always() }}
# This job tests Kubernetes upgrade
# It only runs for non-draft PRs
# It uses a pre-existing internal network
kube-upgrade:
runs-on: ubuntu-latest
if: ${{ inputs.tests-full }}
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Create kind cluster
uses: helm/kind-action@v1.8.0
- name: Set up test environment
uses: ./.github/actions/setup
- name: Write cloud credential
run: echo "$CLOUD" > ./clouds.yaml
env:
CLOUD: ${{ secrets.CLOUD }}
- name: Write Helm values
run: echo "$VALUES" > ./values.yaml
env:
VALUES: |
clouds:
openstack:
auth:
project_id: ${{ secrets.PROJECT_ID }}
verify: false
registryMirrors:
docker.io:
- ${{ secrets.DOCKER_HUB_MIRROR_URL }}
clusterNetworking:
internalNetwork:
networkFilter:
tags: capi-helm-chart-ci
controlPlane:
machineFlavor: ${{ secrets.CONTROL_PLANE_FLAVOR }}
machineCount: 1
nodeGroups:
- name: md-0
machineFlavor: ${{ secrets.NODE_GROUP_FLAVOR }}
machineCount: 2
- name: Deploy Kubernetes 1.26 for Kubernetes upgrade test
uses: ./.github/actions/upgrade-and-test
with:
name: ci-${{ github.run_id }}-${{ github.job }}
kubernetes-version: ${{ fromJson(inputs.images).kube-1-26-version }}
image-id: ${{ fromJson(inputs.images).kube-1-26-image }}
- name: Upgrade to Kubernetes 1.27
uses: ./.github/actions/upgrade-and-test
with:
name: ci-${{ github.run_id }}-${{ github.job }}
kubernetes-version: ${{ fromJson(inputs.images).kube-1-27-version }}
image-id: ${{ fromJson(inputs.images).kube-1-27-image }}
- name: Upgrade to Kubernetes 1.28
uses: ./.github/actions/upgrade-and-test
with:
name: ci-${{ github.run_id }}-${{ github.job }}
kubernetes-version: ${{ fromJson(inputs.images).kube-1-28-version }}
image-id: ${{ fromJson(inputs.images).kube-1-28-image }}
- name: Delete Kubernetes upgrade deployment
run: helm delete ci-${{ github.run_id }}-${{ github.job }} --wait
if: ${{ always() }}
- name: Upload logs
uses: ./.github/actions/upload-logs
with:
name-suffix: ${{ github.job }}
if: ${{ always() }}
# This job tests upgrading the chart + dependencies from the latest tag
# It only runs for non-draft PRs
# It uses a pre-existing internal network
# It installs ALL of the addons so that we test upgrading them
chart-upgrade:
runs-on: ubuntu-latest
if: ${{ inputs.tests-full }}
steps:
- name: Checkout current
uses: actions/checkout@v3
with:
path: current
- name: Get latest tag
id: latest-tag
@@ -198,76 +178,94 @@
set -eo pipefail
TAG_NAME="$(curl -fsSL "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases/latest" | jq -r '.tag_name')"
echo "tag-name=${TAG_NAME}" >> "$GITHUB_OUTPUT"
if: ${{ inputs.tests-full }}
- name: Deploy Kubernetes 1.28 with latest tag for chart upgrade test
uses: ./.github/actions/upgrade-and-test
- name: Checkout latest tag
uses: actions/checkout@v3
with:
name: ci-${{ github.run_id }}-chart-upgrade
ref: ${{ steps.latest-tag.outputs.tag-name }}
path: latest-tag
- name: Write cloud credential
run: echo "$CLOUD" > ./clouds.yaml
env:
CLOUD: ${{ secrets.CLOUD }}
- name: Write Helm values
run: echo "$VALUES" > ./values.yaml
env:
VALUES: |
clouds:
openstack:
auth:
project_id: ${{ secrets.PROJECT_ID }}
verify: false
registryMirrors:
docker.io:
- ${{ secrets.DOCKER_HUB_MIRROR_URL }}
clusterNetworking:
internalNetwork:
networkFilter:
tags: capi-helm-chart-ci
controlPlane:
machineFlavor: ${{ secrets.CONTROL_PLANE_FLAVOR }}
machineCount: 1
nodeGroups:
- name: md-0
machineFlavor: ${{ secrets.NODE_GROUP_FLAVOR }}
machineCount: 2
addons:
kubernetesDashboard:
enabled: true
monitoring:
enabled: true
- name: Create kind cluster
uses: helm/kind-action@v1.8.0
# For the setup, we use a merged dependencies file in case new dependencies
# are added by the code under test, ensuring that the older dependencies are
# used where they are specified
- name: Create merged dependencies file
run: >
jq -s '.[0] * .[1]' \
current/dependencies.json \
latest-tag/dependencies.json \
> dependencies-merged.json
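A quick sketch of the merge semantics with hypothetical file contents: jq -s slurps both files into an array, and * merges recursively with the right-hand object winning on shared keys, so versions pinned by the latest tag take precedence while keys that only exist in the code under test fall through:

  echo '{"cluster-api": "v9.9.9", "new-dep": "1.0.0"}' > current.json  # hypothetical
  echo '{"cluster-api": "v1.5.1"}' > latest-tag.json                   # hypothetical
  jq -s '.[0] * .[1]' current.json latest-tag.json
  # -> { "cluster-api": "v1.5.1", "new-dep": "1.0.0" }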
- name: Set up test environment with dependencies from latest tag
uses: ./current/.github/actions/setup
with:
dependencies-path: dependencies-merged.json
- name: Deploy cluster with chart from latest tag
uses: ./current/.github/actions/upgrade-and-test
with:
chart-directory: latest-tag/charts/openstack-cluster
name: ci-${{ github.run_id }}-${{ github.job }}
kubernetes-version: ${{ fromJson(inputs.images).kube-1-28-version }}
image-id: ${{ fromJson(inputs.images).kube-1-28-image }}
chart-version: ${{ steps.latest-tag.outputs.tag-name }}
if: ${{ inputs.tests-full }}
# TODO(mkjpryor) remove this once calico csi-node-driver is fixed in a tagged version
skip-workload-status: "yes"
- name: Upgrade to current chart
uses: ./.github/actions/upgrade-and-test
- name: Update test environment with current dependencies
uses: ./current/.github/actions/setup
with:
name: ci-${{ github.run_id }}-chart-upgrade
dependencies-path: current/dependencies.json
- name: Upgrade cluster to current chart
uses: ./current/.github/actions/upgrade-and-test
with:
chart-directory: current/charts/openstack-cluster
name: ci-${{ github.run_id }}-${{ github.job }}
kubernetes-version: ${{ fromJson(inputs.images).kube-1-28-version }}
image-id: ${{ fromJson(inputs.images).kube-1-28-image }}
if: ${{ inputs.tests-full }}
- name: Delete chart upgrade deployment
run: helm delete ci-${{ github.run_id }}-chart-upgrade --wait
if: ${{ inputs.tests-full && always() }}
#####
# On a full test, next run a full upgrade test
# This will run a sonobuoy smoke test after every step with a full test at the end
#####
- name: Deploy Kubernetes 1.26 for Kubernetes upgrade test
uses: ./.github/actions/upgrade-and-test
with:
name: ci-${{ github.run_id }}-kube-upgrade
kubernetes-version: ${{ fromJson(inputs.images).kube-1-26-version }}
image-id: ${{ fromJson(inputs.images).kube-1-26-image }}
if: ${{ inputs.tests-full }}
- name: Upgrade to Kubernetes 1.27
uses: ./.github/actions/upgrade-and-test
with:
name: ci-${{ github.run_id }}-kube-upgrade
kubernetes-version: ${{ fromJson(inputs.images).kube-1-27-version }}
image-id: ${{ fromJson(inputs.images).kube-1-27-image }}
if: ${{ inputs.tests-full }}
- name: Upgrade to Kubernetes 1.28
uses: ./.github/actions/upgrade-and-test
with:
name: ci-${{ github.run_id }}-kube-upgrade
kubernetes-version: ${{ fromJson(inputs.images).kube-1-28-version }}
image-id: ${{ fromJson(inputs.images).kube-1-28-image }}
sonobuoy-mode: full
sonobuoy-upload: "yes"
if: ${{ inputs.tests-full }}
- name: Delete Kubernetes upgrade deployment
run: helm delete ci-${{ github.run_id }}-kube-upgrade --wait
if: ${{ inputs.tests-full && always() }}
- name: Output controller logs
run: helm delete ci-${{ github.run_id }}-${{ github.job }} --wait
if: ${{ always() }}
run: |
kubectl -n capi-system logs deploy/capi-controller-manager > capi-logs.txt
kubectl -n capi-kubeadm-control-plane-system logs deploy/capi-kubeadm-control-plane-controller-manager > capi-kubeadm-control-plane-logs.txt
kubectl -n capi-kubeadm-bootstrap-system logs deploy/capi-kubeadm-bootstrap-controller-manager > capi-kubeadm-bootstrap-logs.txt
kubectl -n capo-system logs deploy/capo-controller-manager > capo-logs.txt
kubectl -n capi-addon-system logs deploy/cluster-api-addon-provider > capi-addon-provider-logs.txt
- name: Upload controller log artifacts
uses: actions/upload-artifact@v3
if: ${{ always() }}
- name: Upload logs
uses: ./current/.github/actions/upload-logs
with:
name: cluster-api-controller-logs
path: ./*-logs.txt
name-suffix: ${{ github.job }}
if: ${{ always() }}


@@ -24,6 +24,11 @@ jobs:
- key: cluster-api
type: github
repo: kubernetes-sigs/cluster-api
- key: cluster-api-janitor-openstack
type: helm
repo_url: https://stackhpc.github.io/cluster-api-janitor-openstack
chart_name: cluster-api-janitor-openstack
- key: cluster-api-provider-openstack
type: github


@@ -47,7 +47,7 @@ cni:
chart:
repo: https://projectcalico.docs.tigera.io/charts
name: tigera-operator
version: v3.24.5
version: v3.24.6
release:
namespace: tigera-operator
values: {}


@@ -1,5 +1,5 @@
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha6
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha7
kind: OpenStackCluster
metadata:
name: {{ include "openstack-cluster.clusterName" . }}


@@ -13,7 +13,7 @@ spec:
name: {{ include "openstack-cluster.componentName" (list . "control-plane") }}
namespace: {{ .Release.Namespace }}
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha6
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha7
kind: OpenStackCluster
name: {{ include "openstack-cluster.clusterName" . }}
namespace: {{ .Release.Namespace }}


@@ -84,7 +84,7 @@ spec:
labels: {{ include "openstack-cluster.componentSelectorLabels" (list . "control-plane") | nindent 8 }}
infrastructureRef:
kind: OpenStackMachineTemplate
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha6
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha7
name: {{ include "openstack-cluster.controlplane.mt.name" . }}
namespace: {{ .Release.Namespace }}
nodeDrainTimeout: {{ .Values.controlPlane.nodeDrainTimeout }}


@@ -23,9 +23,6 @@ template:
{{- else }}
{{- fail "Either machineImage or machineImageId is required" }}
{{- end }}
{{- with .Values.controlPlane.machineNetworking.networks }}
networks: {{ toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.controlPlane.machineNetworking.ports }}
ports: {{ toYaml . | nindent 6 }}
{{- end }}
@@ -40,7 +37,7 @@ template:
{{- include "openstack-cluster.componentName" (list . "control-plane") }}-{{ trunc 8 $checksum }}
{{- end }}
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha6
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha7
kind: OpenStackMachineTemplate
metadata:
name: {{ include "openstack-cluster.controlplane.mt.name" . }}


@@ -57,7 +57,7 @@ spec:
kind: KubeadmConfigTemplate
name: {{ include "openstack-cluster.nodegroup.kct.name" (list $ $nodeGroup) }}
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha6
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha7
kind: OpenStackMachineTemplate
name: {{ include "openstack-cluster.nodegroup.mt.name" (list $ $nodeGroup) }}
nodeDrainTimeout: {{ $nodeGroup.nodeDrainTimeout }}


@@ -25,9 +25,6 @@ template:
{{- else }}
{{- fail "Either machineImage or machineImageId is required" }}
{{- end }}
{{- with $nodeGroup.machineNetworking.networks }}
networks: {{ toYaml . | nindent 6 }}
{{- end }}
{{- with $nodeGroup.machineNetworking.ports }}
ports: {{ toYaml . | nindent 6 }}
{{- end }}
@@ -47,7 +44,7 @@ template:
{{- range $nodeGroupOverrides := .Values.nodeGroups }}
{{- $nodeGroup := deepCopy $.Values.nodeGroupDefaults | mustMerge $nodeGroupOverrides }}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha6
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha7
kind: OpenStackMachineTemplate
metadata:
name: {{ include "openstack-cluster.nodegroup.mt.name" (list $ $nodeGroup) }}


@@ -130,11 +130,10 @@ controlPlane:
machineCount: 3
# The flavor to use for control plane machines
machineFlavor:
# The networks and ports for control plane nodes
# If neither networks or ports are given, the cluster internal network is used
# The ports for control plane nodes
# If no ports are given, the cluster internal network is used
# See https://github.com/kubernetes-sigs/cluster-api-provider-openstack/blob/master/docs/book/src/clusteropenstack/configuration.md#network-filters
machineNetworking:
networks:
ports:
# The root volume spec for control plane machines
machineRootVolume:
@@ -225,11 +224,10 @@ nodeGroupDefaults:
failureDomain:
# The flavor to use for machines in the node group
machineFlavor:
# The default networks and ports for worker nodes
# If neither networks or ports are given, the cluster internal network is used
# The default ports for worker nodes
# If no ports are given, the cluster internal network is used
# See https://github.com/kubernetes-sigs/cluster-api-provider-openstack/blob/master/docs/book/src/clusteropenstack/configuration.md#network-filters
machineNetworking:
networks:
ports:
# The root volume spec for machines in the node group
machineRootVolume:
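For anyone carrying a machineNetworking.networks override, a hedged sketch of the equivalent ports form; the filter fields come from the CAPO v1alpha7 PortOpts schema documented at the link above, and the network name here is hypothetical:

  controlPlane:
    machineNetworking:
      ports:
        - network:
            name: my-internal-net  # hypothetical; see the linked CAPO docs for the full filter schema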


@@ -2,7 +2,8 @@
"addon-provider": "0.1.0",
"azimuth-images": "0.2.0",
"cluster-api": "v1.5.1",
"cluster-api-provider-openstack": "v0.7.3",
"cluster-api-janitor-openstack": "0.1.0",
"cluster-api-provider-openstack": "v0.8.0",
"cert-manager": "v1.12.3",
"helm": "v3.12.3",
"sonobuoy": "v0.56.16"