Merge pull request #14 from stackhpc/ci

Add CI changes
John Garbutt 2022-11-18 12:20:39 +00:00 committed by GitHub
commit bd02690f58
4 changed files with 65 additions and 18 deletions

.github/workflows/lint.yaml (new file)

@@ -0,0 +1,33 @@
name: Lint Helm Charts
# Run the tasks on every pull request
on: [pull_request]
jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Helm
        uses: azure/setup-helm@v3
        with:
          version: v3.10.0
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          check-latest: true
      - name: Set up chart-testing
        uses: scrungus/chart-testing-action@v3.7.3
      - name: Copy cloud creds to file
        run: 'echo "$TYLER_CLOUDS" > clouds.yml'
        shell: bash
        env:
          TYLER_CLOUDS: ${{ secrets.TYLER_CLOUDS }}
      - name: Run chart-testing (lint)
        run: ct lint --target-branch ${{ github.event.repository.default_branch }} --all --validate-maintainers=false --extra-values clouds.yml
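
For reference, the TYLER_CLOUDS secret is expected to contain an OpenStack clouds.yaml document, which the workflow writes to clouds.yml and passes to ct lint as extra Helm values. A minimal sketch of the expected shape; the cloud name, endpoint and application credential values are illustrative placeholders, not values from this repository:

    clouds:
      openstack:
        auth:
          # Placeholder endpoint and credentials
          auth_url: https://keystone.example.com:5000/v3
          application_credential_id: "<credential-id>"
          application_credential_secret: "<credential-secret>"
        auth_type: v3applicationcredential
        region_name: RegionOne

The lint step can be reproduced locally with ct lint --target-branch main --all --validate-maintainers=false --extra-values clouds.yml, assuming helm and ct are installed and the repository's default branch is main.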

cluster-addons values.yaml

@@ -1,12 +1,12 @@
 # The name of the Cluster API cluster
-# If not given, the release name is used
+# if not given, the release name is used
+---
 clusterName:
 # Settings for hook jobs
 hooks:
   image:
     repository: ghcr.io/stackhpc/capi-helm-utils
-    tag: # Defaults to chart appVersion if not given
+    tag: # Defaults to chart appVersion if not given
     pullPolicy: IfNotPresent
   imagePullSecrets: []
   backoffLimit: 1000
@@ -218,7 +218,7 @@ mellanoxNetworkOperator:
 # Settings for any custom addons
 custom: {}
-  # # Indexed by the name of the release on the target cluster
+  # # Indexed by the name of the release on the target cluster
   # my-custom-helm-release:
   #   # Indicates that this is a Helm addon
   #   kind: HelmRelease
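
Uncommented, the scaffold above corresponds to a custom addon entry like the sketch below. Only the release name key, the nested comment and kind: HelmRelease come from the chart's example; the spec fields are assumptions based on typical Helm-release addon schemas, so consult the chart documentation for the real field names:

    custom:
      my-custom-helm-release:
        # Indicates that this is a Helm addon
        kind: HelmRelease
        # The fields below are assumed for illustration, not taken from this diff
        spec:
          chart:
            repo: https://charts.example.com
            name: my-chart
            version: 1.0.0
          values: {}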

openstack-cluster CI test values (new file)

@@ -0,0 +1,12 @@
---
kubernetesVersion: 1.24.2
machineImageId: 9b5d513c-67b4-4fd6-8bf1-3bf4525c04c3
controlPlane:
  machineFlavor: vm.alaska.cpu.general.small
nodeGroups:
  - machineCount: 2
    machineFlavor: vm.alaska.cpu.general.small
    name: test-group
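
chart-testing lints a chart once per *-values.yaml file found in that chart's ci/ directory, so a file like this exercises template rendering with a small but realistic shape: one control-plane flavor plus a two-machine worker group. Note that ct lint only renders templates; the machineImageId and flavors here need to exist on the target cloud only when the chart is actually installed. Rendering can also be checked locally with something like helm template test ./openstack-cluster -f ci/test-values.yaml -f clouds.yml, where the chart path and values file name are placeholders for this repository's actual layout.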

openstack-cluster values.yaml

@@ -1,4 +1,5 @@
 # The name of an existing secret containing a clouds.yaml and optional cacert
+---
 cloudCredentialsSecretName:
 # OR
 # Content for the clouds.yaml file
@@ -51,10 +52,10 @@ clusterNetworking:
   # Details of the internal network to use
   internalNetwork:
     # Filter to find an existing network for the cluster internal network
-    # See Cluster API documentation for details
+    # see Cluster API documentation for details
     networkFilter:
-      # id: e63ca1a0-f69d-4fbf-b306-310857b1afe5
-      # name: tenant-internal-net
+      # id: e63ca1a0-f69d-4fbf-b306-310857b1afe5
+      # name: tenant-internal-net
     # Filter to find an existing subnet for the cluster internal network
     # See Cluster API documentation for details
     subnetFilter:
@@ -134,8 +135,9 @@ controlPlane:
   # The time to wait for a node to finish draining before it can be removed
   nodeDrainTimeout: 5m
   # The rollout strategy to use for the control plane nodes
   # By default, the strategy allows the control plane to begin provisioning new nodes
-  # without first tearing down old ones
+  # without first tearing down old ones
   rolloutStrategy:
     type: RollingUpdate
     rollingUpdate:
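
The hunk above is truncated after rollingUpdate:. Based on the comment, the continuation presumably sets a surge so that a replacement node is created before an old one is removed; a sketch of that shape, where the maxSurge value is an assumption matching the comment rather than anything shown in this diff:

    rolloutStrategy:
      type: RollingUpdate
      rollingUpdate:
        # Bring up one new control plane node before tearing an old one down
        maxSurge: 1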
@@ -150,8 +152,8 @@ controlPlane:
     kubeletExtraArgs:
       cloud-provider: external
   # As well as enabling an external cloud provider, we set the bind addresses for the
-  # etcd metrics, controller-manager, scheduler and kube-proxy to 0.0.0.0 so that Prometheus
-  # can reach them to collect metrics
+  # etcd metrics, controller-manager, scheduler and kube-proxy to 0.0.0.0 so that Prometheus
+  # can reach them to collect metrics
   clusterConfiguration:
     etcd:
       local:
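
This hunk is cut off inside clusterConfiguration. The comment describes binding the etcd metrics endpoint and the controller-manager and scheduler to 0.0.0.0 for Prometheus; in stock kubeadm terms that usually looks like the sketch below. The exact keys used by this chart are an assumption, and kube-proxy's metrics bind address lives in its own KubeProxyConfiguration rather than here:

    clusterConfiguration:
      etcd:
        local:
          extraArgs:
            # Expose etcd metrics on all interfaces
            listen-metrics-urls: http://0.0.0.0:2381
      controllerManager:
        extraArgs:
          bind-address: 0.0.0.0
      scheduler:
        extraArgs:
          bind-address: 0.0.0.0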
@@ -184,7 +186,7 @@ controlPlane:
   # By default, unhealthy control plane nodes are always remediated
   maxUnhealthy: 100%
   # By default, consider a control plane node that has not been Ready
-  # for more than 5 mins unhealthy
+  # for more than 5 mins unhealthy
   unhealthyConditions:
     - type: Ready
       status: Unknown
@@ -222,17 +224,17 @@ nodeGroupDefaults:
   # The time to wait for a node to finish draining before it can be removed
   nodeDrainTimeout: 5m
   # The rollout strategy to use for the node group
-  # By default, this is set to do a rolling update within the existing resource envelope
-  # of the node group, even if that means the node group temporarily has zero nodes
+  # By default, this is set to do a rolling update within the existing resource envelope
+  # of the node group, even if that means the node group temporarily has zero nodes
   rolloutStrategy:
     type: RollingUpdate
     rollingUpdate:
       # The maximum number of node group machines that can be unavailable during the update
-      # Can be an absolute number or a percentage of the desired count
+      # Can be an absolute number or a percentage of the desired count
       maxUnavailable: 1
       # The maximum number of machines that can be scheduled above the desired count for
-      # the group during an update
-      # Can be an absolute number or a percentage of the desired count
+      # the group during an update
+      # Can be an absolute number or a percentage of the desired count
       maxSurge: 0
       # One of Random, Newest, Oldest
       deletePolicy: Random
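
With the defaults above, a three-machine group is updated by deleting one machine (three down to two), waiting for its replacement (back to three), and repeating, so capacity briefly drops below the desired count but never exceeds it. A group that must not lose capacity can invert the envelope; a sketch of such an override, reusing only keys shown above:

    rolloutStrategy:
      type: RollingUpdate
      rollingUpdate:
        # Create one machine above the desired count before removing an old one
        maxUnavailable: 0
        maxSurge: 1
        deletePolicy: Oldest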
@@ -258,7 +260,7 @@ nodeGroupDefaults:
   # If a node takes longer than 10 mins to startup, remediate it
   nodeStartupTimeout: 10m
   # By default, consider a worker node that has not been Ready for
-  # more than 5 mins unhealthy
+  # more than 5 mins unhealthy
   unhealthyConditions:
     - type: Ready
       status: Unknown
@@ -269,7 +271,7 @@ nodeGroupDefaults:
 # The worker node groups for the cluster
 nodeGroups:
-  - # The name of the node group
+  - # The name of the node group
     name: md-0
     # The number of machines in the node group if autoscale is false
     machineCount: 3
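
Taken together, the last two hunks describe the worker health-check defaults. Expressed as values they correspond roughly to the sketch below; the timeout fields follow the "5 mins" comments, and the second condition (status: "False") is an assumption mirroring the usual Cluster API machine health check pairing rather than anything shown in this diff:

    # Parent key assumed; the diff shows only the nested fields
    healthCheck:
      nodeStartupTimeout: 10m
      unhealthyConditions:
        - type: Ready
          status: Unknown
          timeout: 5m
        - type: Ready
          status: "False"
          timeout: 5m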