---

global:
  # The prefix to use for images
  # Assumes the images have been synced with Skopeo using the configuration file
  # in this repository with the "--scoped" flag
  # This setting is shared with the addons
  # By default, use the mirrors on GitHub packages
  imagePrefix: ghcr.io/stackhpc/capi/

# The name of an existing secret containing a clouds.yaml and optional cacert
cloudCredentialsSecretName:
# OR
# Content for the clouds.yaml file
# Having this as a top-level item allows a clouds.yaml file from OpenStack to be used as a values file
clouds:
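  # For illustration only - the content mirrors the "clouds" section of a standard
  # OpenStack clouds.yaml, e.g. (all values below are placeholders):
  #   openstack:
  #     auth:
  #       auth_url: https://keystone.example.com:5000/v3
  #       application_credential_id: "<application credential ID>"
  #       application_credential_secret: "<application credential secret>"
  #     region_name: RegionOne
  #     interface: public
  #     identity_api_version: 3
  #     auth_type: v3applicationcredential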
# The PEM-encoded CA certificate for the specified cloud
cloudCACert:

# The name of the cloud to use from the specified clouds.yaml
cloudName: openstack

# The Kubernetes version of the cluster
# This should match the version of kubelet and kubeadm in the image
kubernetesVersion:

# The name of the image to use for cluster machines
machineImage:
# OR
# The ID of the image to use for cluster machines
machineImageId:
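# For example (hypothetical values - give either the image name or the image ID,
# and make sure the image contains the matching kubelet/kubeadm versions):
# machineImage: ubuntu-2204-kube-v1.26.3
# machineImageId: "<image UUID>"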

# The name of the SSH key to inject into cluster machines
machineSSHKeyName:

# The prefix used for project labels and annotations
projectPrefix: capi.stackhpc.com

# Any extra annotations to add to the cluster
clusterAnnotations: {}
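  # e.g. (hypothetical annotation key and value):
  # my.domain/environment: production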

# Values for the Kubernetes cluster network
kubeNetwork:
  # By default, use the private network range 172.16.0.0/12 for the cluster network
  # We split it into two equally-sized blocks for pods and services
  # This gives ~500,000 addresses in each block
  pods:
    cidrBlocks:
      - 172.16.0.0/13
  services:
    cidrBlocks:
      - 172.24.0.0/13
  serviceDomain: cluster.local

# Settings for the OpenStack networking for the cluster
clusterNetworking:
  # Custom nameservers to use for the hosts
  dnsNameservers:
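    # e.g. (public resolvers shown purely for illustration):
    # - 8.8.8.8
    # - 1.1.1.1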
  # Indicates if security groups should be managed by the cluster
  manageSecurityGroups: true
  # Indicates if the managed security groups should allow all in-cluster traffic
  # The default CNI installed by the addons is Cilium, so this is true by default
  allowAllInClusterTraffic: true
  # The ID of the external network to use
  # If not given, the external network will be detected
  externalNetworkId:
  # Details of the internal network to use
  internalNetwork:
    # Filter to find an existing network for the cluster internal network
    # See Cluster API documentation for details
    networkFilter:
      # id: e63ca1a0-f69d-4fbf-b306-310857b1afe5
      # name: tenant-internal-net
    # Filter to find an existing subnet for the cluster internal network
    # See Cluster API documentation for details
    subnetFilter:
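      # For illustration (hypothetical subnet name):
      # name: tenant-internal-subnet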
    # The CIDR to use if creating a cluster network
    # This is only used if neither networkFilter nor subnetFilter is given
    nodeCidr: 192.168.3.0/24

# Settings for registry mirrors
registryMirrors: {}
  # docker.io: ["https://registry.my.domain/v2/dockerhub-public"]

# Settings for node-level registry auth
registryAuth: {}
  # registry-1.docker.io:
  #   username: "<username>"
  #   password: "<password>"

# A map of trusted CAs to add to the system trust on cluster nodes
trustedCAs: {}
  # custom-ca: |
  #   -----BEGIN CERTIFICATE-----
  #   ...certificate data...
  #   -----END CERTIFICATE-----

# List of additional packages to install on cluster nodes
additionalPackages: []
  # - nfs-common

# Settings for the Kubernetes API server
apiServer:
  # Indicates whether to deploy a load balancer for the API server
  enableLoadBalancer: true
  # Indicates whether to associate a floating IP with the API server
  associateFloatingIP: true
  # The specific floating IP to associate with the API server
  # If not given, a new IP will be allocated if required
  floatingIP:
  # The specific fixed IP to associate with the API server
  # If enableLoadBalancer is true, this will become the VIP of the load balancer
  # If enableLoadBalancer and associateFloatingIP are both false, this should be
  # the IP of a pre-allocated port to be used as the VIP
  fixedIP:
  # The port to use for the API server
  port: 6443

# Settings for the control plane
controlPlane:
  # The failure domains to use for control plane nodes
  # If given, should be a list of availability zones
  # Only used when omitFailureDomain = false
  failureDomains:
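    # e.g. (availability zone names are cloud-specific; "nova" is just a common default):
    # - nova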
  # Indicates whether the failure domain should be omitted from control plane nodes
  omitFailureDomain: true
  # The number of control plane machines to deploy
  # For high-availability, this should be greater than 1
  # For etcd quorum, it should be odd - usually 3, or 5 for very large clusters
  machineCount: 3
  # The flavor to use for control plane machines
  machineFlavor:
  # The networks and ports for control plane nodes
  # If neither networks nor ports are given, the cluster internal network is used
  # See https://github.com/kubernetes-sigs/cluster-api-provider-openstack/blob/master/docs/book/src/clusteropenstack/configuration.md#network-filters
  machineNetworking:
    networks:
    ports:
  # The root volume spec for control plane machines
  # If not given, the ephemeral root disk from the flavor is used
  machineRootVolume:
    # # The size of the disk to use
    # diskSize:
    # # The volume type to use
    # # If not specified, the default volume type is used
    # volumeType:
    # # The volume availability zone to use
    # # If not specified, the machine availability zone is used
    # availabilityZone:
  # The time to wait for a node to finish draining before it can be removed
  nodeDrainTimeout: 5m0s
  # The rollout strategy to use for the control plane nodes
  # By default, the strategy allows the control plane to begin provisioning new nodes
  # without first tearing down old ones
  rolloutStrategy:
    type: RollingUpdate
    rollingUpdate:
      # For the control plane, this can only be 0 or 1
      maxSurge: 1
  # The kubeadm config specification for the control plane
  # By default, this uses a simple configuration that enables the external cloud provider
  kubeadmConfigSpec:
    initConfiguration:
      nodeRegistration:
        name: '{{ local_hostname }}'
        kubeletExtraArgs:
          cloud-provider: external
    # As well as enabling an external cloud provider, we set the bind addresses for the
    # etcd metrics, controller-manager, scheduler and kube-proxy to 0.0.0.0 so that Prometheus
    # can reach them to collect metrics
    clusterConfiguration:
      etcd:
        local:
          extraArgs:
            listen-metrics-urls: http://0.0.0.0:2381
      apiServer:
        extraArgs:
          cloud-provider: external
      controllerManager:
        extraArgs:
          cloud-provider: external
          bind-address: 0.0.0.0
      scheduler:
        extraArgs:
          bind-address: 0.0.0.0
    joinConfiguration:
      nodeRegistration:
        name: '{{ local_hostname }}'
        kubeletExtraArgs:
          cloud-provider: external
    kubeProxyConfiguration:
      metricsBindAddress: 0.0.0.0:10249
  # The machine health check for auto-healing of the control plane
  # See https://cluster-api.sigs.k8s.io/tasks/healthcheck.html
  healthCheck:
    # Indicates if the machine health check should be enabled
    enabled: true
    # The spec for the health check
    spec:
      # By default, unhealthy control plane nodes are always remediated
      maxUnhealthy: 100%
      # By default, consider a control plane node that has not been Ready
      # for more than 5 mins unhealthy
      unhealthyConditions:
        - type: Ready
          status: Unknown
          timeout: 5m0s
        - type: Ready
          status: "False"
          timeout: 5m0s

# Defaults for node groups
# Each of these can be overridden in the specification for an individual node group
nodeGroupDefaults:
  # Indicates if the node group should be autoscaled
  autoscale: false
  # The failure domain for the node group
  failureDomain:
  # The flavor to use for machines in the node group
  machineFlavor:
  # The default networks and ports for worker nodes
  # If neither networks nor ports are given, the cluster internal network is used
  # See https://github.com/kubernetes-sigs/cluster-api-provider-openstack/blob/master/docs/book/src/clusteropenstack/configuration.md#network-filters
  machineNetworking:
    networks:
    ports:
  # The root volume spec for machines in the node group
  # If not given, the ephemeral root disk from the flavor is used
  machineRootVolume:
    # # The size of the disk to use
    # diskSize:
    # # The volume type to use
    # # If not specified, the default volume type is used
    # volumeType:
    # # The volume availability zone to use
    # # If not specified, the machine availability zone is used
    # availabilityZone:
  # The time to wait for a node to finish draining before it can be removed
  nodeDrainTimeout: 5m0s
  # The rollout strategy to use for the node group
  # By default, this is set to do a rolling update within the existing resource envelope
  # of the node group, even if that means the node group temporarily has zero nodes
  rolloutStrategy:
    type: RollingUpdate
    rollingUpdate:
      # The maximum number of node group machines that can be unavailable during the update
      # Can be an absolute number or a percentage of the desired count
      maxUnavailable: 1
      # The maximum number of machines that can be scheduled above the desired count for
      # the group during an update
      # Can be an absolute number or a percentage of the desired count
      maxSurge: 0
      # One of Random, Newest, Oldest
      deletePolicy: Random
  # The default kubeadm config specification for worker nodes
  # This will be merged with any configuration given for specific node groups
  # By default, this uses a simple configuration that enables the external cloud provider
  kubeadmConfigSpec:
    joinConfiguration:
      nodeRegistration:
        name: '{{ local_hostname }}'
        kubeletExtraArgs:
          cloud-provider: external
  # The default machine health check for worker nodes
  # See https://cluster-api.sigs.k8s.io/tasks/healthcheck.html
  # Note that maxUnhealthy and unhealthyRange are evaluated per node group
  healthCheck:
    # Indicates if the machine health check should be enabled
    enabled: true
    # The spec for the health check
    spec:
      # By default, unhealthy worker nodes are always remediated
      maxUnhealthy: 100%
      # If a node takes longer than 10 mins to start up, remediate it
      nodeStartupTimeout: 10m0s
      # By default, consider a worker node that has not been Ready for
      # more than 5 mins unhealthy
      unhealthyConditions:
        - type: Ready
          status: Unknown
          timeout: 5m0s
        - type: Ready
          status: "False"
          timeout: 5m0s

# The worker node groups for the cluster
nodeGroups:
  - # The name of the node group
    name: md-0
    # The number of machines in the node group if autoscale is false
    machineCount: 3
    # The minimum and maximum number of machines in the node group if autoscale is true
    # machineCountMin: 3
    # machineCountMax: 3
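  # Purely illustrative - an additional autoscaled node group overriding the
  # default flavor might look like this (all values are hypothetical):
  # - name: md-burst
  #   autoscale: true
  #   machineCountMin: 1
  #   machineCountMax: 5
  #   machineFlavor: <flavor name>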

# Configuration for the cluster autoscaler
autoscaler:
  # The image to use for the autoscaler component
  image:
    # Defaults to the global image prefix if not given
    prefix:
    repository: k8s.gcr.io/autoscaling/cluster-autoscaler
    pullPolicy: IfNotPresent
    # The cluster-autoscaler docs recommend using a version that matches the
    # Kubernetes version, but versions should be (mostly) backwards-compatible
    tag: v1.26.1
  imagePullSecrets: []
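    # e.g. (hypothetical secret name, using the standard Kubernetes imagePullSecrets format):
    # - name: regcred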
  # Pod-level security context
  podSecurityContext:
    runAsNonRoot: true
    runAsUser: 1001
  # Container-level security context
  securityContext:
    allowPrivilegeEscalation: false
    capabilities:
      drop: [ALL]
    readOnlyRootFilesystem: true
  # Resource requests and limits for pods
  resources: {}
  # Node selector for pods
  nodeSelector: {}
  # Tolerations for pods
  tolerations: []
  # Topology spread constraints for pods
  topologySpreadConstraints: []
  # Affinity rules for pods
  affinity: {}

# Configuration for cluster addons
addons:
  # Indicates if cluster addons should be deployed
  enabled: true
  # Enable the openstack integrations by default
  openstack:
    enabled: true