[ceph] Add support for deploying and managing Ceph with Rook

This change adds an openstack-support-rook zuul job to test
deploying Ceph using the upstream Rook helm charts from the
https://charts.rook.io/release repository. It also includes minor
changes to the storage keyring manager job and the mon discovery
service in the ceph-mon chart, so that chart can generate the auth
keys and deploy the mon discovery service that OpenStack requires.

Change-Id: Iee4174dc54b6a7aac6520c448a54adb1325cccab
Stephen Taylor 2023-05-24 14:59:10 -06:00
parent 56dd4fdb84
commit a58f80599b
26 changed files with 1282 additions and 2 deletions
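
The new job runs the openstack-support-rook scripts listed in the zuul
definition at the end of this change. A minimal sketch of reproducing that
flow locally, assuming kubectl and helm are configured against a working
cluster and the repository root is the working directory:

# Hedged local reproduction of the gate; script names and order come from the zuul job below.
for script in 000-prepare-k8s 007-namespace-config 010-ingress 020-ceph \
              025-ceph-ns-activate 030-rabbitmq 070-mariadb 040-memcached \
              050-libvirt 060-openvswitch 080-setup-client 090-keystone \
              100-ceph-radosgateway 110-openstack-exporter 120-powerdns 130-cinder; do
  ./tools/deployment/openstack-support-rook/${script}.sh
done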

View File

@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph Mon
 name: ceph-mon
-version: 0.1.30
+version: 0.1.31
 home: https://github.com/ceph/ceph
 ...

View File

@@ -28,7 +28,17 @@ function kube_ceph_keyring_gen () {
   sed "s|{{"{{"}} key {{"}}"}}|${CEPH_KEY}|" ${CEPH_TEMPLATES_DIR}/${CEPH_KEY_TEMPLATE} | base64 -w0 | tr -d '\n'
 }
 
-CEPH_CLIENT_KEY=$(ceph_gen_key)
+CEPH_CLIENT_KEY=""
+
+ROOK_CEPH_TOOLS_POD=$(kubectl -n ${DEPLOYMENT_NAMESPACE} get pods --no-headers | awk '/rook-ceph-tools/{print $1}')
+if [[ -n "${ROOK_CEPH_TOOLS_POD}" ]]; then
+  CEPH_AUTH_KEY_NAME=$(echo "${CEPH_KEYRING_NAME}" | awk -F. '{print $2 "." $3}')
+  CEPH_CLIENT_KEY=$(kubectl -n ${DEPLOYMENT_NAMESPACE} exec ${ROOK_CEPH_TOOLS_POD} -- ceph auth ls | grep -A1 "${CEPH_AUTH_KEY_NAME}" | awk '/key:/{print $2}')
+fi
+
+if [[ -z "${CEPH_CLIENT_KEY}" ]]; then
+  CEPH_CLIENT_KEY=$(ceph_gen_key)
+fi
 
 function create_kube_key () {
   CEPH_KEYRING=$1
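
The reworked key handling above first looks for an existing key in the
Rook-managed cluster and only falls back to ceph_gen_key when none is found.
A rough manual equivalent, assuming a rook-ceph-tools pod runs in the ceph
namespace and client.admin is the entity being looked up:

# Hedged example of the same lookup the job performs.
TOOLS_POD=$(kubectl -n ceph get pods --no-headers | awk '/rook-ceph-tools/{print $1}')
kubectl -n ceph exec ${TOOLS_POD} -- ceph auth ls | grep -A1 "client.admin" | awk '/key:/{print $2}'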

View File

@@ -26,11 +26,14 @@ rules:
   - apiGroups:
       - ""
     resources:
+      - pods
+      - pods/exec
       - secrets
     verbs:
       - get
       - create
       - patch
+      - list
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
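
The added pods and pods/exec resources plus the list verb are what let the
storage keyring job locate the Rook toolbox pod and exec into it. A quick
hedged RBAC check (the service account name here is an assumption, not part
of this change):

# Hedged RBAC check; substitute the service account the keyring job actually runs as.
kubectl -n ceph auth can-i list pods \
  --as=system:serviceaccount:ceph:ceph-storage-keys-generator
kubectl -n ceph auth can-i create pods/exec \
  --as=system:serviceaccount:ceph:ceph-storage-keys-generator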

View File

@@ -30,7 +30,12 @@ spec:
       protocol: TCP
       targetPort: {{ tuple "ceph_mon" "discovery" "mon_msgr2" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
   selector:
+{{- if .Values.manifests.daemonset_mon }}
 {{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+{{- else }}
+    app: rook-ceph-mon
+    ceph_daemon_type: mon
+{{- end }}
   clusterIP: None
   publishNotReadyAddresses: true
 {{- end }}
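
With manifests.daemonset_mon disabled, the discovery Service falls back to
the labels Rook applies to its mon pods, so OpenStack charts can still
resolve the mons through ceph-mon-discovery. One way to confirm the selector
matches, assuming the Rook deployment from the deploy script below:

# Hedged check that Rook's mon pods back the discovery service.
kubectl -n ceph get pods -l app=rook-ceph-mon,ceph_daemon_type=mon
kubectl -n ceph get endpoints ceph-mon-discovery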

View File

@@ -31,4 +31,5 @@ ceph-mon:
   - 0.1.28 Document the use of mon_allow_pool_size_one
   - 0.1.29 Update Ceph to 17.2.6
   - 0.1.30 Use Helm toolkit functions for Ceph probes
+  - 0.1.31 Add Rook Helm charts for managing Ceph with Rook
 ...

View File

@ -0,0 +1 @@
../common/000-install-packages.sh

View File

@ -0,0 +1 @@
../common/prepare-k8s.sh

View File

@ -0,0 +1 @@
../../gate/deploy-k8s.sh

View File

@ -0,0 +1,24 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
#NOTE: Lint and package chart
make namespace-config
#NOTE: Deploy namespace configs
for NAMESPACE in kube-system ceph openstack; do
helm upgrade --install ${NAMESPACE}-namespace-config ./namespace-config \
--namespace=${NAMESPACE}
done

View File

@ -0,0 +1,45 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
#NOTE: Lint and package chart
make ingress
#NOTE: Deploy global ingress with IngressClass nginx-cluster
tee /tmp/ingress-kube-system.yaml <<EOF
deployment:
mode: cluster
type: DaemonSet
network:
host_namespace: true
EOF
helm upgrade --install ingress-kube-system ./ingress \
--namespace=kube-system \
--values=/tmp/ingress-kube-system.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh kube-system
#NOTE: Deploy namespace ingress
for NAMESPACE in ceph openstack; do
helm upgrade --install ingress-${NAMESPACE} ./ingress \
--namespace=${NAMESPACE} \
--set deployment.cluster.class=nginx-${NAMESPACE} \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_INGRESS_OPENSTACK}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh ${NAMESPACE}
done
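
The loop above gives the ceph and openstack namespaces their own controllers
with nginx-ceph and nginx-openstack classes, alongside the cluster-wide
nginx-cluster controller in kube-system. A hedged sanity check (label keys
and IngressClass object names depend on the ingress chart defaults):

# Hedged check of the ingress controllers deployed above.
kubectl get ingressclasses
kubectl -n kube-system get pods -l application=ingress
kubectl -n openstack get pods -l application=ingress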

View File

@ -0,0 +1,716 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
# Specify the Rook release tag to use for the Rook operator here
ROOK_RELEASE=v1.12.4
# setup loopback devices for ceph
free_loop_devices=( $(ls -1 /dev/loop[0-7] | while read loopdev; do losetup | grep -q $loopdev || echo $loopdev; done) )
./tools/deployment/common/setup-ceph-loopback-device.sh \
--ceph-osd-data ${CEPH_OSD_DATA_DEVICE:=${free_loop_devices[0]}} \
--ceph-osd-dbwal ${CEPH_OSD_DB_WAL_DEVICE:=${free_loop_devices[1]}}
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
[ -s /tmp/ceph-fs-uuid.txt ] || uuidgen > /tmp/ceph-fs-uuid.txt
CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this
# should be set to 'hammer'
. /etc/os-release
if [ "x${ID}" == "xcentos" ] || \
([ "x${ID}" == "xubuntu" ] && \
dpkg --compare-versions "$(uname -r)" "lt" "4.5"); then
CRUSH_TUNABLES=hammer
else
CRUSH_TUNABLES=null
fi
tee /tmp/rook.yaml <<EOF
image:
repository: rook/ceph
tag: ${ROOK_RELEASE}
pullPolicy: IfNotPresent
crds:
enabled: true
resources:
limits:
cpu: 200m
memory: 256Mi
requests:
cpu: 100m
memory: 128Mi
nodeSelector: {}
tolerations: []
unreachableNodeTolerationSeconds: 5
currentNamespaceOnly: false
annotations: {}
logLevel: INFO
rbacEnable: true
pspEnable: false
priorityClassName:
allowLoopDevices: true
csi:
enableRbdDriver: true
enableCephfsDriver: false
enableGrpcMetrics: false
enableCSIHostNetwork: true
enableCephfsSnapshotter: true
enableNFSSnapshotter: true
enableRBDSnapshotter: true
enablePluginSelinuxHostMount: false
enableCSIEncryption: false
pluginPriorityClassName: system-node-critical
provisionerPriorityClassName: system-cluster-critical
rbdFSGroupPolicy: "File"
cephFSFSGroupPolicy: "File"
nfsFSGroupPolicy: "File"
enableOMAPGenerator: false
cephFSKernelMountOptions:
enableMetadata: false
provisionerReplicas: 1
clusterName: ceph
logLevel: 0
sidecarLogLevel:
rbdPluginUpdateStrategy:
rbdPluginUpdateStrategyMaxUnavailable:
cephFSPluginUpdateStrategy:
nfsPluginUpdateStrategy:
grpcTimeoutInSeconds: 150
allowUnsupportedVersion: false
csiRBDPluginVolume:
csiRBDPluginVolumeMount:
csiCephFSPluginVolume:
csiCephFSPluginVolumeMount:
csiRBDProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 128Mi
cpu: 100m
- name : csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 128Mi
cpu: 100m
- name : csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 128Mi
cpu: 100m
- name : csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 128Mi
cpu: 100m
- name : csi-rbdplugin
resource:
requests:
memory: 128Mi
cpu: 250m
limits:
memory: 128Mi
cpu: 250m
- name : csi-omap-generator
resource:
requests:
memory: 128Mi
cpu: 250m
limits:
memory: 128Mi
cpu: 250m
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 128Mi
cpu: 100m
csiRBDPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 128Mi
cpu: 100m
- name : csi-rbdplugin
resource:
requests:
memory: 128Mi
cpu: 250m
limits:
memory: 128Mi
cpu: 250m
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
csiCephFSProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 128Mi
cpu: 200m
- name : csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 128Mi
cpu: 200m
- name : csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 128Mi
cpu: 200m
- name : csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 128Mi
cpu: 200m
- name : csi-cephfsplugin
resource:
requests:
memory: 128Mi
cpu: 250m
limits:
memory: 128Mi
cpu: 250m
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 128Mi
cpu: 100m
csiCephFSPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 128Mi
cpu: 100m
- name : csi-cephfsplugin
resource:
requests:
memory: 128Mi
cpu: 250m
limits:
memory: 128Mi
cpu: 250m
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 128Mi
cpu: 100m
csiNFSProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 128Mi
cpu: 200m
- name : csi-nfsplugin
resource:
requests:
memory: 128Mi
cpu: 250m
limits:
memory: 128Mi
cpu: 250m
- name : csi-attacher
resource:
requests:
memory: 128Mi
cpu: 250m
limits:
memory: 128Mi
cpu: 250m
csiNFSPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 128Mi
cpu: 100m
- name : csi-nfsplugin
resource:
requests:
memory: 128Mi
cpu: 250m
limits:
memory: 128Mi
cpu: 250m
provisionerTolerations:
provisionerNodeAffinity: #key1=value1,value2; key2=value3
pluginTolerations:
pluginNodeAffinity: # key1=value1,value2; key2=value3
enableLiveness: false
cephfsGrpcMetricsPort:
cephfsLivenessMetricsPort:
rbdGrpcMetricsPort:
csiAddonsPort:
forceCephFSKernelClient: true
rbdLivenessMetricsPort:
kubeletDirPath:
cephcsi:
image:
registrar:
image:
provisioner:
image:
snapshotter:
image:
attacher:
image:
resizer:
image:
imagePullPolicy: IfNotPresent
cephfsPodLabels: #"key1=value1,key2=value2"
nfsPodLabels: #"key1=value1,key2=value2"
rbdPodLabels: #"key1=value1,key2=value2"
csiAddons:
enabled: false
image: "quay.io/csiaddons/k8s-sidecar:v0.5.0"
nfs:
enabled: false
topology:
enabled: false
domainLabels:
readAffinity:
enabled: false
crushLocationLabels:
cephFSAttachRequired: true
rbdAttachRequired: true
nfsAttachRequired: true
enableDiscoveryDaemon: false
cephCommandsTimeoutSeconds: "15"
useOperatorHostNetwork:
discover:
toleration:
tolerationKey:
tolerations:
nodeAffinity: # key1=value1,value2; key2=value3
podLabels: # "key1=value1,key2=value2"
resources:
disableAdmissionController: true
hostpathRequiresPrivileged: false
disableDeviceHotplug: false
discoverDaemonUdev:
imagePullSecrets:
enableOBCWatchOperatorNamespace: true
admissionController:
monitoring:
enabled: false
EOF
helm repo add rook-release https://charts.rook.io/release
helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph --version ${ROOK_RELEASE} -f /tmp/rook.yaml
./tools/deployment/common/wait-for-pods.sh rook-ceph
tee /tmp/ceph.yaml <<EOF
operatorNamespace: rook-ceph
clusterName: ceph
kubeVersion:
configOverride: |
[global]
mon_allow_pool_delete = true
mon_allow_pool_size_one = true
osd_pool_default_size = 1
osd_pool_default_min_size = 1
mon_warn_on_pool_no_redundancy = false
auth_allow_insecure_global_id_reclaim = false
toolbox:
enabled: true
tolerations: []
affinity: {}
resources:
limits:
cpu: "100m"
memory: "64Mi"
requests:
cpu: "100m"
memory: "64Mi"
priorityClassName:
monitoring:
enabled: false
createPrometheusRules: false
rulesNamespaceOverride:
prometheusRule:
labels: {}
annotations: {}
pspEnable: false
cephClusterSpec:
cephVersion:
image: quay.io/ceph/ceph:v17.2.6
allowUnsupported: false
dataDirHostPath: /var/lib/rook
skipUpgradeChecks: false
continueUpgradeAfterChecksEvenIfNotHealthy: false
waitTimeoutForHealthyOSDInMinutes: 10
mon:
count: 1
allowMultiplePerNode: false
mgr:
count: 1
allowMultiplePerNode: false
modules:
- name: pg_autoscaler
enabled: true
- name: dashboard
enabled: false
- name: nfs
enabled: false
dashboard:
enabled: true
ssl: true
network:
connections:
encryption:
enabled: false
compression:
enabled: false
requireMsgr2: false
provider: host
crashCollector:
disable: true
logCollector:
enabled: true
periodicity: daily # one of: hourly, daily, weekly, monthly
maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
cleanupPolicy:
confirmation: ""
sanitizeDisks:
method: quick
dataSource: zero
iteration: 1
allowUninstallWithVolumes: false
resources:
mgr:
limits:
cpu: "250m"
memory: "512Mi"
requests:
cpu: "250m"
memory: "5Mi"
mon:
limits:
cpu: "250m"
memory: "100Mi"
requests:
cpu: "250m"
memory: "50Mi"
osd:
limits:
cpu: "500m"
memory: "2Gi"
requests:
cpu: "500m"
memory: "1Gi"
prepareosd:
requests:
cpu: "500m"
memory: "50Mi"
mgr-sidecar:
limits:
cpu: "200m"
memory: "50Mi"
requests:
cpu: "100m"
memory: "5Mi"
crashcollector:
limits:
cpu: "200m"
memory: "60Mi"
requests:
cpu: "100m"
memory: "60Mi"
logcollector:
limits:
cpu: "200m"
memory: "1Gi"
requests:
cpu: "100m"
memory: "100Mi"
cleanup:
limits:
cpu: "250m"
memory: "1Gi"
requests:
cpu: "250m"
memory: "100Mi"
removeOSDsIfOutAndSafeToRemove: false
priorityClassNames:
mon: system-node-critical
osd: system-node-critical
mgr: system-cluster-critical
storage: # cluster level storage configuration and selection
useAllNodes: true
useAllDevices: false
devices:
- name: "${CEPH_OSD_DATA_DEVICE}"
config:
metadataDevice: "${CEPH_OSD_DB_WAL_DEVICE}"
databaseSizeMB: "5120"
walSizeMB: "2048"
disruptionManagement:
managePodBudgets: true
osdMaintenanceTimeout: 30
pgHealthCheckTimeout: 0
healthCheck:
daemonHealth:
mon:
disabled: false
interval: 45s
osd:
disabled: false
interval: 60s
status:
disabled: false
interval: 60s
livenessProbe:
mon:
disabled: false
mgr:
disabled: false
osd:
disabled: false
ingress:
dashboard:
{}
cephBlockPools:
- name: rbd
namespace: ceph
spec:
failureDomain: host
replicated:
size: 1
storageClass:
enabled: true
name: general
isDefault: true
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: "Immediate"
mountOptions: []
allowedTopologies: []
parameters:
imageFormat: "2"
imageFeatures: layering
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/fstype: ext4
cephFileSystems:
- name: cephfs
namespace: ceph
spec:
metadataPool:
replicated:
size: 1
dataPools:
- failureDomain: host
replicated:
size: 1
name: data
metadataServer:
activeCount: 1
activeStandby: false
resources:
limits:
cpu: "250m"
memory: "50Mi"
requests:
cpu: "250m"
memory: "10Mi"
priorityClassName: system-cluster-critical
storageClass:
enabled: true
isDefault: false
name: ceph-filesystem
pool: data0
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: "Immediate"
mountOptions: []
parameters:
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/fstype: ext4
cephBlockPoolsVolumeSnapshotClass:
enabled: false
name: general
isDefault: false
deletionPolicy: Delete
annotations: {}
labels: {}
parameters: {}
cephObjectStores:
- name: default
namespace: ceph
spec:
metadataPool:
failureDomain: host
replicated:
size: 1
dataPool:
failureDomain: host
replicated:
size: 1
preservePoolsOnDelete: true
gateway:
port: 8080
resources:
limits:
cpu: "500m"
memory: "128Mi"
requests:
cpu: "500m"
memory: "32Mi"
instances: 1
priorityClassName: system-cluster-critical
storageClass:
enabled: true
name: ceph-bucket
reclaimPolicy: Delete
volumeBindingMode: "Immediate"
parameters:
region: us-east-1
storageclass:
rbd:
parameters:
adminSecretName: pvc-ceph-conf-combined-storageclass
cephfs:
provision_storage_class: true
provisioner: ceph.com/cephfs
metadata:
name: cephfs
parameters:
adminId: admin
userSecretName: pvc-ceph-cephfs-client-key
adminSecretName: pvc-ceph-conf-combined-storageclass
adminSecretNamespace: ceph
EOF
helm upgrade --install --create-namespace --namespace ceph rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster --version ${ROOK_RELEASE} -f /tmp/ceph.yaml
#NOTE: Wait for deploy
RGW_POD=$(kubectl get pods \
--namespace=ceph \
--selector="app=rook-ceph-rgw" \
--no-headers | awk '{print $1; exit}')
while [[ -z "${RGW_POD}" ]]
do
sleep 5
RGW_POD=$(kubectl get pods \
--namespace=ceph \
--selector="app=rook-ceph-rgw" \
--no-headers | awk '{print $1; exit}')
done
./tools/deployment/common/wait-for-pods.sh ceph
#NOTE: Validate deploy
TOOLS_POD=$(kubectl get pods \
--namespace=ceph \
--selector="app=rook-ceph-tools" \
--no-headers | awk '{ print $1; exit }')
kubectl exec -n ceph ${TOOLS_POD} -- ceph -s
tee /tmp/ceph-supplemental.yaml <<EOF
endpoints:
ceph_mon:
namespace: null
hosts:
default: rook-ceph-mon-a
discovery: ceph-mon-discovery
port:
mon:
default: 6789
mon_msgr2:
default: 3300
deployment:
storage_secrets: true
ceph: true
csi_rbd_provisioner: false
client_secrets: false
rgw_keystone_user_and_endpoints: false
bootstrap:
enabled: false
manifests:
daemonset_mon: false
daemonset_osd: false
deployment_checkdns: true
deployment_mds: false
deployment_mgr: false
deployment_mgr_sa: false
deployment_moncheck: false
helm_tests: false
job_bootstrap: false
job_storage_admin_keys: true
service_mgr: false
service_mon: false
service_mon_discovery: true
EOF
helm upgrade --install ceph-mon ./ceph-mon --namespace=ceph --values=/tmp/ceph-supplemental.yaml
./tools/deployment/common/wait-for-pods.sh ceph
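
After this script completes, the Rook operator, the Ceph cluster, and the
ceph-mon chart's key and discovery pieces should all be in place. A few
hedged spot checks beyond the ceph -s run already performed above (resource
and class names come from the values files in this script):

# Hedged post-deploy checks; not part of the gate script itself.
kubectl -n ceph get cephcluster                 # CephCluster CR created by rook-ceph-cluster
kubectl get storageclass                        # expect general, ceph-filesystem and ceph-bucket
kubectl -n ceph get secrets | grep -i keyring   # keys created by the ceph-mon storage-keys job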

View File

@ -0,0 +1,58 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
#NOTE: Lint and package chart
make ceph-provisioners
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
tee /tmp/ceph-openstack-config.yaml <<EOF
endpoints:
ceph_mon:
namespace: ceph
network:
public: 172.17.0.1/16
cluster: 172.17.0.1/16
deployment:
storage_secrets: false
ceph: false
csi_rbd_provisioner: false
client_secrets: true
rgw_keystone_user_and_endpoints: false
bootstrap:
enabled: false
conf:
rgw_ks:
enabled: false
EOF
: ${OSH_INFRA_EXTRA_HELM_ARGS_CEPH_NS_ACTIVATE:="$(./tools/deployment/common/get-values-overrides.sh ceph-provisioners)"}
helm upgrade --install ceph-openstack-config ./ceph-provisioners \
--namespace=openstack \
--values=/tmp/ceph-openstack-config.yaml \
${OSH_INFRA_EXTRA_HELM_ARGS} \
${OSH_INFRA_EXTRA_HELM_ARGS_CEPH_NS_ACTIVATE}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
helm test ceph-openstack-config --namespace openstack --timeout 600s
#NOTE: Validate Deployment info
kubectl get -n openstack jobs
kubectl get -n openstack secrets
kubectl get -n openstack configmaps
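
This step only creates client credentials and storage-class plumbing in the
openstack namespace; Ceph itself stays in the ceph namespace. A hedged check,
assuming the secret names referenced by the storageclass overrides elsewhere
in this change are the ones actually created:

# Hedged check that the client secrets referenced by the storage classes exist.
kubectl -n openstack get secret pvc-ceph-conf-combined-storageclass
kubectl -n openstack get secret pvc-ceph-cephfs-client-key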

View File

@ -0,0 +1,35 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
: ${OSH_INFRA_EXTRA_HELM_ARGS_RABBITMQ:="$(./tools/deployment/common/get-values-overrides.sh rabbitmq)"}
#NOTE: Lint and package chart
make rabbitmq
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install rabbitmq ./rabbitmq \
--namespace=openstack \
--recreate-pods \
--force \
--set network.management.ingress.classes.namespace=nginx-openstack \
${OSH_INFRA_EXTRA_HELM_ARGS} \
${OSH_INFRA_EXTRA_HELM_ARGS_RABBITMQ}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
helm test rabbitmq --namespace openstack

View File

@ -0,0 +1,30 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
: ${OSH_INFRA_EXTRA_HELM_ARGS_MEMCACHED:="$(./tools/deployment/common/get-values-overrides.sh memcached)"}
#NOTE: Lint and package chart
make memcached
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install memcached ./memcached \
--namespace=openstack \
${OSH_INFRA_EXTRA_HELM_ARGS} \
${OSH_INFRA_EXTRA_HELM_ARGS_MEMCACHED}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

View File

@ -0,0 +1,34 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
: ${OSH_INFRA_EXTRA_HELM_ARGS_LIBVIRT:="$(./tools/deployment/common/get-values-overrides.sh libvirt)"}
#NOTE: Lint and package chart
make libvirt
#NOTE: Deploy command
helm upgrade --install libvirt ./libvirt \
--namespace=openstack \
--set network.backend="null" \
${OSH_INFRA_EXTRA_HELM_ARGS} \
${OSH_INFRA_EXTRA_HELM_ARGS_LIBVIRT}
#NOTE: Please be aware that the chosen network backend might affect
#whether this chart can be loaded, as some backends need to be
#loaded asynchronously. See also:
#https://github.com/openstack/openstack-helm-infra/blob/b69584bd658ae5cb6744e499975f9c5a505774e5/libvirt/values.yaml#L151-L172
if [[ "${WAIT_FOR_PODS:=True}" == "True" ]]; then
./tools/deployment/common/wait-for-pods.sh openstack
fi

View File

@ -0,0 +1,76 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
: ${OSH_INFRA_EXTRA_HELM_ARGS_LIBVIRT:="$(./tools/deployment/common/get-values-overrides.sh libvirt)"}
CERT_DIR=$(mktemp -d)
cd ${CERT_DIR}
openssl req -x509 -new -nodes -days 1 -newkey rsa:2048 -keyout cacert.key -out cacert.pem -subj "/CN=libvirt.org"
openssl req -newkey rsa:2048 -days 1 -nodes -keyout client-key.pem -out client-req.pem -subj "/CN=libvirt.org"
openssl rsa -in client-key.pem -out client-key.pem
openssl x509 -req -in client-req.pem -days 1 \
-CA cacert.pem -CAkey cacert.key -set_serial 01 \
-out client-cert.pem
openssl req -newkey rsa:2048 -days 1 -nodes -keyout server-key.pem -out server-req.pem -subj "/CN=libvirt.org"
openssl rsa -in server-key.pem -out server-key.pem
openssl x509 -req -in server-req.pem -days 1 \
-CA cacert.pem -CAkey cacert.key -set_serial 01 \
-out server-cert.pem
cd -
cat <<EOF | kubectl apply -f-
apiVersion: v1
kind: Secret
metadata:
name: libvirt-tls-client
namespace: openstack
type: Opaque
data:
cacert.pem: $(cat ${CERT_DIR}/cacert.pem | base64 -w0)
clientcert.pem: $(cat ${CERT_DIR}/client-cert.pem | base64 -w0)
clientkey.pem: $(cat ${CERT_DIR}/client-key.pem | base64 -w0)
EOF
cat <<EOF | kubectl apply -f-
apiVersion: v1
kind: Secret
metadata:
name: libvirt-tls-server
namespace: openstack
type: Opaque
data:
cacert.pem: $(cat ${CERT_DIR}/cacert.pem | base64 -w0)
servercert.pem: $(cat ${CERT_DIR}/server-cert.pem | base64 -w0)
serverkey.pem: $(cat ${CERT_DIR}/server-key.pem | base64 -w0)
EOF
#NOTE: Lint and package chart
make libvirt
#NOTE: Deploy command
helm upgrade --install libvirt ./libvirt \
--namespace=openstack \
--set network.backend="null" \
${OSH_INFRA_EXTRA_HELM_ARGS} \
${OSH_INFRA_EXTRA_HELM_ARGS_LIBVIRT}
#NOTE: Please be aware that the chosen network backend might affect
#whether this chart can be loaded, as some backends need to be
#loaded asynchronously. See also:
#https://github.com/openstack/openstack-helm-infra/blob/b69584bd658ae5cb6744e499975f9c5a505774e5/libvirt/values.yaml#L151-L172
if [[ "${WAIT_FOR_PODS:=True}" == "True" ]]; then
./tools/deployment/common/wait-for-pods.sh openstack
fi
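
Because the generated certificates only live in a temporary directory, the
two Secrets created above are the only place libvirt can read them from. A
quick hedged verification:

# Hedged check of the TLS secrets created above.
kubectl -n openstack get secret libvirt-tls-client libvirt-tls-server
kubectl -n openstack get secret libvirt-tls-server -o jsonpath='{.data.servercert\.pem}' \
  | base64 -d | openssl x509 -noout -subject -enddate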

View File

@ -0,0 +1,25 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
: ${OSH_EXTRA_HELM_ARGS_OPENVSWITCH:="$(./tools/deployment/common/get-values-overrides.sh openvswitch)"}
#NOTE: Deploy command
helm upgrade --install openvswitch ./openvswitch \
--namespace=openstack \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_OPENVSWITCH}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

View File

@ -0,0 +1 @@
../keystone-auth/060-mariadb.sh

View File

@ -0,0 +1 @@
../keystone-auth/010-setup-client.sh

View File

@ -0,0 +1 @@
../keystone-auth/070-keystone.sh

View File

@ -0,0 +1,63 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
#NOTE: Lint and package chart
: ${OSH_INFRA_PATH:="../openstack-helm-infra"}
make -C ${OSH_INFRA_PATH} ceph-rgw
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
tee /tmp/radosgw-openstack.yaml <<EOF
endpoints:
identity:
namespace: openstack
object_store:
namespace: openstack
ceph_mon:
namespace: ceph
network:
public: 172.17.0.1/16
cluster: 172.17.0.1/16
deployment:
ceph: true
rgw_keystone_user_and_endpoints: true
bootstrap:
enabled: false
conf:
rgw_ks:
enabled: true
pod:
replicas:
rgw: 1
EOF
helm upgrade --install radosgw-openstack ${OSH_INFRA_PATH}/ceph-rgw \
--namespace=openstack \
--values=/tmp/radosgw-openstack.yaml \
--set network.api.ingress.classes.namespace=nginx-openstack \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_CEPH_RGW}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
export OS_CLOUD=openstack_helm
sleep 60 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
openstack service list
openstack endpoint list
helm test radosgw-openstack --namespace openstack --timeout 900s

View File

@ -0,0 +1,29 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
#NOTE: Lint and package chart
make prometheus-openstack-exporter
: ${OSH_INFRA_EXTRA_HELM_ARGS_OS_EXPORTER:="$(./tools/deployment/common/get-values-overrides.sh prometheus-openstack-exporter)"}
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install prometheus-openstack-exporter \
./prometheus-openstack-exporter \
--namespace=openstack \
${OSH_EXTRA_HELM_ARGS} \
${OSH_INFRA_EXTRA_HELM_ARGS_OS_EXPORTER}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

View File

@ -0,0 +1,28 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
#NOTE: Lint and package chart
make powerdns
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install powerdns ./powerdns \
--namespace=openstack \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_POWERDNS}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

View File

@ -0,0 +1,63 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
: ${OSH_PATH:="../openstack-helm"}
: ${OSH_INFRA_EXTRA_HELM_ARGS:=""}
: ${OSH_EXTRA_HELM_ARGS:=""}
#NOTE: Get the over-rides to use
: ${OSH_EXTRA_HELM_ARGS_CINDER:="$(./tools/deployment/common/get-values-overrides.sh cinder)"}
#NOTE: Lint and package chart
cd ${OSH_PATH}
make cinder
cd -
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
tee /tmp/cinder.yaml <<EOF
conf:
ceph:
pools:
backup:
replication: 1
crush_rule: rbd
chunk_size: 8
app_name: cinder-backup
cinder.volumes:
replication: 1
crush_rule: rbd
chunk_size: 8
app_name: cinder-volume
EOF
helm upgrade --install cinder ${OSH_PATH}/cinder \
--namespace=openstack \
--values=/tmp/cinder.yaml \
--set network.api.ingress.classes.namespace=nginx-openstack \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_CINDER}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
export OS_CLOUD=openstack_helm
openstack service list
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
openstack volume type list
kubectl delete pods -l application=cinder,release_group=cinder,component=test --namespace=openstack --ignore-not-found
helm test cinder --namespace openstack --timeout 900s
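
Once Cinder is up against the Rook-backed RBD pools, a hedged smoke test that
exercises the general storage path end to end (the volume name is
illustrative):

# Hedged volume smoke test; not part of the gate script.
export OS_CLOUD=openstack_helm
openstack volume create --size 1 rook-smoke-vol
openstack volume show rook-smoke-vol -f value -c status
openstack volume delete rook-smoke-vol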

View File

@@ -197,6 +197,33 @@
         - ./tools/deployment/openstack-support/120-powerdns.sh
         - ./tools/deployment/openstack-support/130-cinder.sh
 
+- job:
+    name: openstack-helm-infra-openstack-support-rook
+    parent: openstack-helm-infra-deploy
+    nodeset: openstack-helm-1node-32GB-ubuntu_focal
+    vars:
+      osh_params:
+        openstack_release: "2023.1"
+        container_distro_name: ubuntu
+        container_distro_version: focal
+      gate_scripts:
+        - ./tools/deployment/openstack-support-rook/000-prepare-k8s.sh
+        - ./tools/deployment/openstack-support-rook/007-namespace-config.sh
+        - ./tools/deployment/openstack-support-rook/010-ingress.sh
+        - ./tools/deployment/openstack-support-rook/020-ceph.sh
+        - ./tools/deployment/openstack-support-rook/025-ceph-ns-activate.sh
+        - ./tools/deployment/openstack-support-rook/030-rabbitmq.sh
+        - ./tools/deployment/openstack-support-rook/070-mariadb.sh
+        - ./tools/deployment/openstack-support-rook/040-memcached.sh
+        - ./tools/deployment/openstack-support-rook/050-libvirt.sh
+        - ./tools/deployment/openstack-support-rook/060-openvswitch.sh
+        - ./tools/deployment/openstack-support-rook/080-setup-client.sh
+        - ./tools/deployment/openstack-support-rook/090-keystone.sh
+        - ./tools/deployment/openstack-support-rook/100-ceph-radosgateway.sh
+        - ./tools/deployment/openstack-support-rook/110-openstack-exporter.sh
+        - ./tools/deployment/openstack-support-rook/120-powerdns.sh
+        - ./tools/deployment/openstack-support-rook/130-cinder.sh
+
 # Use libvirt ssl
 - job:
     name: openstack-helm-infra-openstack-support-ssl

View File

@@ -25,6 +25,7 @@
         - openstack-helm-infra-logging
         - openstack-helm-infra-monitoring
         - openstack-helm-infra-openstack-support
+        - openstack-helm-infra-openstack-support-rook
         - openstack-helm-infra-openstack-support-ssl
         - openstack-helm-infra-metacontroller
     gate:
@@ -34,6 +35,7 @@
         - openstack-helm-infra-logging
         - openstack-helm-infra-monitoring
         - openstack-helm-infra-openstack-support
+        - openstack-helm-infra-openstack-support-rook
         - openstack-helm-infra-openstack-support-ssl
     post:
       jobs: