From d070774bfcc514359c68519a06f07ef4e0b50a66 Mon Sep 17 00:00:00 2001
From: Stephen Taylor <stephen.taylor@charter.com>
Date: Wed, 25 Oct 2023 11:08:19 -0600
Subject: [PATCH] [ceph-rgw] Add a ceph-rgw-pool job to re-run the
 ceph-rbd-pool job

The Reef release disallows internal pools from being created by
clients, which means the ceph-client chart is no longer able to create
the .rgw.root pool and configure it. The new ceph-rgw-pool job deletes
and re-creates the ceph-rbd-pool job after ceph-rgw has been deployed
so that job can configure the .rgw.root pool correctly.

Change-Id: Ic3b9d26de566fe379227a2fe14dc061248e84a4c
---
 ceph-rgw/Chart.yaml                           |   2 +-
 .../templates/bin/rgw/_rerun-pool-job.sh.tpl  |  46 +++++++
 ceph-rgw/templates/configmap-bin.yaml         |   2 +
 ceph-rgw/templates/job-rgw-pool.yaml          | 115 ++++++++++++++++++
 ceph-rgw/values.yaml                          |  23 ++++
 ceph-rgw/values_overrides/apparmor.yaml       |   3 +
 releasenotes/notes/ceph-rgw.yaml              |   1 +
 7 files changed, 191 insertions(+), 1 deletion(-)
 create mode 100644 ceph-rgw/templates/bin/rgw/_rerun-pool-job.sh.tpl
 create mode 100644 ceph-rgw/templates/job-rgw-pool.yaml

diff --git a/ceph-rgw/Chart.yaml b/ceph-rgw/Chart.yaml
index d1bfbae13..f24c29208 100644
--- a/ceph-rgw/Chart.yaml
+++ b/ceph-rgw/Chart.yaml
@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph RadosGW
 name: ceph-rgw
-version: 0.1.30
+version: 0.1.31
 home: https://github.com/ceph/ceph
 ...
diff --git a/ceph-rgw/templates/bin/rgw/_rerun-pool-job.sh.tpl b/ceph-rgw/templates/bin/rgw/_rerun-pool-job.sh.tpl
new file mode 100644
index 000000000..30415f90f
--- /dev/null
+++ b/ceph-rgw/templates/bin/rgw/_rerun-pool-job.sh.tpl
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+# Get the Ceph cluster namespace, assuming "ceph" if not defined
+{{- if empty .Values.endpoints.ceph_mon.namespace -}}
+CEPH_NS=ceph
+{{ else }}
+CEPH_NS={{ .Values.endpoints.ceph_mon.namespace }}
+{{- end }}
+
+# If the ceph-rbd pool job exists, delete it and re-create it
+# NOTE: This check is currently required to handle the Rook case properly.
+# Other charts still deploy ceph-rgw outside of Rook, and Rook does not
+# have a ceph-rbd-pool job to re-run.
+if [[ -n "$(kubectl -n ${CEPH_NS} get jobs | grep ceph-rbd-pool)" ]]
+then
+  kubectl -n ${CEPH_NS} get job ceph-rbd-pool -o json > /tmp/ceph-rbd-pool.json
+  kubectl -n ${CEPH_NS} delete job ceph-rbd-pool
+  jq 'del(.spec.selector) |
+      del(.spec.template.metadata.creationTimestamp) |
+      del(.spec.template.metadata.labels) |
+      del(.metadata.creationTimestamp) |
+      del(.metadata.uid) |
+      del(.status)' /tmp/ceph-rbd-pool.json | \
+      kubectl create -f -
+
+  while [[ -z "$(kubectl -n ${CEPH_NS} get pods | grep ceph-rbd-pool | grep Completed)" ]]
+  do
+    sleep 5
+  done
+fi
diff --git a/ceph-rgw/templates/configmap-bin.yaml b/ceph-rgw/templates/configmap-bin.yaml
index 666cc16dc..aa970d410 100644
--- a/ceph-rgw/templates/configmap-bin.yaml
+++ b/ceph-rgw/templates/configmap-bin.yaml
@@ -38,6 +38,8 @@ data:
 {{ tuple "bin/rgw/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
   rgw-init.sh: |
 {{ tuple "bin/rgw/_init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  rerun-pool-job.sh: |
+{{ tuple "bin/rgw/_rerun-pool-job.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
   storage-init.sh: |
 {{ tuple "bin/_ceph-rgw-storage-init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
   ceph-admin-keyring.sh: |
diff --git a/ceph-rgw/templates/job-rgw-pool.yaml b/ceph-rgw/templates/job-rgw-pool.yaml
new file mode 100644
index 000000000..c96e4c69f
--- /dev/null
+++ b/ceph-rgw/templates/job-rgw-pool.yaml
@@ -0,0 +1,115 @@
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+# This job is required for Reef and later because Ceph now disallows the
+# creation of internal pools (pools names beginning with a ".") and the
+# ceph-rbd-pool job therefore can't configure them if they don't yet exist.
+# This job simply deletes and re-creates the ceph-rbd-pool job after deploying
+# ceph-rgw so it can apply the correct configuration to the .rgw.root pool.
+
+{{- if and .Values.manifests.job_rgw_pool .Values.deployment.ceph }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "ceph-rgw-pool" }}
+{{ tuple $envAll "rgw_pool" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+    - ''
+    resources:
+    - pods
+    - jobs
+    verbs:
+    - create
+    - get
+    - delete
+    - list
+  - apiGroups:
+    - 'batch'
+    resources:
+    - jobs
+    verbs:
+    - create
+    - get
+    - delete
+    - list
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ $serviceAccountName }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: {{ $serviceAccountName }}
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ceph-rgw-pool
+  labels:
+{{ tuple $envAll "ceph" "rbd-pool" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+  annotations:
+    {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
+spec:
+  template:
+    metadata:
+      name: ceph-rgw-pool
+      labels:
+{{ tuple $envAll "ceph" "rbd-pool" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+      annotations:
+{{ dict "envAll" $envAll "podName" "ceph-rgw-pool" "containerNames" (list "ceph-rgw-pool" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+    spec:
+{{ dict "envAll" $envAll "application" "rgw_pool" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+      serviceAccountName: {{ $serviceAccountName }}
+      restartPolicy: {{ $envAll.Values.jobs.rgw_pool.restartPolicy | quote }}
+      affinity:
+{{ tuple $envAll "ceph" "rbd-pool" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+      nodeSelector:
+        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "rgw_pool" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+      containers:
+        - name: ceph-rgw-pool
+{{ tuple $envAll "ceph_rgw_pool" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.rgw_pool | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+{{ dict "envAll" $envAll "application" "rgw_pool" "container" "rgw_pool" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
+          command:
+            - /tmp/rerun-pool-job.sh
+          volumeMounts:
+            - name: pod-tmp
+              mountPath: /tmp
+            - name: ceph-rgw-bin
+              mountPath: /tmp/rerun-pool-job.sh
+              subPath: rerun-pool-job.sh
+              readOnly: true
+      volumes:
+        - name: pod-tmp
+          emptyDir: {}
+        - name: ceph-rgw-bin
+          configMap:
+            name: ceph-rgw-bin
+            defaultMode: 0555
+        - name: pod-run
+          emptyDir:
+            medium: "Memory"
+{{- end }}
diff --git a/ceph-rgw/values.yaml b/ceph-rgw/values.yaml
index 0c30f97f2..fc181452f 100644
--- a/ceph-rgw/values.yaml
+++ b/ceph-rgw/values.yaml
@@ -27,6 +27,7 @@ images:
     ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_focal_18.2.0-1-20231013'
     ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_focal_18.2.0-1-20231013'
     ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:ubuntu_focal_18.2.0-1-20231013'
+    ceph_rgw_pool: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_focal_18.2.0-1-20231013'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/library/docker:17.07.0'
     rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_focal_18.2.0-1-20231013'
@@ -123,6 +124,13 @@ pod:
         bootstrap:
           allowPrivilegeEscalation: false
           readOnlyRootFilesystem: true
+    rgw_pool:
+      pod:
+        runAsUser: 65534
+      container:
+        rgw_pool:
+          allowPrivilegeEscalation: false
+          readOnlyRootFilesystem: true
   dns_policy: "ClusterFirstWithHostNet"
   replicas:
     rgw: 2
@@ -215,6 +223,13 @@
        requests:
          memory: "128Mi"
          cpu: "500m"
+      rgw_pool:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
     tests:
       requests:
         memory: "128Mi"
@@ -533,6 +548,9 @@ dependencies:
       services:
         - endpoint: internal
           service: ceph_object_store
+    rgw_pool:
+      jobs:
+        - ceph-rgw-storage-init
     tests:
       services:
         - endpoint: internal
@@ -692,6 +710,10 @@
       default: 53
       protocol: UDP
 
+jobs:
+  rgw_pool:
+    restartPolicy: OnFailure
+
 manifests:
   certificates: false
   configmap_ceph_templates: true
@@ -710,6 +732,7 @@ manifests:
   job_ks_user: true
   job_s3_admin: true
   job_rgw_placement_targets: false
+  job_rgw_pool: true
   secret_s3_rgw: true
   secret_keystone_rgw: true
   secret_ingress_tls: true
diff --git a/ceph-rgw/values_overrides/apparmor.yaml b/ceph-rgw/values_overrides/apparmor.yaml
index 64f34de04..be6935f74 100644
--- a/ceph-rgw/values_overrides/apparmor.yaml
+++ b/ceph-rgw/values_overrides/apparmor.yaml
@@ -19,6 +19,9 @@ pod:
         ceph-keyring-placement: runtime/default
         init: runtime/default
         create-s3-admin: runtime/default
+      ceph-rgw-pool:
+        ceph-rgw-pool: runtime/default
+        init: runtime/default
       ceph-rgw-test:
         ceph-rgw-ks-validation: runtime/default
         ceph-rgw-s3-validation: runtime/default
diff --git a/releasenotes/notes/ceph-rgw.yaml b/releasenotes/notes/ceph-rgw.yaml
index 9e545f7a4..d12236d25 100644
--- a/releasenotes/notes/ceph-rgw.yaml
+++ b/releasenotes/notes/ceph-rgw.yaml
@@ -31,4 +31,5 @@ ceph-rgw:
   - 0.1.28 Use Helm toolkit functions for Ceph probes
   - 0.1.29 Add 2023.1 Ubuntu Focal overrides
   - 0.1.30 Update Rook to 1.12.5 and Ceph to 18.2.0
+  - 0.1.31 Add a ceph-rgw-pool job to manage RGW pools
 ...