[ceph-rgw] Add a ceph-rgw-pool job to re-run the ceph-rbd-pool job

The Reef release disallows internal pools from being created by
clients, which means the ceph-client chart is no longer able to
create the .rgw.root pool and configure it. The new ceph-rgw-pool
job deletes and re-creates the ceph-rbd-pool job after ceph-rgw has
been deployed so that the re-created job can configure the .rgw.root
pool correctly.

Change-Id: Ic3b9d26de566fe379227a2fe14dc061248e84a4c
Stephen Taylor 2023-10-25 11:08:19 -06:00
parent bad0169ece
commit d070774bfc
7 changed files with 191 additions and 1 deletion
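For context on what re-running the pool job achieves: on a Reef cluster radosgw creates .rgw.root itself when it starts, and the ceph-rbd-pool job can then adjust that already-existing pool even though it may no longer create it. The commands below are only an illustrative sketch of that kind of adjustment, not the chart's actual logic:

    # Illustrative only -- example settings applied to an already-existing pool
    ceph osd pool application enable .rgw.root rgw
    ceph osd pool set .rgw.root size 3       # example replica count
    ceph osd pool set .rgw.root pg_num 32    # example PG count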

View File

@@ -15,6 +15,6 @@ apiVersion: v1
appVersion: v1.0.0
description: OpenStack-Helm Ceph RadosGW
name: ceph-rgw
version: 0.1.30
version: 0.1.31
home: https://github.com/ceph/ceph
...

View File

@@ -0,0 +1,46 @@
#!/bin/bash
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
set -ex
# Get the Ceph cluster namespace, assuming "ceph" if not defined
{{- if empty .Values.endpoints.ceph_mon.namespace -}}
CEPH_NS=ceph
{{ else }}
CEPH_NS={{ .Values.endpoints.ceph_mon.namespace }}
{{- end }}
# If the ceph-rbd pool job exists, delete it and re-create it
# NOTE: This check is currently required to handle the Rook case properly.
# Other charts still deploy ceph-rgw outside of Rook, and Rook does not
# have a ceph-rbd-pool job to re-run.
if [[ -n "$(kubectl -n ${CEPH_NS} get jobs | grep ceph-rbd-pool)" ]]
then
  kubectl -n ${CEPH_NS} get job ceph-rbd-pool -o json > /tmp/ceph-rbd-pool.json
  kubectl -n ${CEPH_NS} delete job ceph-rbd-pool
  # Strip the server-populated, immutable fields from the saved job definition
  # so "kubectl create" can re-create it cleanly
  jq 'del(.spec.selector) |
      del(.spec.template.metadata.creationTimestamp) |
      del(.spec.template.metadata.labels) |
      del(.metadata.creationTimestamp) |
      del(.metadata.uid) |
      del(.status)' /tmp/ceph-rbd-pool.json | \
    kubectl create -f -
  while [[ -z "$(kubectl -n ${CEPH_NS} get pods | grep ceph-rbd-pool | grep Completed)" ]]
  do
    sleep 5
  done
fi
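The loop above polls for a Completed pod from the re-created job. As a purely illustrative alternative (not part of this change), the same wait could be expressed with kubectl's built-in job condition check:

    kubectl -n "${CEPH_NS}" wait --for=condition=complete --timeout=600s job/ceph-rbd-pool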

View File

@@ -38,6 +38,8 @@ data:
{{ tuple "bin/rgw/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
rgw-init.sh: |
{{ tuple "bin/rgw/_init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
rerun-pool-job.sh: |
{{ tuple "bin/rgw/_rerun-pool-job.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
storage-init.sh: |
{{ tuple "bin/_ceph-rgw-storage-init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
ceph-admin-keyring.sh: |
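The new rerun-pool-job.sh key is rendered from the script template above and is later mounted into the job pod at /tmp/rerun-pool-job.sh. To spot-check the rendered script on a live deployment, something along these lines works (the openstack namespace is just an example):

    kubectl -n openstack get configmap ceph-rgw-bin \
      -o jsonpath='{.data.rerun-pool-job\.sh}'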

View File

@@ -0,0 +1,115 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
# This job is required for Reef and later because Ceph now disallows the
# creation of internal pools (pool names beginning with a ".") and the
# ceph-rbd-pool job therefore can't configure them if they don't yet exist.
# This job simply deletes and re-creates the ceph-rbd-pool job after deploying
# ceph-rgw so it can apply the correct configuration to the .rgw.root pool.
{{- if and .Values.manifests.job_rgw_pool .Values.deployment.ceph }}
{{- $envAll := . }}
{{- $serviceAccountName := "ceph-rgw-pool" }}
{{ tuple $envAll "rgw_pool" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ $serviceAccountName }}
rules:
  - apiGroups:
      - ''
    resources:
      - pods
      - jobs
    verbs:
      - create
      - get
      - delete
      - list
  - apiGroups:
      - 'batch'
    resources:
      - jobs
    verbs:
      - create
      - get
      - delete
      - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ $serviceAccountName }}
subjects:
  - kind: ServiceAccount
    name: {{ $serviceAccountName }}
    namespace: {{ $envAll.Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ $serviceAccountName }}
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: batch/v1
kind: Job
metadata:
  name: ceph-rgw-pool
  labels:
{{ tuple $envAll "ceph" "rbd-pool" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
  annotations:
    {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
spec:
  template:
    metadata:
      name: ceph-rgw-pool
      labels:
{{ tuple $envAll "ceph" "rbd-pool" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
{{ dict "envAll" $envAll "podName" "ceph-rgw-pool" "containerNames" (list "ceph-rgw-pool" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
    spec:
{{ dict "envAll" $envAll "application" "rgw_pool" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
      serviceAccountName: {{ $serviceAccountName }}
      restartPolicy: {{ $envAll.Values.jobs.rgw_pool.restartPolicy | quote }}
      affinity:
{{ tuple $envAll "ceph" "rbd-pool" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
      nodeSelector:
        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}
      initContainers:
{{ tuple $envAll "rgw_pool" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
      containers:
        - name: ceph-rgw-pool
{{ tuple $envAll "ceph_rgw_pool" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.jobs.rgw_pool | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "rgw_pool" "container" "rgw_pool" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          command:
            - /tmp/rerun-pool-job.sh
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: ceph-rgw-bin
              mountPath: /tmp/rerun-pool-job.sh
              subPath: rerun-pool-job.sh
              readOnly: true
      volumes:
        - name: pod-tmp
          emptyDir: {}
        - name: ceph-rgw-bin
          configMap:
            name: ceph-rgw-bin
            defaultMode: 0555
        - name: pod-run
          emptyDir:
            medium: "Memory"
{{- end }}
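To preview how the job renders with a given set of values before deploying, helm template can be limited to this manifest; the chart path and template filename below are assumptions for illustration:

    helm template ceph-rgw ./ceph-rgw \
      --set deployment.ceph=true --set manifests.job_rgw_pool=true \
      -s templates/job-rgw-pool.yaml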

View File

@@ -27,6 +27,7 @@ images:
    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_focal_18.2.0-1-20231013'
    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_focal_18.2.0-1-20231013'
    ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:ubuntu_focal_18.2.0-1-20231013'
    ceph_rgw_pool: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_focal_18.2.0-1-20231013'
    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
    image_repo_sync: 'docker.io/library/docker:17.07.0'
    rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_focal_18.2.0-1-20231013'
@@ -123,6 +124,13 @@ pod:
        bootstrap:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    rgw_pool:
      pod:
        runAsUser: 65534
      container:
        rgw_pool:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
  dns_policy: "ClusterFirstWithHostNet"
  replicas:
    rgw: 2
@@ -215,6 +223,13 @@ pod:
        requests:
          memory: "128Mi"
          cpu: "500m"
      rgw_pool:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
@@ -533,6 +548,9 @@ dependencies:
      services:
        - endpoint: internal
          service: ceph_object_store
    rgw_pool:
      jobs:
        - ceph-rgw-storage-init
    tests:
      services:
        - endpoint: internal
@@ -692,6 +710,10 @@ endpoints:
        default: 53
        protocol: UDP
jobs:
  rgw_pool:
    restartPolicy: OnFailure
manifests:
  certificates: false
  configmap_ceph_templates: true
@@ -710,6 +732,7 @@ manifests:
  job_ks_user: true
  job_s3_admin: true
  job_rgw_placement_targets: false
  job_rgw_pool: true
  secret_s3_rgw: true
  secret_keystone_rgw: true
  secret_ingress_tls: true
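Taken together, these values make the new job wait for ceph-rgw-storage-init, restart it on failure, and expose a manifest flag to turn it off where no ceph-rbd-pool job exists. An illustrative override (example flags, not new defaults):

    # e.g. skip the re-run job entirely, or relax its restart behaviour
    helm upgrade --install ceph-rgw ./ceph-rgw \
      --set manifests.job_rgw_pool=false \
      --set jobs.rgw_pool.restartPolicy=Never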

View File

@@ -19,6 +19,9 @@ pod:
        ceph-keyring-placement: runtime/default
        init: runtime/default
        create-s3-admin: runtime/default
      ceph-rgw-pool:
        ceph-rgw-pool: runtime/default
        init: runtime/default
      ceph-rgw-test:
        ceph-rgw-ks-validation: runtime/default
        ceph-rgw-s3-validation: runtime/default
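With this override, the mandatory-access-control snippet should stamp runtime/default AppArmor annotations onto the ceph-rgw-pool pod's containers. A rough way to confirm (namespace and label selector are illustrative):

    kubectl -n openstack describe pod -l application=ceph,component=rbd-pool | grep -i apparmor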

View File

@@ -31,4 +31,5 @@ ceph-rgw:
  - 0.1.28 Use Helm toolkit functions for Ceph probes
  - 0.1.29 Add 2023.1 Ubuntu Focal overrides
  - 0.1.30 Update Rook to 1.12.5 and Ceph to 18.2.0
  - 0.1.31 Add a ceph-rgw-pool job to manage RGW pools
...