Gustavo Ornaghi Antunes 63894e875a Improve ceph-mgr-provision job in rook-ceph app
This change improves the timeout used when running the
ceph-mgr-provision job. If the Ceph cluster takes longer than
5/7 minutes to become available, the job fails and the
application gets stuck at 67%.

Test Plan:
 - PASS: Upload/Apply rook-ceph app

Story: 2011066
Task: 50930

Change-Id: Ib4ce9ee48e1a8e663fa882485bd5108ecd476274
Signed-off-by: Gustavo Ornaghi Antunes <gustavo.ornaghiantunes@windriver.com>
2024-08-30 12:05:21 -03:00
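
For context, the relevant default lives under mgr_provision in the values
file below. A minimal sketch of a user-supplied Helm values override,
assuming mgr_provision.timeout_ceph is the timeout the commit refers to
(the 600s figure is only illustrative, not a recommended value):

    mgr_provision:
      timeout_ceph: 600s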

#
# Copyright (c) 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
global:
  configmap_key_init: ceph-key-init-bin
  #
  provision_storage: true
  cephfs_storage: true
  job_ceph_mgr_provision: true
  job_ceph_mon_audit: false
  job_ceph_osd_audit: true
  job_host_provision: true
  job_cleanup: true
  deployment_stx_ceph_manager: true
  # Defines whether to generate service account and role bindings.
  rbac: true
  # Node Selector
  nodeSelector: { node-role.kubernetes.io/control-plane: "" }
#
# RBAC options.
# Defaults should be fine in most cases.
rbac:
  clusterRole: rook-ceph-provisioner
  clusterRoleBinding: rook-ceph-provisioner
  role: rook-ceph-provisioner
  roleBinding: rook-ceph-provisioner
  serviceAccount: rook-ceph-provisioner
images:
  tags:
    ceph_config_helper: docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_18.2.2-1-20240312
    stx_ceph_manager: docker.io/starlingx/stx-ceph-manager:stx.10.0-v18.2.2-0
    k8s_entrypoint: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
    kubectl: docker.io/bitnami/kubectl:1.29
provisionStorage:
  # Defines the name of the provisioner associated with a set of storage classes
  provisioner_name: rook-ceph.rbd.csi.ceph.com
  # Enable this storage class as the system default storage class
  defaultStorageClass: rook-ceph
  # Configure storage classes.
  # Defaults for storage classes. Update this if you have a single Ceph storage cluster.
  # No need to add them to each class.
  classdefaults:
    # Define ip addresses of Ceph Monitors
    monitors: 192.168.204.3:6789,192.168.204.4:6789,192.168.204.1:6789
    # Ceph admin account
    adminId: admin
    # K8 secret name for the admin context
    adminSecretName: ceph-secret
  # Configure storage classes.
  # This section should be tailored to your setup. It allows you to define multiple storage
  # classes for the same cluster (e.g. if you have tiers of drives with different speeds).
  # If you have multiple Ceph clusters take attributes from classdefaults and add them here.
  classes:
    name: rook-ceph # Name of storage class.
    secret:
      # K8 secret name with key for accessing the Ceph pool
      userSecretName: ceph-secret-kube
      # Ceph user name to access this pool
      userId: kube
    pool:
      pool_name: kube
      replication: 1
      crush_rule_name: storage_tier_ruleset
      chunk_size: 8
cephfsStorage:
  provisioner_name: rook-ceph.cephfs.csi.ceph.com
  fs_name: kube-cephfs
  pool_name: kube-cephfs-data
host_provision:
  controller_hosts:
    - controller-0
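# The commit above suggests timeout_ceph bounds how long the ceph-mgr-provision
# job waits for the Ceph cluster to become available before failing.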
mgr_provision:
  timeout_ceph: 240s
ceph_audit_jobs:
  floatIP: 192.168.204.2
  audit:
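    # Run the audit every 3 minutes (standard cron syntax)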
    cron: "*/3 * * * *"
    deadline: 200
    history:
      success: 1
      failed: 1
hook:
  image: docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_18.2.2-1-20240312
cleanup:
  enable: true
  cluster_cleanup: rook-ceph
  rbac:
    clusterRole: rook-ceph-cleanup
    clusterRoleBinding: rook-ceph-cleanup
    role: rook-ceph-cleanup
    roleBinding: rook-ceph-cleanup
    serviceAccount: rook-ceph-cleanup
  mon_hosts:
    - controller-0