Ceph: Update failure domain overrides to support dynamic config
This PS updates the Ceph failure domain overrides to support dynamic configuration based on host/label based overrides. It also fixes a typo, identified in the following PS, in the directory-backed OSD script:

* https://review.openstack.org/#/c/623670/1

Change-Id: Ia449be23353083f9a77df2b592944571c907e277
Signed-off-by: Pete Birley <pete@port.direct>
parent d50bd2daad
commit 7608d2c9d7
@@ -25,6 +25,10 @@ set -ex
 : "${OSD_SOFT_FORCE_ZAP:=1}"
 : "${OSD_JOURNAL_PARTITION:=}"
 
+eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
+eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
+eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
+
 if [ "x${STORAGE_TYPE%-*}" == "xdirectory" ]; then
   export OSD_DEVICE="/var/lib/ceph/osd"
 else
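The three added eval lines read the failure-domain settings from the mounted /etc/ceph/storage.json instead of from container env vars. A minimal sketch of what that extraction does, using a hypothetical storage.json with the chart's default values:

    # Hypothetical input; the real file is rendered from .Values.conf.storage
    # by the configmap change further down.
    cat > /tmp/storage.json <<'EOF'
    {
      "failure_domain": "host",
      "failure_domain_by_hostname": "false",
      "failure_domain_name": "false"
    }
    EOF

    # json.dumps re-quotes the extracted value, and eval strips those
    # quotes again when making the assignment.
    eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /tmp/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
    echo "${CRUSH_FAILURE_DOMAIN_TYPE}"   # prints: host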
@@ -7,6 +7,10 @@ export LC_ALL=C
 : "${JOURNAL_DIR:=/var/lib/ceph/journal}"
 : "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
 
+eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
+eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
+eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
+
 function is_available {
   command -v $@ &>/dev/null
 }
@@ -95,7 +99,7 @@ if [[ -n "$(find /var/lib/ceph/osd -prune -empty)" ]]; then
     osd crush move "${HOSTNAME}" "${crush_failure_domain_type}=${crush_failure_domain_name}" || true
   fi
 }
-if [ "x${CRUSH_FAILURE_DOMAIN_TYPE}" != "host" ]; then
+if [ "x${CRUSH_FAILURE_DOMAIN_TYPE}" != "xhost" ]; then
   if [ "x${CRUSH_FAILURE_DOMAIN_NAME}" != "xfalse" ]; then
     crush_add_and_move "${CRUSH_FAILURE_DOMAIN_TYPE}" "${CRUSH_FAILURE_DOMAIN_NAME}"
   elif [ "x${CRUSH_FAILURE_DOMAIN_BY_HOSTNAME}" != "xfalse" ]; then
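The one-line change above is the typo fix referenced in the commit message: the left-hand side of the test is prefixed with "x", so the right-hand side must be "xhost", not "host". Since "x${CRUSH_FAILURE_DOMAIN_TYPE}" always starts with "x", it can never equal "host", so the broken test was true for every value and the custom failure-domain logic ran even for the default host domain. A quick illustration:

    CRUSH_FAILURE_DOMAIN_TYPE="host"
    [ "x${CRUSH_FAILURE_DOMAIN_TYPE}" != "host" ]  && echo "runs for every value"  # "xhost" != "host" always
    [ "x${CRUSH_FAILURE_DOMAIN_TYPE}" != "xhost" ] && echo "never runs"            # "xhost" == "xhost"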
@@ -48,6 +48,8 @@ metadata:
 data:
   ceph.conf: |
 {{ include "helm-toolkit.utils.to_ini" .Values.conf.ceph | indent 4 }}
+  storage.json: |
+{{ toPrettyJson .Values.conf.storage | indent 4 }}
 {{- end }}
 {{- end }}
 {{- if .Values.manifests.configmap_etc }}
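This renders the whole .Values.conf.storage tree into the configmap as storage.json, which the OSD scripts above parse at runtime. A hedged way to inspect the rendered output locally, assuming the chart sits at ./ceph-osd and this template lives at templates/configmap-etc.yaml (Helm 3 syntax):

    helm template ceph-osd ./ceph-osd --show-only templates/configmap-etc.yaml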
@@ -193,12 +193,6 @@ spec:
             value: "ceph"
           - name: CEPH_GET_ADMIN_KEY
             value: "1"
-          - name: CRUSH_FAILURE_DOMAIN_TYPE
-            value: {{ .Values.conf.storage.failure_domain | default "host" | quote }}
-          - name: CRUSH_FAILURE_DOMAIN_NAME
-            value: {{ .Values.conf.storage.failure_domain_name | default "false" | quote }}
-          - name: CRUSH_FAILURE_DOMAIN_BY_HOSTNAME
-            value: {{ .Values.conf.storage.failure_domain_by_hostname | default "false" | quote }}
           - name: NAMESPACE
             valueFrom:
               fieldRef:
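The six CRUSH_FAILURE_DOMAIN_* env entries are dropped because the eval lines added above overwrite these variables from storage.json regardless of what the container env holds, so keeping them would only mislead. A small bash illustration of that precedence:

    # Hypothetical: even if the daemonset still injected this env var...
    export CRUSH_FAILURE_DOMAIN_TYPE="rack"
    # ...the eval in the OSD script replaces it with the storage.json value.
    eval CRUSH_FAILURE_DOMAIN_TYPE=$(echo '{"failure_domain": "host"}' | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
    echo "${CRUSH_FAILURE_DOMAIN_TYPE}"   # prints: host, not rack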
@@ -252,6 +246,10 @@ spec:
               mountPath: /tmp/utils-checkDNS.sh
               subPath: utils-checkDNS.sh
               readOnly: true
+            - name: ceph-osd-etc
+              mountPath: /etc/ceph/storage.json
+              subPath: storage.json
+              readOnly: true
             - name: ceph-osd-etc
               mountPath: /etc/ceph/ceph.conf.template
               subPath: ceph.conf
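The new mount projects the storage.json key from the ceph-osd-etc configmap into the container alongside ceph.conf.template. One way to verify inside a running OSD pod (the namespace and pod name here are assumptions):

    kubectl -n ceph exec <ceph-osd-pod> -- cat /etc/ceph/storage.json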
@@ -128,9 +128,9 @@ conf:
     # `failure_domain_by_hostname`: Specify the portion of the hostname to use for your failure domain bucket name.
     # `failure_domain_name`: Manually name the failure domain bucket name. This configuration option should only be used
     # when using host based overrides.
-    # failure_domain: "rack"
-    # failure_domain_by_hostname: 1-8
-    # failure_domain_name: false
+    failure_domain: "host"
+    failure_domain_by_hostname: "false"
+    failure_domain_name: "false"
 
     # NOTE(portdirect): for homogeneous clusters the `osd` key can be used to
     # define OSD pods that will be deployed across the cluster.
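With these keys now active rather than commented out, the failure domain can be overridden per deployment without editing the chart. A hedged example using --set (release name and chart path are illustrative):

    # Bucket OSD hosts into racks named from hostname characters 1-8.
    helm upgrade --install ceph-osd ./ceph-osd \
      --set conf.storage.failure_domain=rack \
      --set conf.storage.failure_domain_by_hostname=1-8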