Merge "Secure pool during deployment"

Zuul 2018-10-17 07:37:38 +00:00 committed by Gerrit Code Review
commit 1b7240c64c
2 changed files with 35 additions and 2 deletions
ceph-client
templates/bin/pool
values.yaml

@@ -50,6 +50,7 @@ function create_pool () {
POOL_REPLICATION=$3
POOL_PLACEMENT_GROUPS=$4
POOL_CRUSH_RULE=$5
POOL_PROTECTION=$6
if ! ceph --cluster "${CLUSTER}" osd pool stats "${POOL_NAME}" > /dev/null 2>&1; then
ceph --cluster "${CLUSTER}" osd pool create "${POOL_NAME}" ${POOL_PLACEMENT_GROUPS}
while [ $(ceph --cluster "${CLUSTER}" -s | grep creating -c) -gt 0 ]; do echo -n .;sleep 1; done
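For reference, the new sixth argument changes how create_pool is invoked. A minimal sketch of a direct call, assuming a hypothetical pool named rbd with 128 placement groups and a replicated_rule crush rule (none of these example values come from the commit; the argument order matches the call shown further down):

# Hypothetical direct invocation of the updated function:
#   $1=application $2=name $3=replication $4=placement groups $5=crush rule $6=protection
create_pool rbd rbd 3 128 replicated_rule true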
@@ -58,6 +59,14 @@ function create_pool () {
fi
ceph --cluster "${CLUSTER}" osd pool application enable "${POOL_NAME}" "${POOL_APPLICATION}"
fi
#
# Make sure pool is not protected after creation AND expansion so we can manipulate its settings.
# Final protection settings are applied once parameters (size, pg) have been adjusted.
#
ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nosizechange false
ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nopgchange false
ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nodelete false
#
ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" size ${POOL_REPLICATION}
ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" crush_rule "${POOL_CRUSH_RULE}"
for PG_PARAM in pg_num pgp_num; do
@@ -66,6 +75,26 @@ function create_pool () {
ceph --cluster ceph osd pool set "${POOL_NAME}" "${PG_PARAM}" "${POOL_PLACEMENT_GROUPS}"
fi
done
#
# Handling of .Values.conf.pool.target.protected:
# Possible settings
# - true | 1 = Protect the pools after they get created
# - false | 0 = Do not protect the pools once they get created and let Ceph defaults apply
# - Absent = Do not protect the pools once they get created and let Ceph defaults apply
#
# If protection is not requested through values.yaml, just use the Ceph defaults. With Luminous we do not
# apply any protection to the pools when they get created.
#
# Note: If the /etc/ceph/ceph.conf file modifies the defaults, the deployment will fail on pool creation
# - nosizechange = Do not allow size and min_size changes on the pool
# - nopgchange = Do not allow pg_num and pgp_num changes on the pool
# - nodelete = Do not allow deletion of the pool
#
if [ "x${POOL_PROTECTION}" == "xtrue" ] || [ "x${POOL_PROTECTION}" == "x1" ]; then
ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nosizechange true
ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nopgchange true
ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nodelete true
fi
}
function manage_pool () {
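After a deployment with protection enabled, the flags set above can be read back from the cluster. A minimal verification sketch, assuming the default cluster name ceph and a hypothetical pool named rbd (neither is mandated by the commit):

# Read back the protection flags that create_pool applies when POOL_PROTECTION is true.
for FLAG in nosizechange nopgchange nodelete; do
  ceph --cluster ceph osd pool get rbd "${FLAG}"
done
# Expected output with protection enabled:
#   nosizechange: true
#   nopgchange: true
#   nodelete: true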
@@ -76,8 +105,9 @@ function manage_pool () {
TOTAL_DATA_PERCENT=$5
TARGET_PG_PER_OSD=$6
POOL_CRUSH_RULE=$7
POOL_PROTECTION=$8
POOL_PLACEMENT_GROUPS=$(/tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD})
create_pool "${POOL_APPLICATION}" "${POOL_NAME}" "${POOL_REPLICATION}" "${POOL_PLACEMENT_GROUPS}" "${POOL_CRUSH_RULE}"
create_pool "${POOL_APPLICATION}" "${POOL_NAME}" "${POOL_REPLICATION}" "${POOL_PLACEMENT_GROUPS}" "${POOL_CRUSH_RULE}" "${POOL_PROTECTION}"
}
reweight_osds
@@ -85,12 +115,14 @@ reweight_osds
{{ $targetNumOSD := .Values.conf.pool.target.osd }}
{{ $targetPGperOSD := .Values.conf.pool.target.pg_per_osd }}
{{ $crushRuleDefault := .Values.conf.pool.default.crush_rule }}
{{ $targetProtection := .Values.conf.pool.target.protected | default "false" | quote | lower }}
{{- range $pool := .Values.conf.pool.spec -}}
{{- with $pool }}
manage_pool {{ .application }} {{ .name }} {{ .replication }} {{ $targetNumOSD }} {{ .percent_total_data }} {{ $targetPGperOSD }} {{ $crushRuleDefault }}
manage_pool {{ .application }} {{ .name }} {{ .replication }} {{ $targetNumOSD }} {{ .percent_total_data }} {{ $targetPGperOSD }} {{ $crushRuleDefault }} {{ $targetProtection }}
{{- end }}
{{- end }}
{{- if .Values.conf.pool.crush.tunables }}
ceph --cluster "${CLUSTER}" osd crush tunables {{ .Values.conf.pool.crush.tunables }}
{{- end }}
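To illustrate how the template threads the new value through, here is roughly what the fragment above could render to for a single pool spec. The application, name, replication, percent_total_data and replicated_rule values are assumptions; osd: 5 and pg_per_osd: 100 come from the values.yaml hunk below. Note that the quote | lower pipeline emits the flag as a quoted lowercase string, which the shell strips to plain true before the comparison in create_pool:

# Illustrative rendered call (assumed pool spec: application=rbd, name=rbd,
# replication=3, percent_total_data=40):
manage_pool rbd rbd 3 5 40 100 replicated_rule "true"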

@@ -126,6 +126,7 @@ conf:
# to match the number of nodes in the OSH gate.
osd: 5
pg_per_osd: 100
protected: true
default:
#NOTE(portdirect): this should be 'same_host' for a single node
# cluster to be in a healthy state
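Because an absent or false value simply leaves the Ceph defaults in place, protection can be switched off per deployment with an override. A hypothetical override file is sketched below; the file name, release name and chart path are illustrative, not part of the commit:

# pool-protection-overrides.yaml (hypothetical)
conf:
  pool:
    target:
      protected: false

# applied with, for example:
#   helm upgrade ceph-client ./ceph-client --values pool-protection-overrides.yaml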