From 55f1d2db57fbce1539a036ab00634e0f2d585635 Mon Sep 17 00:00:00 2001
From: Jean-Charles Lopez <jl970p@att.com>
Date: Thu, 20 Sep 2018 10:48:33 -0700
Subject: [PATCH] Secure pool during deployment

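Add a conf.pool.target.protected flag to the ceph-client chart and pass
it through manage_pool/create_pool in the pool init script. Right after
a pool is created or expanded, the script clears the nosizechange,
nopgchange and nodelete flags so that size, crush_rule, pg_num and
pgp_num can still be adjusted. Once those settings are in place, the
three flags are set again if protection was requested. When the value
is absent, the template defaults to "false" and the Ceph defaults are
left untouched.
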
Change-Id: Ifbeb956ab2c015deaed501ee4bff22dfc1e0404f
---
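Pool protection is driven by conf.pool.target.protected, which this
patch defaults to true in values.yaml. A minimal override sketch,
assuming the chart is deployed with a custom values file, to switch
the behaviour back off:

    conf:
      pool:
        target:
          protected: false

true/1 enable the nosizechange, nopgchange and nodelete flags once the
pool settings have been applied; false/0 or omitting the key leaves the
Ceph defaults in place.
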
 ceph-client/templates/bin/pool/_init.sh.tpl | 36 +++++++++++++++++++--
 ceph-client/values.yaml                     |  1 +
 2 files changed, 35 insertions(+), 2 deletions(-)

diff --git a/ceph-client/templates/bin/pool/_init.sh.tpl b/ceph-client/templates/bin/pool/_init.sh.tpl
index a488f5ed1..f776b49e3 100644
--- a/ceph-client/templates/bin/pool/_init.sh.tpl
+++ b/ceph-client/templates/bin/pool/_init.sh.tpl
@@ -50,6 +50,7 @@ function create_pool () {
   POOL_REPLICATION=$3
   POOL_PLACEMENT_GROUPS=$4
   POOL_CRUSH_RULE=$5
+  POOL_PROTECTION=$6
   if ! ceph --cluster "${CLUSTER}" osd pool stats "${POOL_NAME}" > /dev/null 2>&1; then
     ceph --cluster "${CLUSTER}" osd pool create "${POOL_NAME}" ${POOL_PLACEMENT_GROUPS}
     while [ $(ceph --cluster "${CLUSTER}" -s | grep creating -c) -gt 0 ]; do echo -n .;sleep 1; done
@@ -58,6 +59,14 @@ function create_pool () {
     fi
     ceph --cluster "${CLUSTER}" osd pool application enable "${POOL_NAME}" "${POOL_APPLICATION}"
   fi
+#
+# Make sure the pool is not protected after creation AND expansion so we can manipulate its settings.
+# The final protection settings are applied once the pool parameters (size, pg_num, pgp_num) have been adjusted.
+#
+  ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nosizechange false
+  ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nopgchange false
+  ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nodelete false
+#
   ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" size ${POOL_REPLICATION}
   ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" crush_rule "${POOL_CRUSH_RULE}"
   for PG_PARAM in pg_num pgp_num; do
@@ -66,6 +75,26 @@ function create_pool () {
       ceph --cluster ceph osd pool set "${POOL_NAME}" "${PG_PARAM}" "${POOL_PLACEMENT_GROUPS}"
     fi
   done
+#
+# Handling of .Values.conf.pool.target.protected:
+# Possible settings:
+# - true  | 1 = Protect the pools after they get created
+# - false | 0 = Do not protect the pools after they get created; let the Ceph defaults apply
+# - Absent    = Same as false; the pools are not protected and the Ceph defaults apply
+#
+# If protection is not requested through values.yaml, the Ceph defaults are used. With Luminous no
+# protection is applied to the pools when they get created.
+#
+# Note: If the /etc/ceph/ceph.conf file modifies these defaults, the deployment will fail on pool creation
+# - nosizechange = Do not allow size and min_size changes on the pool
+# - nopgchange   = Do not allow pg_num and pgp_num changes on the pool
+# - nodelete     = Do not allow deletion of the pool
+#
+  if [ "x${POOL_PROTECTION}" == "xtrue" ] ||  [ "x${POOL_PROTECTION}" == "x1" ]; then
+    ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nosizechange true
+    ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nopgchange true
+    ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" nodelete true
+  fi
 }
 
 function manage_pool () {
@@ -76,8 +105,9 @@ function manage_pool () {
   TOTAL_DATA_PERCENT=$5
   TARGET_PG_PER_OSD=$6
   POOL_CRUSH_RULE=$7
+  POOL_PROTECTION=$8
   POOL_PLACEMENT_GROUPS=$(/tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD})
-  create_pool "${POOL_APPLICATION}" "${POOL_NAME}" "${POOL_REPLICATION}" "${POOL_PLACEMENT_GROUPS}" "${POOL_CRUSH_RULE}"
+  create_pool "${POOL_APPLICATION}" "${POOL_NAME}" "${POOL_REPLICATION}" "${POOL_PLACEMENT_GROUPS}" "${POOL_CRUSH_RULE}" "${POOL_PROTECTION}"
 }
 
 reweight_osds
@@ -85,12 +115,14 @@ reweight_osds
 {{ $targetNumOSD := .Values.conf.pool.target.osd }}
 {{ $targetPGperOSD := .Values.conf.pool.target.pg_per_osd }}
 {{ $crushRuleDefault := .Values.conf.pool.default.crush_rule }}
+{{ $targetProtection := .Values.conf.pool.target.protected | default "false" | quote | lower }}
 {{- range $pool := .Values.conf.pool.spec -}}
 {{- with $pool }}
-manage_pool {{ .application }} {{ .name }} {{ .replication }} {{ $targetNumOSD }} {{ .percent_total_data }} {{ $targetPGperOSD }} {{ $crushRuleDefault }}
+manage_pool {{ .application }} {{ .name }} {{ .replication }} {{ $targetNumOSD }} {{ .percent_total_data }} {{ $targetPGperOSD }} {{ $crushRuleDefault }} {{ $targetProtection }}
 {{- end }}
 {{- end }}
 
 {{- if .Values.conf.pool.crush.tunables }}
 ceph --cluster "${CLUSTER}" osd crush tunables {{ .Values.conf.pool.crush.tunables }}
 {{- end }}
+
diff --git a/ceph-client/values.yaml b/ceph-client/values.yaml
index ea7196ffc..631f098ad 100644
--- a/ceph-client/values.yaml
+++ b/ceph-client/values.yaml
@@ -126,6 +126,7 @@ conf:
       # to match the number of nodes in the OSH gate.
       osd: 5
       pg_per_osd: 100
+      protected: true
     default:
       #NOTE(portdirect): this should be 'same_host' for a single node
       # cluster to be in a healthy state
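
For reference, a rough sketch of the per-pool commands the rendered
init script runs when protection is enabled, followed by one extra
verification command that is not part of the script; the pool name
"rbd" and the cluster name "ceph" are only examples:

    ceph --cluster ceph osd pool set rbd nosizechange false   # unlock the pool before changing it
    ceph --cluster ceph osd pool set rbd nopgchange false
    ceph --cluster ceph osd pool set rbd nodelete false
    ceph --cluster ceph osd pool set rbd size 3               # size/crush_rule/pg_num/pgp_num adjustments
    ceph --cluster ceph osd pool set rbd nosizechange true    # re-protect once the settings are applied
    ceph --cluster ceph osd pool set rbd nopgchange true
    ceph --cluster ceph osd pool set rbd nodelete true
    ceph --cluster ceph osd dump | grep "^pool"                # verification only, not in the script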