diff --git a/ceph-client/Chart.yaml b/ceph-client/Chart.yaml
index c55d08b56..4f7c696d3 100644
--- a/ceph-client/Chart.yaml
+++ b/ceph-client/Chart.yaml
@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph Client
 name: ceph-client
-version: 0.1.5
+version: 0.1.6
 home: https://github.com/ceph/ceph-client
 ...
diff --git a/ceph-client/templates/bin/_helm-tests.sh.tpl b/ceph-client/templates/bin/_helm-tests.sh.tpl
index abbe137a8..0906c8159 100755
--- a/ceph-client/templates/bin/_helm-tests.sh.tpl
+++ b/ceph-client/templates/bin/_helm-tests.sh.tpl
@@ -43,10 +43,10 @@ function check_recovery_flags() {
 function check_osd_count() {
   echo "#### Start: Checking OSD count ####"
   noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')
-  osd_stat=$(ceph osd stat -f json)
-  num_osd=$(jq '.osdmap.num_osds' <<< "$osd_stat")
-  num_in_osds=$(jq '.osdmap.num_in_osds' <<< "$osd_stat")
-  num_up_osds=$(jq '.osdmap.num_up_osds' <<< "$osd_stat")
+  osd_stat=$(ceph osd stat -f json-pretty)
+  num_osd=$(awk '/"num_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+  num_in_osds=$(awk '/"num_in_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+  num_up_osds=$(awk '/"num_up_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
 
   MIN_OSDS=$((${num_osd}*$REQUIRED_PERCENT_OF_OSDS/100))
   if [ ${MIN_OSDS} -lt 1 ]; then
@@ -188,7 +188,7 @@ function pool_validation() {
         exit 1
       fi
     fi
-    if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
+    if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1 | sort -n | head -n1) -ge 14 ]]; then
       if [ "x${size}" != "x${RBD}" ] || [ "x${min_size}" != "x${EXPECTED_POOLMINSIZE}" ] \
         || [ "x${crush_rule}" != "x${expectedCrushRuleId}" ]; then
         echo "Pool ${name} has incorrect parameters!!! Size=${size}, Min_Size=${min_size}, Rule=${crush_rule}, PG_Autoscale_Mode=${pg_autoscale_mode}"
diff --git a/ceph-client/templates/bin/mgr/_start.sh.tpl b/ceph-client/templates/bin/mgr/_start.sh.tpl
index 6fe36d0f8..6f619b7ab 100644
--- a/ceph-client/templates/bin/mgr/_start.sh.tpl
+++ b/ceph-client/templates/bin/mgr/_start.sh.tpl
@@ -44,7 +44,7 @@ ceph --cluster "${CLUSTER}" -v
 # Env. variables matching the pattern "<module>_" will be
 # found and parsed for config-key settings by
 #  ceph config set mgr mgr/<module>/<key> <value>
-MODULES_TO_DISABLE=`ceph mgr dump | python -c "import json, sys; print(' '.join(json.load(sys.stdin)['modules']))"`
+MODULES_TO_DISABLE=`ceph mgr dump | python3 -c "import json, sys; print(' '.join(json.load(sys.stdin)['modules']))"`
 
 for module in ${ENABLED_MODULES}; do
     # This module may have been enabled in the past
@@ -57,7 +57,7 @@ for module in ${ENABLED_MODULES}; do
         option=${option/${module}_/}
         key=`echo $option | cut -d= -f1`
         value=`echo $option | cut -d= -f2`
-        if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
+        if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1 | sort -n | head -n1) -ge 14 ]]; then
           ceph --cluster "${CLUSTER}" config set mgr mgr/$module/$key $value --force
         else
           ceph --cluster "${CLUSTER}" config set mgr mgr/$module/$key $value
diff --git a/ceph-client/templates/bin/pool/_init.sh.tpl b/ceph-client/templates/bin/pool/_init.sh.tpl
index 73f004ae7..0601d33cd 100644
--- a/ceph-client/templates/bin/pool/_init.sh.tpl
+++ b/ceph-client/templates/bin/pool/_init.sh.tpl
@@ -35,7 +35,7 @@ function wait_for_pgs () {
   pgs_ready=0
   query='map({state: .state}) | group_by(.state) | map({state: .[0].state, count: length}) | .[] | select(.state | contains("active") | not)'
 
-  if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
+  if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1 | sort -n | head -n1) -ge 14 ]]; then
     query=".pg_stats | ${query}"
   fi
 
@@ -70,10 +70,11 @@ function check_recovery_flags () {
 function check_osd_count() {
   echo "#### Start: Checking OSD count ####"
   noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')
-  osd_stat=$(ceph osd stat -f json)
-  num_osd=$(jq '.osdmap.num_osds' <<< "$osd_stat")
-  num_in_osds=$(jq '.osdmap.num_in_osds' <<< "$osd_stat")
-  num_up_osds=$(jq '.osdmap.num_up_osds' <<< "$osd_stat")
+  osd_stat=$(ceph osd stat -f json-pretty)
+  num_osd=$(awk '/"num_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+  num_in_osds=$(awk '/"num_in_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+  num_up_osds=$(awk '/"num_up_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+
   EXPECTED_OSDS={{.Values.conf.pool.target.osd}}
   REQUIRED_PERCENT_OF_OSDS={{.Values.conf.pool.target.required_percent_of_osds}}
 
@@ -123,7 +124,7 @@ function create_crushrule () {
 }
 
 # Set mons to use the msgr2 protocol on nautilus
-if [[ -z "$(ceph mon versions | grep ceph\ version | grep -v nautilus)" ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1 | sort -n | head -n1) -ge 14 ]]; then
   ceph --cluster "${CLUSTER}" mon enable-msgr2
 fi
 
@@ -183,7 +184,7 @@ function create_pool () {
     ceph --cluster "${CLUSTER}" osd pool application enable "${POOL_NAME}" "${POOL_APPLICATION}"
   fi
 
-  if [[ -z "$(ceph osd versions | grep ceph\ version | grep -v nautilus)" ]] && [[ "${ENABLE_AUTOSCALER}" == "true" ]] ; then
+  if [[ $(ceph osd versions | awk '/version/{print $3}' | cut -d. -f1 | sort -n | head -n1) -ge 14 ]] && [[ "${ENABLE_AUTOSCALER}" == "true" ]] ; then
     ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" pg_autoscale_mode on
   else
     ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" pg_autoscale_mode off
@@ -199,7 +200,7 @@ function create_pool () {
   ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" size ${POOL_REPLICATION}
   ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" crush_rule "${POOL_CRUSH_RULE}"
 # set pg_num to pool
-  if [[ -z "$(ceph osd versions | grep ceph\ version | grep -v nautilus)" ]]; then
+  if [[ $(ceph osd versions | awk '/version/{print $3}' | cut -d. -f1 | sort -n | head -n1) -ge 14 ]]; then
     ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" "pg_num" "${POOL_PLACEMENT_GROUPS}"
   else
     for PG_PARAM in pg_num pgp_num; do
@@ -246,10 +247,10 @@ function manage_pool () {
   POOL_PROTECTION=$8
   CLUSTER_CAPACITY=$9
   TOTAL_OSDS={{.Values.conf.pool.target.osd}}
-  POOL_PLACEMENT_GROUPS=$(/tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD})
+  POOL_PLACEMENT_GROUPS=$(python3 /tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD})
   create_pool "${POOL_APPLICATION}" "${POOL_NAME}" "${POOL_REPLICATION}" "${POOL_PLACEMENT_GROUPS}" "${POOL_CRUSH_RULE}" "${POOL_PROTECTION}"
   POOL_REPLICAS=$(ceph --cluster "${CLUSTER}" osd pool get "${POOL_NAME}" size | awk '{print $2}')
-  POOL_QUOTA=$(python -c "print(int($CLUSTER_CAPACITY * $TOTAL_DATA_PERCENT * $TARGET_QUOTA / $POOL_REPLICAS / 100 / 100))")
+  POOL_QUOTA=$(python3 -c "print(int($CLUSTER_CAPACITY * $TOTAL_DATA_PERCENT * $TARGET_QUOTA / $POOL_REPLICAS / 100 / 100))")
   ceph --cluster "${CLUSTER}" osd pool set-quota "${POOL_NAME}" max_bytes $POOL_QUOTA
 }
 
@@ -262,12 +263,16 @@ reweight_osds
 {{ $targetQuota := .Values.conf.pool.target.quota | default 100 }}
 {{ $targetProtection := .Values.conf.pool.target.protected | default "false" | quote | lower }}
 cluster_capacity=0
-if [[ -z "$(ceph osd versions | grep ceph\ version | grep -v nautilus)" ]]; then
+if [[ $(ceph -v | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
   cluster_capacity=$(ceph --cluster "${CLUSTER}" df | grep "TOTAL" | awk '{print $2 substr($3, 1, 1)}' | numfmt --from=iec)
-  enable_or_disable_autoscaling
 else
   cluster_capacity=$(ceph --cluster "${CLUSTER}" df | head -n3 | tail -n1 | awk '{print $1 substr($2, 1, 1)}' | numfmt --from=iec)
 fi
+
+if [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1 | sort -n | head -n1) -eq 14 ]]; then
+  enable_or_disable_autoscaling
+fi
+
 {{- range $pool := .Values.conf.pool.spec -}}
 {{- with $pool }}
 {{- if .crush_rule }}
diff --git a/ceph-client/templates/bin/utils/_checkPGs.py.tpl b/ceph-client/templates/bin/utils/_checkPGs.py.tpl
index 40f74f3d6..9836b7ccc 100755
--- a/ceph-client/templates/bin/utils/_checkPGs.py.tpl
+++ b/ceph-client/templates/bin/utils/_checkPGs.py.tpl
@@ -106,9 +106,9 @@ class cephCRUSH():
         """Replica of the pool.  Initialize to 0."""
         self.poolSize = 0
 
-    def isNautilus(self):
-        grepResult = int(subprocess.check_output('ceph mon versions | egrep -q "nautilus" | echo $?', shell=True))  # nosec
-        return grepResult == 0
+    def isSupportedRelease(self):
+        cephMajorVer = int(subprocess.check_output("ceph mon versions | awk '/version/{print $3}' | cut -d. -f1 | sort -n | head -n1", shell=True))  # nosec
+        return cephMajorVer >= 14
 
     def getPoolSize(self, poolName):
         """
@@ -129,7 +129,7 @@ class cephCRUSH():
         return
 
     def checkPGs(self, poolName):
-        poolPGs = self.poolPGs['pg_stats'] if self.isNautilus() else self.poolPGs
+        poolPGs = self.poolPGs['pg_stats'] if self.isSupportedRelease() else self.poolPGs
         if not poolPGs:
             return
         print('Checking PGs in pool {} ...'.format(poolName)),
diff --git a/ceph-client/templates/bin/utils/_checkPGs.sh.tpl b/ceph-client/templates/bin/utils/_checkPGs.sh.tpl
index 8971ea571..1a820ca2f 100644
--- a/ceph-client/templates/bin/utils/_checkPGs.sh.tpl
+++ b/ceph-client/templates/bin/utils/_checkPGs.sh.tpl
@@ -18,4 +18,4 @@ set -ex
 
 mgrPod=$(kubectl get pods --namespace=${DEPLOYMENT_NAMESPACE} --selector=application=ceph --selector=component=mgr --output=jsonpath={.items[0].metadata.name} 2>/dev/null)
 
-kubectl exec -t ${mgrPod} --namespace=${DEPLOYMENT_NAMESPACE} -- /tmp/utils-checkPGs.py All 2>/dev/null
+kubectl exec -t ${mgrPod} --namespace=${DEPLOYMENT_NAMESPACE} -- python3 /tmp/utils-checkPGs.py All 2>/dev/null
diff --git a/ceph-client/values.yaml b/ceph-client/values.yaml
index c422793d4..9d341acf0 100644
--- a/ceph-client/values.yaml
+++ b/ceph-client/values.yaml
@@ -24,11 +24,11 @@ release_group: null
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
-    ceph_mds: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-    ceph_mgr: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-    ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+    ceph_mds: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+    ceph_mgr: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+    ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/docker:17.07.0'
   local_registry:
@@ -326,6 +326,11 @@ conf:
     # the ceph pool management job, as it tunes the pgs and crush rule, based on
     # the above.
     spec:
+      # Health metrics pool
+      - name: device_health_metrics
+        application: mgr_devicehealth
+        replication: 1
+        percent_total_data: 5
       # RBD pool
       - name: rbd
         application: rbd
@@ -404,7 +409,7 @@ conf:
       - name: default.rgw.buckets.data
         application: rgw
         replication: 3
-        percent_total_data: 34.8
+        percent_total_data: 29
 
   ceph:
     global:
@@ -497,8 +502,7 @@ bootstrap:
     ceph -s
     function ensure_pool () {
       ceph osd pool stats $1 || ceph osd pool create $1 $2
-      local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
-      if [[ ${test_version} -gt 0 ]]; then
+      if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1 | sort -n | head -n1) -ge 12 ]]; then
         ceph osd pool application enable $1 $3
       fi
     }
diff --git a/ceph-mon/Chart.yaml b/ceph-mon/Chart.yaml
index 93822be51..2ed9b165e 100644
--- a/ceph-mon/Chart.yaml
+++ b/ceph-mon/Chart.yaml
@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph Mon
 name: ceph-mon
-version: 0.1.3
+version: 0.1.4
 home: https://github.com/ceph/ceph
 ...
diff --git a/ceph-mon/templates/bin/keys/_bootstrap-keyring-manager.sh.tpl b/ceph-mon/templates/bin/keys/_bootstrap-keyring-manager.sh.tpl
index 874dd4839..5c031aa72 100644
--- a/ceph-mon/templates/bin/keys/_bootstrap-keyring-manager.sh.tpl
+++ b/ceph-mon/templates/bin/keys/_bootstrap-keyring-manager.sh.tpl
@@ -20,7 +20,7 @@ set -ex
 {{- $envAll := . }}
 
 function ceph_gen_key () {
-  python ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
+  python3 ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
 }
 
 function kube_ceph_keyring_gen () {
diff --git a/ceph-mon/templates/bin/keys/_storage-keyring-manager.sh.tpl b/ceph-mon/templates/bin/keys/_storage-keyring-manager.sh.tpl
index 5b8d292dd..598033253 100644
--- a/ceph-mon/templates/bin/keys/_storage-keyring-manager.sh.tpl
+++ b/ceph-mon/templates/bin/keys/_storage-keyring-manager.sh.tpl
@@ -19,7 +19,7 @@ set -ex
 {{- $envAll := . }}
 
 function ceph_gen_key () {
-  python ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
+  python3 ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
 }
 
 function kube_ceph_keyring_gen () {
diff --git a/ceph-mon/templates/bin/moncheck/_start.sh.tpl b/ceph-mon/templates/bin/moncheck/_start.sh.tpl
index dfb86af92..4dc4f90fd 100644
--- a/ceph-mon/templates/bin/moncheck/_start.sh.tpl
+++ b/ceph-mon/templates/bin/moncheck/_start.sh.tpl
@@ -16,7 +16,7 @@ else
 fi
 
 function check_mon_msgr2 {
- if [[ -z "$(ceph mon versions | grep ceph\ version | grep -v nautilus)" ]]; then
+ if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1 | sort -n | head -n1) -ge 14 ]]; then
    if ceph health detail|grep -i "MON_MSGR2_NOT_ENABLED"; then
      echo "ceph-mon msgr v2 not enabled on all ceph mons so enabling"
      ceph mon enable-msgr2
diff --git a/ceph-mon/values.yaml b/ceph-mon/values.yaml
index b1e23f55e..08cfc8e10 100644
--- a/ceph-mon/values.yaml
+++ b/ceph-mon/values.yaml
@@ -23,10 +23,10 @@ deployment:
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
-    ceph_mon: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-    ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+    ceph_mon: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+    ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/docker:17.07.0'
   local_registry:
@@ -292,8 +292,7 @@ bootstrap:
     ceph -s
     function ensure_pool () {
       ceph osd pool stats $1 || ceph osd pool create $1 $2
-      local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
-      if [[ ${test_version} -gt 0 ]]; then
+      if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1 | sort -n | head -n1) -ge 12 ]]; then
         ceph osd pool application enable $1 $3
       fi
     }
diff --git a/ceph-osd/Chart.yaml b/ceph-osd/Chart.yaml
index be891b3f3..99b21726c 100644
--- a/ceph-osd/Chart.yaml
+++ b/ceph-osd/Chart.yaml
@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph OSD
 name: ceph-osd
-version: 0.1.17
+version: 0.1.18
 home: https://github.com/ceph/ceph
 ...
diff --git a/ceph-osd/templates/bin/_helm-tests.sh.tpl b/ceph-osd/templates/bin/_helm-tests.sh.tpl
index a217d701e..6c47f8f78 100644
--- a/ceph-osd/templates/bin/_helm-tests.sh.tpl
+++ b/ceph-osd/templates/bin/_helm-tests.sh.tpl
@@ -19,10 +19,10 @@ set -ex
 function check_osd_count() {
   echo "#### Start: Checking OSD count ####"
   noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')
-  osd_stat=$(ceph osd stat -f json)
-  num_osd=$(jq '.osdmap.num_osds' <<< "$osd_stat")
-  num_in_osds=$(jq '.osdmap.num_in_osds' <<< "$osd_stat")
-  num_up_osds=$(jq '.osdmap.num_up_osds' <<< "$osd_stat")
+  osd_stat=$(ceph osd stat -f json-pretty)
+  num_osd=$(awk '/"num_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+  num_in_osds=$(awk '/"num_in_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+  num_up_osds=$(awk '/"num_up_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
 
   MIN_OSDS=$((${num_osd}*$REQUIRED_PERCENT_OF_OSDS/100))
   if [ ${MIN_OSDS} -lt 1 ]; then
diff --git a/ceph-osd/templates/bin/_post-apply.sh.tpl b/ceph-osd/templates/bin/_post-apply.sh.tpl
index aeb91c531..ac71cbc66 100644
--- a/ceph-osd/templates/bin/_post-apply.sh.tpl
+++ b/ceph-osd/templates/bin/_post-apply.sh.tpl
@@ -89,7 +89,7 @@ function wait_for_pgs () {
   pgs_inactive=0
   query='map({state: .state}) | group_by(.state) | map({state: .[0].state, count: length}) | .[] | select(.state | contains("active") | not)'
 
-  if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
+  if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
     query=".pg_stats | ${query}"
   fi
 
diff --git a/ceph-osd/templates/bin/osd/ceph-disk/_common.sh.tpl b/ceph-osd/templates/bin/osd/ceph-disk/_common.sh.tpl
index 2f75f1a38..0960a569d 100644
--- a/ceph-osd/templates/bin/osd/ceph-disk/_common.sh.tpl
+++ b/ceph-osd/templates/bin/osd/ceph-disk/_common.sh.tpl
@@ -31,8 +31,8 @@ eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c '
 eval CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP=$(cat /etc/ceph/storage.json | jq '.failure_domain_by_hostname_map."'$HOSTNAME'"')
 eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
 
-if [[ $(ceph -v | egrep -q "nautilus|mimic|luminous"; echo $?) -ne 0 ]]; then
-    echo "ERROR- need Luminous/Mimic/Nautilus release"
+if [[ $(ceph -v | awk '/version/{print $3}' | cut -d. -f1) -lt 12 ]]; then
+    echo "ERROR - The minimum Ceph version supported is Luminous 12.x.x"
     exit 1
 fi
 
diff --git a/ceph-osd/templates/bin/osd/ceph-volume/_common.sh.tpl b/ceph-osd/templates/bin/osd/ceph-volume/_common.sh.tpl
index 98979dbd2..0601ba063 100644
--- a/ceph-osd/templates/bin/osd/ceph-volume/_common.sh.tpl
+++ b/ceph-osd/templates/bin/osd/ceph-volume/_common.sh.tpl
@@ -115,15 +115,15 @@ alias wipefs='locked wipefs'
 alias sgdisk='locked sgdisk'
 alias dd='locked dd'
 
-eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
-eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
-eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
-eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
+eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
+eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
+eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
+eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
 eval CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP=$(cat /etc/ceph/storage.json | jq '.failure_domain_by_hostname_map."'$HOSTNAME'"')
-eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
+eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
 
-if [[ $(ceph -v | egrep -q "nautilus|mimic|luminous"; echo $?) -ne 0 ]]; then
-    echo "ERROR- need Luminous/Mimic/Nautilus release"
+if [[ $(ceph -v | awk '/version/{print $3}' | cut -d. -f1) -lt 12 ]]; then
+    echo "ERROR - The minimum Ceph version supported is Luminous 12.x.x"
     exit 1
 fi
 
diff --git a/ceph-osd/values.yaml b/ceph-osd/values.yaml
index 7fee7d675..515e88240 100644
--- a/ceph-osd/values.yaml
+++ b/ceph-osd/values.yaml
@@ -19,9 +19,9 @@
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_osd: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
+    ceph_osd: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/docker:17.07.0'
   local_registry:
diff --git a/ceph-provisioners/Chart.yaml b/ceph-provisioners/Chart.yaml
index 6d5f89164..ab7fe7bd3 100644
--- a/ceph-provisioners/Chart.yaml
+++ b/ceph-provisioners/Chart.yaml
@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph Provisioner
 name: ceph-provisioners
-version: 0.1.2
+version: 0.1.3
 home: https://github.com/ceph/ceph
 ...
diff --git a/ceph-provisioners/values.yaml b/ceph-provisioners/values.yaml
index 1f264edcd..6fc372747 100644
--- a/ceph-provisioners/values.yaml
+++ b/ceph-provisioners/values.yaml
@@ -27,10 +27,10 @@ release_group: null
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
     ceph_cephfs_provisioner: 'docker.io/openstackhelm/ceph-cephfs-provisioner:ubuntu_bionic-20200521'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
-    ceph_rbd_provisioner: 'docker.io/openstackhelm/ceph-rbd-provisioner:ubuntu_bionic-20200521'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+    ceph_rbd_provisioner: 'docker.io/openstackhelm/ceph-rbd-provisioner:change_770201_ubuntu_bionic-20210113'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/docker:17.07.0'
   local_registry:
@@ -246,8 +246,7 @@ bootstrap:
     ceph -s
     function ensure_pool () {
       ceph osd pool stats $1 || ceph osd pool create $1 $2
-      local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
-      if [[ ${test_version} -gt 0 ]]; then
+      if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1 | sort -n | head -n1) -ge 12 ]]; then
         ceph osd pool application enable $1 $3
       fi
     }
diff --git a/ceph-rgw/Chart.yaml b/ceph-rgw/Chart.yaml
index dfebe5fe4..4c5d762c8 100644
--- a/ceph-rgw/Chart.yaml
+++ b/ceph-rgw/Chart.yaml
@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph RadosGW
 name: ceph-rgw
-version: 0.1.1
+version: 0.1.2
 home: https://github.com/ceph/ceph
 ...
diff --git a/ceph-rgw/values.yaml b/ceph-rgw/values.yaml
index e9af5a55a..19da50477 100644
--- a/ceph-rgw/values.yaml
+++ b/ceph-rgw/values.yaml
@@ -24,12 +24,12 @@ release_group: null
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
-    ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+    ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/docker:17.07.0'
-    rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
+    rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
     ks_endpoints: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
     ks_service: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
     ks_user: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
@@ -489,8 +489,7 @@ bootstrap:
     ceph -s
     function ensure_pool () {
       ceph osd pool stats $1 || ceph osd pool create $1 $2
-      local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous")
-      if [[ ${test_version} -gt 0 ]]; then
+      if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1 | sort -n | head -n1) -ge 12 ]]; then
         ceph osd pool application enable $1 $3
       fi
     }
diff --git a/tools/deployment/osh-infra-logging/020-ceph.sh b/tools/deployment/osh-infra-logging/020-ceph.sh
index 5d4147083..095b4695b 100755
--- a/tools/deployment/osh-infra-logging/020-ceph.sh
+++ b/tools/deployment/osh-infra-logging/020-ceph.sh
@@ -86,6 +86,11 @@ conf:
     default:
       crush_rule: same_host
     spec:
+      # Health metrics pool
+      - name: device_health_metrics
+        application: mgr_devicehealth
+        replication: 1
+        percent_total_data: 5
       # RBD pool
       - name: rbd
         application: rbd
@@ -160,7 +165,7 @@ conf:
       - name: default.rgw.buckets.data
         application: rgw
         replication: 1
-        percent_total_data: 34.8
+        percent_total_data: 29
   storage:
     osd:
       - data: