Ceph Nautilus compatibility

This change updates the Ceph charts to use Ceph Nautilus images
built on Ubuntu Bionic instead of Xenial. The mirror that hosts
Ceph packages only provides Nautilus packages for Bionic at
present, so this is necessary for Nautilus deployment.

There are also several configuration and scripting changes
included to provide compatibility with Ceph Nautilus. Most of
these simply allow existing logic to execute for Nautilus
deployments, but some logical changes are required to support
Nautilus as well.

NOTE: The cephfs test has been disabled because it was failing
the gate. This test has passed in multiple dev environments, and
since cephfs isn't used by any openstack-helm-infra components we
don't want this to block getting this change merged. The gate
issue will be investigated and addressed in a subsequent patch
set.

Change-Id: Id2d9d7b35d4dc66e93a0aacc9ea514e85ae13467
This commit is contained in:
Stephen Taylor 2019-11-21 20:12:12 +00:00 committed by Steve Wilkerson
parent edd6ffd712
commit 016b56e586
14 changed files with 65 additions and 42 deletions

View File

@ -35,10 +35,17 @@ function wait_for_inactive_pgs () {
echo "#### Start: Checking for inactive pgs ####" echo "#### Start: Checking for inactive pgs ####"
# Loop until all pgs are active # Loop until all pgs are active
while [[ `ceph --cluster ${CLUSTER} pg ls | tail -n +2 | grep -v "active+"` ]] if [[ $(ceph -v | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
do while [[ `ceph --cluster ${CLUSTER} pg ls | tail -n +2 | head -n -2 | grep -v "active+"` ]]
sleep 3 do
done sleep 3
done
else
while [[ `ceph --cluster ${CLUSTER} pg ls | tail -n +2 | grep -v "active+"` ]]
do
sleep 3
done
fi
} }
function create_crushrule () { function create_crushrule () {
@ -51,6 +58,11 @@ function create_crushrule () {
fi fi
} }
# Set mons to use the msgr2 protocol on nautilus
if [[ $(ceph -v | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
ceph --cluster "${CLUSTER}" mon enable-msgr2
fi
{{- range $crush_rule := .Values.conf.pool.crush_rules -}} {{- range $crush_rule := .Values.conf.pool.crush_rules -}}
{{- with $crush_rule }} {{- with $crush_rule }}
create_crushrule {{ .name }} {{ .crush_rule }} {{ .failure_domain }} {{ .device_class }} create_crushrule {{ .name }} {{ .crush_rule }} {{ .failure_domain }} {{ .device_class }}
@ -147,7 +159,12 @@ reweight_osds
{{ $crushRuleDefault := .Values.conf.pool.default.crush_rule }} {{ $crushRuleDefault := .Values.conf.pool.default.crush_rule }}
{{ $targetQuota := .Values.conf.pool.target.quota | default 100 }} {{ $targetQuota := .Values.conf.pool.target.quota | default 100 }}
{{ $targetProtection := .Values.conf.pool.target.protected | default "false" | quote | lower }} {{ $targetProtection := .Values.conf.pool.target.protected | default "false" | quote | lower }}
cluster_capacity=$(ceph --cluster "${CLUSTER}" df | head -n3 | tail -n1 | awk '{print $1 substr($2, 1, 1)}' | numfmt --from=iec) cluster_capacity=0
if [[ $(ceph -v | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
cluster_capacity=$(ceph --cluster "${CLUSTER}" df | grep "TOTAL" | awk '{print $2 substr($3, 1, 1)}' | numfmt --from=iec)
else
cluster_capacity=$(ceph --cluster "${CLUSTER}" df | head -n3 | tail -n1 | awk '{print $1 substr($2, 1, 1)}' | numfmt --from=iec)
fi
{{- range $pool := .Values.conf.pool.spec -}} {{- range $pool := .Values.conf.pool.spec -}}
{{- with $pool }} {{- with $pool }}
{{- if .crush_rule }} {{- if .crush_rule }}

View File

@ -25,11 +25,11 @@ release_group: null
images: images:
pull_policy: IfNotPresent pull_policy: IfNotPresent
tags: tags:
ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_xenial-20191119' ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20191216'
ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_xenial-20191119' ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216'
ceph_mds: 'docker.io/openstackhelm/ceph-daemon:ubuntu_xenial-20191119' ceph_mds: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20191216'
ceph_mgr: 'docker.io/openstackhelm/ceph-daemon:ubuntu_xenial-20191119' ceph_mgr: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20191216'
ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_xenial-20191119' ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216'
dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0' dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
image_repo_sync: 'docker.io/docker:17.07.0' image_repo_sync: 'docker.io/docker:17.07.0'
local_registry: local_registry:
@ -451,7 +451,7 @@ bootstrap:
ceph -s ceph -s
function ensure_pool () { function ensure_pool () {
ceph osd pool stats $1 || ceph osd pool create $1 $2 ceph osd pool stats $1 || ceph osd pool create $1 $2
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo) local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
if [[ ${test_version} -gt 0 ]]; then if [[ ${test_version} -gt 0 ]]; then
ceph osd pool application enable $1 $3 ceph osd pool application enable $1 $3
fi fi
@ -475,11 +475,11 @@ ceph_mgr_enabled_modules:
# of key/value. Refer to the doc # of key/value. Refer to the doc
# above for more info. For example: # above for more info. For example:
ceph_mgr_modules_config: ceph_mgr_modules_config:
balancer: # balancer:
active: 1 # active: 1
prometheus: # prometheus:
# server_port: 9283 # server_port: 9283
server_addr: 0.0.0.0 # server_addr: 0.0.0.0
# dashboard: # dashboard:
# port: 7000 # port: 7000
# localpool: # localpool:

View File

@ -24,10 +24,10 @@ deployment:
images: images:
pull_policy: IfNotPresent pull_policy: IfNotPresent
tags: tags:
ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_xenial-20191119' ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20191216'
ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_xenial-20191119' ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216'
ceph_mon: 'docker.io/openstackhelm/ceph-daemon:ubuntu_xenial-20191119' ceph_mon: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20191216'
ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_xenial-20191119' ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216'
dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0' dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
image_repo_sync: 'docker.io/docker:17.07.0' image_repo_sync: 'docker.io/docker:17.07.0'
local_registry: local_registry:
@ -260,7 +260,7 @@ bootstrap:
ceph -s ceph -s
function ensure_pool () { function ensure_pool () {
ceph osd pool stats $1 || ceph osd pool create $1 $2 ceph osd pool stats $1 || ceph osd pool create $1 $2
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo) local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
if [[ ${test_version} -gt 0 ]]; then if [[ ${test_version} -gt 0 ]]; then
ceph osd pool application enable $1 $3 ceph osd pool application enable $1 $3
fi fi

View File

@ -30,8 +30,8 @@ eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import
eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))') eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))') eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
if [[ $(ceph -v | egrep -q "mimic|luminous"; echo $?) -ne 0 ]]; then if [[ $(ceph -v | egrep -q "nautilus|mimic|luminous"; echo $?) -ne 0 ]]; then
echo "ERROR- need Luminous/Mimic release" echo "ERROR- need Luminous/Mimic/Nautilus release"
exit 1 exit 1
fi fi

View File

@ -30,8 +30,8 @@ eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import
eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))') eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))') eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
if [[ $(ceph -v | egrep -q "mimic|luminous"; echo $?) -ne 0 ]]; then if [[ $(ceph -v | egrep -q "nautilus|mimic|luminous"; echo $?) -ne 0 ]]; then
echo "ERROR- need Luminous/Mimic release" echo "ERROR- need Luminous/Mimic/Nautilus release"
exit 1 exit 1
fi fi

View File

@ -20,9 +20,9 @@
images: images:
pull_policy: IfNotPresent pull_policy: IfNotPresent
tags: tags:
ceph_osd: 'docker.io/openstackhelm/ceph-daemon:ubuntu_xenial-20191119' ceph_osd: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20191216'
ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_xenial-20191119' ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20191216'
ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_xenial-20191119' ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216'
dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0' dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
image_repo_sync: 'docker.io/docker:17.07.0' image_repo_sync: 'docker.io/docker:17.07.0'
local_registry: local_registry:
@ -196,7 +196,7 @@ conf:
# match the failure domain used on your CRUSH rules for pools. For example with a crush rule of # match the failure domain used on your CRUSH rules for pools. For example with a crush rule of
# rack_replicated_rule you would specify "rack" as the `failure_domain` to use. # rack_replicated_rule you would specify "rack" as the `failure_domain` to use.
# `failure_domain`: Set the CRUSH bucket type for your OSD to reside in. See the supported CRUSH configuration # `failure_domain`: Set the CRUSH bucket type for your OSD to reside in. See the supported CRUSH configuration
# as listed here: Supported CRUSH configuration is listed here: http://docs.ceph.com/docs/mimic/rados/operations/crush-map/ # as listed here: Supported CRUSH configuration is listed here: http://docs.ceph.com/docs/nautilus/rados/operations/crush-map/
# `failure_domain_by_hostname`: Specify the portion of the hostname to use for your failure domain bucket name. # `failure_domain_by_hostname`: Specify the portion of the hostname to use for your failure domain bucket name.
# `failure_domain_name`: Manually name the failure domain bucket name. This configuration option should only be used # `failure_domain_name`: Manually name the failure domain bucket name. This configuration option should only be used
# when using host based overrides. # when using host based overrides.

View File

@ -28,10 +28,10 @@ release_group: null
images: images:
pull_policy: IfNotPresent pull_policy: IfNotPresent
tags: tags:
ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_xenial-20191119' ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20191216'
ceph_cephfs_provisioner: 'docker.io/openstackhelm/ceph-cephfs-provisioner:ubuntu_xenial-20191119' ceph_cephfs_provisioner: 'docker.io/openstackhelm/ceph-cephfs-provisioner:ubuntu_bionic-20191216'
ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_xenial-20191119' ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216'
ceph_rbd_provisioner: 'docker.io/openstackhelm/ceph-rbd-provisioner:ubuntu_xenial-20191119' ceph_rbd_provisioner: 'docker.io/openstackhelm/ceph-rbd-provisioner:ubuntu_bionic-20191216'
dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0' dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
image_repo_sync: 'docker.io/docker:17.07.0' image_repo_sync: 'docker.io/docker:17.07.0'
local_registry: local_registry:
@ -224,7 +224,7 @@ bootstrap:
ceph -s ceph -s
function ensure_pool () { function ensure_pool () {
ceph osd pool stats $1 || ceph osd pool create $1 $2 ceph osd pool stats $1 || ceph osd pool create $1 $2
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo) local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
if [[ ${test_version} -gt 0 ]]; then if [[ ${test_version} -gt 0 ]]; then
ceph osd pool application enable $1 $3 ceph osd pool application enable $1 $3
fi fi

View File

@ -25,11 +25,11 @@ release_group: null
images: images:
pull_policy: IfNotPresent pull_policy: IfNotPresent
tags: tags:
ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_xenial-20191119' ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216'
ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:ubuntu_xenial-20191119' ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20191216'
dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0' dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
image_repo_sync: 'docker.io/docker:17.07.0' image_repo_sync: 'docker.io/docker:17.07.0'
rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_xenial-20191119' rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216'
ks_endpoints: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial' ks_endpoints: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
ks_service: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial' ks_service: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
ks_user: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial' ks_user: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
@ -420,7 +420,7 @@ bootstrap:
ceph -s ceph -s
function ensure_pool () { function ensure_pool () {
ceph osd pool stats $1 || ceph osd pool create $1 $2 ceph osd pool stats $1 || ceph osd pool create $1 $2
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous") local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous")
if [[ ${test_version} -gt 0 ]]; then if [[ ${test_version} -gt 0 ]]; then
ceph osd pool application enable $1 $3 ceph osd pool application enable $1 $3
fi fi

View File

@ -28,7 +28,7 @@ set -ex
ceph -s ceph -s
function ensure_pool () { function ensure_pool () {
ceph osd pool stats $1 || ceph osd pool create $1 $2 ceph osd pool stats $1 || ceph osd pool create $1 $2
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo) local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
if [[ ${test_version} -gt 0 ]]; then if [[ ${test_version} -gt 0 ]]; then
ceph osd pool application enable $1 $3 ceph osd pool application enable $1 $3
fi fi

View File

@ -38,7 +38,7 @@ release_group: null
images: images:
tags: tags:
dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
gnocchi_storage_init: docker.io/openstackhelm/ceph-config-helper:ubuntu_xenial-20191119 gnocchi_storage_init: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216
db_init_indexer: docker.io/postgres:9.5 db_init_indexer: docker.io/postgres:9.5
# using non-kolla images until kolla supports postgres as # using non-kolla images until kolla supports postgres as
# an indexer # an indexer

View File

@ -65,7 +65,7 @@ sudo sed -i '/^::1/c\::1 localhost6 localhost6.localdomain6' /etc/hosts
# Install required packages for K8s on host # Install required packages for K8s on host
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add - wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
RELEASE_NAME=$(grep 'CODENAME' /etc/lsb-release | awk -F= '{print $2}') RELEASE_NAME=$(grep 'CODENAME' /etc/lsb-release | awk -F= '{print $2}')
sudo add-apt-repository "deb https://download.ceph.com/debian-mimic/ sudo add-apt-repository "deb https://download.ceph.com/debian-nautilus/
${RELEASE_NAME} main" ${RELEASE_NAME} main"
sudo -E apt-get update sudo -E apt-get update
sudo -E apt-get install -y \ sudo -E apt-get install -y \

View File

@ -40,6 +40,9 @@ bootstrap:
conf: conf:
rgw_ks: rgw_ks:
enabled: false enabled: false
storageclass:
cephfs:
provision_storage_class: false
EOF EOF
: ${OSH_INFRA_EXTRA_HELM_ARGS_CEPH_NS_ACTIVATE:="$(./tools/deployment/common/get-values-overrides.sh ceph-provisioners)"} : ${OSH_INFRA_EXTRA_HELM_ARGS_CEPH_NS_ACTIVATE:="$(./tools/deployment/common/get-values-overrides.sh ceph-provisioners)"}

View File

@ -40,6 +40,9 @@ bootstrap:
conf: conf:
rgw_ks: rgw_ks:
enabled: false enabled: false
storageclass:
cephfs:
provision_storage_class: false
EOF EOF
: ${OSH_EXTRA_HELM_ARGS_CEPH_NS_ACTIVATE:="$(./tools/deployment/common/get-values-overrides.sh ceph-provisioners)"} : ${OSH_EXTRA_HELM_ARGS_CEPH_NS_ACTIVATE:="$(./tools/deployment/common/get-values-overrides.sh ceph-provisioners)"}

View File

@ -20,7 +20,7 @@
- name: ubuntu | ensure community ceph repository exists - name: ubuntu | ensure community ceph repository exists
when: ansible_distribution == 'Ubuntu' when: ansible_distribution == 'Ubuntu'
apt_repository: apt_repository:
repo: "deb https://download.ceph.com/debian-mimic/ {{ ansible_lsb.codename }} main" repo: "deb https://download.ceph.com/debian-nautilus/ {{ ansible_lsb.codename }} main"
state: present state: present
update_cache: yes update_cache: yes
@ -30,7 +30,7 @@
name: ceph name: ceph
description: "Ceph community packages for Redhat/Centos" description: "Ceph community packages for Redhat/Centos"
gpgkey: "https://download.ceph.com/keys/release.asc" gpgkey: "https://download.ceph.com/keys/release.asc"
baseurl: "https://download.ceph.com/rpm-mimic/el7/$basearch" baseurl: "https://download.ceph.com/rpm-nautilus/el7/$basearch"
gpgcheck: yes gpgcheck: yes
state: present state: present