From 784716d949c64b3ee653f95004ae978c60d295c3 Mon Sep 17 00:00:00 2001
From: Goutham Pacha Ravi
Date: Thu, 9 May 2024 17:46:14 -0700
Subject: [PATCH] Drop support for package based installation of Ceph

This mode of deployment isn't supported by the Ceph community, and was
always a chimera that we were feeding and maintaining. Ceph's tool of
choice to bootstrap and install a Ceph cluster is the Ceph Orchestrator
(via the cephadm tool).

We're also cleaning up the old, unused and poorly tested
"CONTAINERIZED_CEPH" mode. When using the Ceph orchestrator, Ceph
daemons run within podman containers on the devstack host.

Change-Id: I5f75cb829383d7acd536e24c70cc4418d93c13bc
Signed-off-by: Goutham Pacha Ravi
---
 .zuul.yaml                           |    3 -
 README.rst                           |   39 +-
 devstack/lib/ceph                    | 1103 --------------------------
 devstack/lib/cephadm                 |    2 +-
 devstack/lib/{common => nfs-ganesha} |    0
 devstack/override-defaults           |   17 +-
 devstack/plugin.sh                   |  172 +---
 devstack/settings                    |   20 +-
 8 files changed, 33 insertions(+), 1323 deletions(-)
 delete mode 100755 devstack/lib/ceph
 rename devstack/lib/{common => nfs-ganesha} (100%)

diff --git a/.zuul.yaml b/.zuul.yaml
index e0ca5269..bb85b5b0 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -27,7 +27,6 @@
         TEMPEST_RUN_VALIDATION: True
         MYSQL_REDUCE_MEMORY: True
         DISABLE_CEPHADM_POST_DEPLOY: True
-        CEPHADM_DEPLOY: true
       devstack_plugins:
         devstack-plugin-ceph: https://opendev.org/openstack/devstack-plugin-ceph
       devstack_services:
@@ -112,7 +111,6 @@
         ENABLE_VOLUME_MULTIATTACH: true
         TEMPEST_RUN_VALIDATION: true
         USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: false
-        CEPHADM_DEPLOY: True
         DISABLE_CEPHADM_POST_DEPLOY: True
         MYSQL_REDUCE_MEMORY: True
         REMOTE_CEPH: False
@@ -132,7 +130,6 @@
       subnode:
         devstack_localrc:
           REMOTE_CEPH: True
-          CEPHADM_DEPLOY: True
           CINDER_CEPH_UUID: d531d2d4-3937-429c-b0c2-658fe41e82aa
 
 - project-template:
diff --git a/README.rst b/README.rst
index 62539d61..2bdac0b7 100644
--- a/README.rst
+++ b/README.rst
@@ -12,23 +12,18 @@ services
 
 As part of ``stack.sh``:
 
-- Installs Ceph (client and server) packages
-- Creates a Ceph cluster for use with openstack services
+- Creates a Ceph cluster for use with openstack services using Ceph orchestrator
 - Configures Ceph as the storage backend for Cinder, Cinder Backup,
-  Nova, Manila (not by default), and Glance services
+  Nova, Manila, and Glance services
 - (Optionally) Sets up & configures Rados gateway (aka rgw or radosgw)
-  as a Swift endpoint with Keystone integration
-- Set ``ENABLE_CEPH_RGW=True`` in your ``localrc``
+  as a Swift endpoint with Keystone integration. Set ``ENABLE_CEPH_RGW=True``
+  in your ``localrc``
 - Supports Ceph cluster running local or remote to openstack services
 
 As part of ``unstack.sh`` \| ``clean.sh``:
 
 - Tears down the Ceph cluster and its related services
 
-This plugin also gets used to configure Ceph as the storage backend for
-the upstream Ceph CI job named
-``gate-tempest-dsvm-full-devstack-plugin-ceph``
-
 Usage
 -----
 
@@ -40,7 +35,7 @@ Usage
 Run ``stack.sh`` in your devstack tree and boom! You're good to go.
 
 - Ceph is setup as the default storage backend for Cinder, Cinder
-  Backup, Glance and Nova services. You have the ability to control
+  Backup, Glance, Manila and Nova services. You have the ability to control
   each of the enabled services with the following configuration in
   your ``local.conf``:
 
@@ -50,18 +45,12 @@ Run ``stack.sh`` in your devstack tree and boom! You're good to go.
     ENABLE_CEPH_GLANCE=True     # store images in ceph
     ENABLE_CEPH_C_BAK=True      # backup volumes to ceph
     ENABLE_CEPH_NOVA=True       # allow nova to use ceph resources
+    ENABLE_CEPH_MANILA=True     # allow manila to use CephFS as backend (Native CephFS or CephFS via NFS)
 
 Change any of the above lines to ``False`` to disable that feature
 specifically.
 
-- Ceph can be enabled as the storage backend for Manila with the
-  following setting in your ``local.conf``:
-
-  ::
-
-    ENABLE_CEPH_MANILA=True
-
-CephFS Native driver that supports native Ceph protocol is used by
+Manila's CephFS Native driver that supports native Ceph protocol is enabled by
 default. To use CephFS NFS-Ganesha driver that supports NFS protocol add
 the setting:
 
@@ -69,6 +58,13 @@ the setting:
 
   MANILA_CEPH_DRIVER=cephfsnfs
 
+If you'd like to use a standalone NFS Ganesha service in place of ceph orchestrator
+deployed ``ceph-nfs`` service, set:
+
+::
+
+  CEPHADM_DEPLOY_NFS=False
+
 Make sure that the manila plugin is enabled before devstack-plugin-ceph
 in the ``local.conf`` file.
 
@@ -81,13 +77,6 @@ Known Issues / Limitations
 - Tempest test failures when using RGW as swift endpoint
 - Tempest fails due to verify-tempest-config erroring out, when using
   RGW as swift endpoint
-- Manila with CephFS - for Ubuntu, support only for Trusty Tahr (14.04
-  LTS) and beyond
-
-TODOs
------
-
-- Fix Rados Gateway with Keystone for Swift on Fedora
 
 Bugs
 ----
diff --git a/devstack/lib/ceph b/devstack/lib/ceph
deleted file mode 100755
index 98b78f19..00000000
--- a/devstack/lib/ceph
+++ /dev/null
@@ -1,1103 +0,0 @@
-#!/bin/bash
-#
-# lib/ceph
-# Functions to control the configuration
-# and operation of the **Ceph** storage service
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# - install_ceph
-# - configure_ceph
-# - start_ceph
-# - stop_ceph
-# - cleanup_ceph
-# - cleanup_containerized_ceph
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-source $CEPH_PLUGIN_DIR/lib/common
-
-
-# Defaults
-# --------
-
-TEST_MASTER=$(trueorfalse False TEST_MASTER)
-
-CEPH_RELEASE=${CEPH_RELEASE:-quincy}
-
-# Deploy a Ceph demo container instead of a non-containerized version
-CEPH_CONTAINERIZED=$(trueorfalse False CEPH_CONTAINERIZED)
-
-# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
-# Default is the common DevStack data directory.
-CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
-CEPH_DISK_IMAGE=${CEPH_DISK_IMAGE:-${CEPH_DATA_DIR}/drives/images/ceph.img}
-
-# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
-# Default is ``/etc/ceph``.
-CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}
-
-# DevStack will create a loop-back disk formatted as XFS to store the
-# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the disk size in
-# kilobytes.
-VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-8GB} -CEPH_LOOPBACK_DISK_SIZE_DEFAULT=${CEPH_LOOPBACK_DISK_SIZE_DEFAULT:-$VOLUME_BACKING_FILE_SIZE} -CEPH_LOOPBACK_DISK_SIZE=\ -${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT} - -# Common -CEPH_FSID=$(uuidgen) -CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf -MDS_ID=${MDS_ID:-a} -MGR_ID=${MGR_ID:-x} - -# RBD configuration defaults -if [[ ${DISTRO} =~ (bionic|xenial) ]]; then - CEPH_RBD_DEFAULT_FEATURES=${CEPH_RBD_DEFAULT_FEATURES:-"layering, exclusive-lock"} -else - CEPH_RBD_DEFAULT_FEATURES=${CEPH_RBD_DEFAULT_FEATURES:-"layering, exclusive-lock, object-map, fast-diff"} -fi - -# Glance -GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance} -GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images} -GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8} -GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8} -GLANCE_RGW_BACKEND=${GLANCE_RGW_BACKEND:-False} - -# Nova -NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms} -NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8} -NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8} - -# Cinder -CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes} -CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8} -CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8} -CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder} -CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)} - -# Manila -CEPHFS_POOL_PG=${CEPHFS_POOL_PG:-8} - -# Multiple filesystems enable more than one devstack to share -# the same REMOTE_CEPH cluster. Note that in addition to setting -# CEPHFS_MULTIPLE_FILESYSTEMS and REMOTE_CEPH, each devstack -# needs to set distinct values for CEPHFS_FILESYSTEM, -# CEPHFS_METADATA_POOL, and CEPHFS_DATA_POOL. -CEPHFS_MULTIPLE_FILESYSTEMS=${CEPHFS_MULTIPLE_FILESYSTEMS:-False} -CEPHFS_FILESYSTEM=${CEPHFS_FILESYSTEM:-cephfs} -CEPHFS_METADATA_POOL=${CEPHFS_METADATA_POOL:-cephfs_metadata} -CEPHFS_DATA_POOL=${CEPHFS_DATA_POOL:-cephfs_data} - -MANILA_CEPH_DRIVER=${MANILA_CEPH_DRIVER:-cephfsnative} -MANILA_CEPH_USER=${MANILA_CEPH_USER:-manila} - -# Set ``CEPH_REPLICAS`` to configure how many replicas are to be -# configured for your Ceph cluster. By default we are configuring -# only one replica since this is way less CPU and memory intensive. 
If -# you are planning to test Ceph replication feel free to increase this value -CEPH_REPLICAS=${CEPH_REPLICAS:-1} -CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS}) - -# Rados gateway -CEPH_RGW_PORT=${CEPH_RGW_PORT:-8080} -CEPH_RGW_IDENTITY_API_VERSION=${CEPH_RGW_IDENTITY_API_VERSION:-3} -CEPH_RGW_KEYSTONE_SSL=$(trueorfalse False CEPH_RGW_KEYSTONE_SSL) - -# iSCSI defaults -CEPH_ISCSI_TARGET_IQN=${CEPH_ISCSI_TARGET_IQN:-iqn.1993-08.org.opendev:01:a9aa4032d2c1} -CEPH_ISCSI_API_USER=${CEPH_ISCSI_API_USER:-openstack} -CEPH_ISCSI_API_PASSWORD=${CEPH_ISCSI_API_PASSWORD:-openstack} -CEPH_ISCSI_API_HOST=${CEPH_ISCSI_API_HOST:-$SERVICE_HOST} -CEPH_ISCSI_API_PORT=${CEPH_ISCSI_API_PORT:-5002} -CEPH_ISCSI_GATEWAY_CFG=${CEPH_CONF_DIR}/iscsi-gateway.cfg -CEPH_ISCSI_MINIMUM_GATEWAYS=${CEPH_ISCSI_MINIMUM_GATEWAYS:-1} - -# gwcli requires a pool named rbd -CEPH_ISCSI_POOL="rbd" -CEPH_ISCSI_POOL_PG=${CEPH_ISCSI_POOL_PG:-8} - -# Ceph REST API (for containerized version only) -# Default is 5000, but Keystone already listens on 5000 -CEPH_REST_API_PORT=${CEPH_REST_API_PORT:-5001} - -# Set minimum client version -CEPH_MIN_CLIENT_VERSION=${CEPH_MIN_CLIENT_VERSION} - -# Connect to an existing Ceph cluster -REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH) -REMOTE_CEPH_ADMIN_KEY_PATH=\ -${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring} -REMOTE_CEPH_RGW=$(trueorfalse False REMOTE_CEPH_RGW) - -if [[ "$TARGET_BRANCH" =~ stable/(ocata|pike) ]]; then - # not supported before Queens - ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False -fi - -# Set INIT_SYSTEM to upstart, systemd, or init. In our domain it should be -# safe to assume that if the init system is not upstart or systemd that it -# is sysvinit rather than other theoretical possibilities like busybox. -INIT_SYSTEM=$(init --version 2>/dev/null | grep -qs upstart && echo upstart \ - || cat /proc/1/comm) - -# Functions -# ------------ - -# Containerized Ceph -function deploy_containerized_ceph { - install_package docker docker.io ceph-common - DOCKER_EXEC="docker exec ceph-demo" - initial_configure_ceph - sudo docker run -d \ - --name ceph-demo \ - --net=host \ - -v ${CEPH_CONF_DIR}:${CEPH_CONF_DIR} \ - -v ${CEPH_DATA_DIR}:${CEPH_DATA_DIR} \ - -e MON_IP=${SERVICE_HOST} \ - -e CEPH_PUBLIC_NETWORK=$(grep -o ${SERVICE_HOST%??}0/.. /proc/net/fib_trie | head -1) \ - -e RGW_CIVETWEB_PORT=${CEPH_RGW_PORT} \ - -e RESTAPI_PORT=${CEPH_REST_API_PORT} \ - ceph/demo - - # wait for ceph to be healthy then continue - ceph_status -} - -function wait_for_daemon { - timeout=20 - daemon_to_test=$1 - while [ $timeout -ne 0 ]; do - if eval $daemon_to_test; then - return 0 - fi - sleep 1 - let timeout=timeout-1 - done - return 1 -} - -function ceph_status { - echo "Waiting for Ceph to be ready" - return $(wait_for_daemon "sudo docker exec ceph-demo ceph health | grep -sq HEALTH_OK") -} - -# is_ceph_enabled_for_service() - checks whether the OpenStack service -# specified as an argument is enabled with Ceph as its storage backend. -function is_ceph_enabled_for_service { - local config config_name enabled service - enabled=1 - service=$1 - # Construct the global variable ENABLE_CEPH_.* corresponding to a - # $service. - config_name=ENABLE_CEPH_$(echo $service | \ - tr '[:lower:]' '[:upper:]' | tr '-' '_') - config=$(eval echo "\$$config_name") - - if (is_service_enabled $service) && [[ $config == 'True' ]]; then - enabled=0 - fi - return $enabled -} - -# _get_ceph_version() - checks version of Ceph mon daemon or CLI based on an -# argument. 
Checking mon daemon version requires the mon daemon to be up -# and healthy. -function _get_ceph_version { - local ceph_version_str - local mon_started - - if [[ $1 == 'cli' ]]; then - # ceph --version show CLI version - ceph_version_str=$(sudo ceph --version | cut -d ' ' -f 3 | \ - cut -d '.' -f 1,2) - elif [[ $1 == 'mon' ]]; then - # ceph version show mon daemon version - mon_started=$(wait_for_daemon "sudo systemctl is-active --quiet ceph-mon@$(hostname)") - - if $mon_started; then - ceph_version_str=$(sudo ceph version | cut -d ' ' -f 3 | \ - cut -f 1,2 -d '.') - else - die $LINENO "ceph-mon@${hostname} is not running and it's not possible to \ - retrieve it's version" - fi - else - die $LINENO "Invalid argument. The get_ceph_version function needs \ - an argument that can be 'cli' or 'mon'." - fi - - echo $ceph_version_str -} - -# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt -# so it can connect to the Ceph cluster while attaching a Cinder block device -function import_libvirt_secret_ceph { - cat </dev/null - - ${CINDER_CEPH_UUID} - - client.${CINDER_CEPH_USER} secret - - -EOF - sudo virsh secret-define --file secret.xml - sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} \ - --base64 $(sudo ceph -c ${CEPH_CONF_FILE} \ - auth get-key client.${CINDER_CEPH_USER}) - - sudo rm -f secret.xml -} - -# _undefine_virsh_secret() - Undefine Cinder key secret from libvirt -function _undefine_virsh_secret { - if is_ceph_enabled_for_service cinder || \ - is_ceph_enabled_for_service nova; then - local virsh_uuid - virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }') - sudo virsh secret-undefine ${virsh_uuid} &>/dev/null - fi -} - -# check_os_support_ceph() - Check if the OS provides a decent version of Ceph -function check_os_support_ceph { - if [[ ! ${DISTRO} =~ (jammy|focal|bionic|xenial|f31|f32|f33|f34|rhel8|rhel9) ]]; then - echo "WARNING: devstack-plugin-ceph hasn't been tested with $DISTRO. \ - Set FORCE_CEPH_INSTALL=yes in your local.conf if you'd like to \ - attempt installation anyway." - if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then - die $LINENO "Not proceeding with install." - fi - fi - - if [[ ! $INIT_SYSTEM == 'systemd' ]]; then - die "This plugin is only supported on systemd enabled systems currently." - fi -} - - -# check_os_support_ceph_iscsi() - Make sure kernel supports iscsi requirements -function check_os_support_for_iscsi { - KERNEL_CONFIG="/boot/config-$(uname -r)" - - target_core=$(grep -E '(CONFIG_TARGET_CORE=m|CONFIG_TARGET_CORE=y)' $KERNEL_CONFIG) - tcm_user=$(grep -E '(CONFIG_TCM_USER2=m|CONFIG_TCM_USER2=y)' $KERNEL_CONFIG) - iscsi_target=$(grep -E '(CONFIG_ISCSI_TARGET=m|CONFIG_ISCSI_TARGET=y)' $KERNEL_CONFIG) - - if [ -z "$target_core" ] || [ -z "$tcm_user" ] || [ -z "$iscsi_target" ]; then - die "Ceph iSCSI cannot work. The required kernel modules are not installed." 
- fi - -} - - -# cleanup_ceph() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_ceph_remote { - # do a proper cleanup from here to avoid leftover on the remote Ceph cluster - if is_ceph_enabled_for_service glance; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL \ - --yes-i-really-really-mean-it > /dev/null 2>&1 - - sudo ceph -c ${CEPH_CONF_FILE} auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1 - fi - if is_ceph_enabled_for_service cinder; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL \ - --yes-i-really-really-mean-it > /dev/null 2>&1 - - sudo ceph -c ${CEPH_CONF_FILE} auth del client.$CINDER_CEPH_USER > /dev/null 2>&1 - fi - if is_ceph_enabled_for_service c-bak; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL \ - --yes-i-really-really-mean-it > /dev/null 2>&1 - - sudo ceph -c ${CEPH_CONF_FILE} auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1 - fi - if is_ceph_enabled_for_service nova; then - iniset $NOVA_CONF libvirt rbd_secret_uuid "" - sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL \ - --yes-i-really-really-mean-it > /dev/null 2>&1 - fi - - # Clean up the disk image and mount that we created - destroy_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} -} - -function cleanup_ceph_embedded { - sudo killall -w -9 ceph-mon ceph-osd ceph-mds - if [ "$ENABLE_CEPH_RGW" = "True" ]; then - sudo killall -w -9 radosgw - fi - sudo rm -rf ${CEPH_DATA_DIR}/*/* - if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then - sudo umount ${CEPH_DATA_DIR} - fi - if [[ -e ${CEPH_DISK_IMAGE} ]]; then - sudo rm -f ${CEPH_DISK_IMAGE} - fi - - # purge ceph config file and keys - sudo rm -rf ${CEPH_CONF_DIR}/* - -} - -function cleanup_ceph_general { - _undefine_virsh_secret - if is_ceph_enabled_for_service manila && [ $MANILA_CEPH_DRIVER == 'cephfsnfs' ]; then - cleanup_nfs_ganesha - cleanup_repo_nfsganesha - fi - if is_ceph_enabled_for_service manila; then - sudo ceph -c ${CEPH_CONF_FILE} fs rm $CEPHFS_FILESYSTEM \ - --yes-i-really-mean-it - sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $CEPHFS_METADATA_POOL $CEPHFS_METADATA_POOL \ - --yes-i-really-really-mean-it > /dev/null 2>&1 - sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $CEPHFS_DATA_POOL $CEPHFS_DATA_POOL \ - --yes-i-really-really-mean-it > /dev/null 2>&1 - sudo ceph -c ${CEPH_CONF_FILE} auth del client.$MANILA_CEPH_USER > /dev/null 2>&1 - fi - cleanup_repo_ceph -} - -function cleanup_containerized_ceph { - sudo docker rm -f ceph-demo - sudo rm -rf ${CEPH_CONF_DIR}/* - sudo rm -rf ${CEPH_DATA_DIR} -} - -function initial_configure_ceph { - # create a backing file disk - create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE} - - # populate ceph directory - sudo mkdir -p \ - ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,bootstrap-mgr,bootstrap-rgw,mgr,rgw,mds,mon,osd,tmp,radosgw} -} -# configure_ceph() - Set config files, create data dirs, etc -function configure_ceph { - local count=0 - - initial_configure_ceph - - # create ceph monitor initial key and directory - sudo ceph-authtool ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname) \ - --create-keyring --name=mon. 
--add-key=$(ceph-authtool --gen-print-key) \ - --cap mon 'allow *' - - # gen admin keyring, gen client.admin user and add user to keyring - sudo ceph-authtool ${CEPH_CONF_DIR}/ceph.client.admin.keyring \ - --create-keyring --gen-key -n client.admin --cap mon 'allow *' \ - --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *' - - # add gen keys to ceph.mon.keyring - sudo ceph-authtool ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname) \ - --import-keyring ${CEPH_CONF_DIR}/ceph.client.admin.keyring - - sudo mkdir -p ${CEPH_DATA_DIR}/mon/ceph-$(hostname) - - # create a default ceph configuration file - iniset -sudo ${CEPH_CONF_FILE} global "fsid" "${CEPH_FSID}" - iniset -sudo ${CEPH_CONF_FILE} global "mon_initial_members" "$(hostname)" - iniset -sudo ${CEPH_CONF_FILE} global "mon_host" "${SERVICE_HOST}" - iniset -sudo ${CEPH_CONF_FILE} global "auth_cluster_required" "cephx" - iniset -sudo ${CEPH_CONF_FILE} global "auth_service_required" "cephx" - iniset -sudo ${CEPH_CONF_FILE} global "auth_client_required" "cephx" - iniset -sudo ${CEPH_CONF_FILE} global "filestore_xattr_use_omap" "true" - iniset -sudo ${CEPH_CONF_FILE} global "osd crush chooseleaf type" "0" - iniset -sudo ${CEPH_CONF_FILE} global "osd journal size" "100" - iniset -sudo ${CEPH_CONF_FILE} global "osd pool default size" "${CEPH_REPLICAS}" - iniset -sudo ${CEPH_CONF_FILE} global "rbd default features" "${CEPH_RBD_DEFAULT_FEATURES}" - iniset -sudo ${CEPH_CONF_FILE} client "debug_client" "10" - - local gigs - gigs=$(echo $CEPH_LOOPBACK_DISK_SIZE | grep -o '^[0-9]*') - iniset -sudo ${CEPH_CONF_FILE} global "bluestore_block_size" $((($gigs - 4) << 30)) - - # bootstrap the ceph monitor - sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) \ - --keyring ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname) - - sudo chown -R ceph. ${CEPH_DATA_DIR} - - sudo systemctl enable ceph-mon@$(hostname) - sudo systemctl start ceph-mon@$(hostname) - - local ceph_version - ceph_version=$(_get_ceph_version mon) - - if vercmp "$ceph_version" ">=" "14.0"; then - for key in bootstrap-{mds,osd,rgw}; do - sudo ceph auth get client.$key -o ${CEPH_DATA_DIR}/$key/ceph.keyring - done - fi - - sudo mkdir -p ${CEPH_DATA_DIR}/mgr/ceph-${MGR_ID} - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create mgr.${MGR_ID} \ - mon 'allow profile mgr' mds 'allow *' osd 'allow *' \ - -o ${CEPH_DATA_DIR}/mgr/ceph-${MGR_ID}/keyring - sudo chown -R ceph. ${CEPH_DATA_DIR}/mgr - - # create a simple rule to take OSDs instead of hosts with CRUSH - # then apply this rule to the default pool - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo ceph -c ${CEPH_CONF_FILE} \ - osd crush rule create-simple devstack default osd - - RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} \ - osd crush rule dump devstack | \ - awk '/rule_id/ {print $2}' | \ - cut -d ',' -f1) - - sudo ceph -c ${CEPH_CONF_FILE} \ - osd pool set rbd crush_ruleset ${RULE_ID} - sudo ceph -c ${CEPH_CONF_FILE} \ - osd pool set data crush_ruleset ${RULE_ID} - sudo ceph -c ${CEPH_CONF_FILE} \ - osd pool set metadata crush_ruleset ${RULE_ID} - fi - - # create the OSD(s) - for rep in ${CEPH_REPLICAS_SEQ}; do - OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create) - sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID} - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} \ - mon 'allow profile osd ' osd 'allow *' | \ - sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring - sudo chown ceph. ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID} - sudo chown ceph. 
${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring - sudo ceph-osd -c ${CEPH_CONF_FILE} --setuser ceph --setgroup ceph -i ${OSD_ID} --mkfs - sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs - - sudo systemctl enable ceph-osd@${OSD_ID} - done - - if [ ! -z "$CEPH_MIN_CLIENT_VERSION" ]; then - sudo ceph -c ${CEPH_CONF_FILE} \ - osd set-require-min-compat-client ${CEPH_MIN_CLIENT_VERSION} - fi - - if is_ceph_enabled_for_service manila; then - # create a MDS - sudo mkdir -p ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID} - sudo chown ceph. ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID} - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create mds.${MDS_ID} \ - mon 'allow profile mds ' osd 'allow rw' mds 'allow' \ - -o ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}/keyring - sudo chown ceph. ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}/keyring - - sudo systemctl enable ceph-mds@${MDS_ID} - fi - - if [ "$ENABLE_CEPH_RGW" = "True" ]; then - _configure_ceph_rgw - fi - - if [ "$ENABLE_CEPH_ISCSI" = "True" ]; then - _configure_ceph_iscsi - fi -} - -function _configure_rgw_ceph_section { - configure_ceph_embedded_rgw_paths - - iniset -sudo ${CEPH_CONF_FILE} ${key} "host" "$(hostname)" - iniset -sudo ${CEPH_CONF_FILE} ${key} "keyring" "${dest}/keyring" - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw socket path" "/tmp/radosgw-$(hostname).sock" - iniset -sudo ${CEPH_CONF_FILE} ${key} "log file" "/var/log/ceph/radosgw-$(hostname).log" - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw data" "${dest}" - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw print continue" "false" - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw frontends" "civetweb port=${CEPH_RGW_PORT}" - - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone url" "$KEYSTONE_SERVICE_URI" - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw s3 auth use keystone" "true" - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone admin user" "radosgw" - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone admin password" "$SERVICE_PASSWORD" - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone accepted roles" "Member, _member_, admin, ResellerAdmin" - - if [ "$CEPH_RGW_KEYSTONE_SSL" = "True" ]; then - iniset -sudo ${CEPH_CONF_FILE} ${key} "nss db path" "${dest}/nss" - else - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone verify ssl" "false" - fi - - if [[ $CEPH_RGW_IDENTITY_API_VERSION == '2.0' ]]; then - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone admin tenant" "$SERVICE_PROJECT_NAME" - else - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone admin project" "$SERVICE_PROJECT_NAME" - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone admin domain" "$SERVICE_DOMAIN_NAME" - iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone api version" "3" - fi -} - -function _configure_ceph_rgw_container { - _configure_rgw_ceph_section - sudo docker restart ceph-demo -} - -function _configure_ceph_rgw { - # bootstrap rados gateway - _configure_rgw_ceph_section - sudo mkdir -p $dest - sudo ceph auth get-or-create $key \ - osd 'allow rwx' mon 'allow rw' \ - -o ${dest}/keyring - - sudo systemctl enable ceph-radosgw@rgw.$(hostname) - - sudo chown -R ceph. 
${CEPH_DATA_DIR} -} - -function _configure_ceph_iscsi_gateway { - iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "minimum_gateways" $CEPH_ISCSI_MINIMUM_GATEWAYS - iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "trusted_ip_list" "$HOST_IP,localhost" - iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "cluster_name" "ceph" - iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "gateway_keyring" "ceph.client.admin.keyring" - iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "prometheus_host" "$CEPH_ISCSI_API_HOST" - iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "api_secure" "false" - iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "api_user" "$CEPH_ISCSI_API_USER" - iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "api_password" "$CEPH_ISCSI_API_PASSWORD" - iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "api_port" "$CEPH_ISCSI_API_PORT" -} - -function _configure_ceph_iscsi { - _configure_ceph_iscsi_gateway - sudo ceph -c ${CEPH_CONF_FILE} \ - osd pool create ${CEPH_ISCSI_POOL} ${CEPH_ISCSI_POOL_PG} - - sudo systemctl daemon-reload - sudo systemctl enable tcmu-runner - sudo systemctl enable rbd-target-gw - sudo systemctl enable rbd-target-api -} - -function _post_start_configure_iscsi_gateway { - # Now we setup the rbd-target-gw and rbd-target-api for use - GWCLI=$(which gwcli) - removeme=$(sudo systemctl status rbd-target-api) - FQDN=$(hostname -f) - - # create the target_iqn for exporting all volumes - sudo $GWCLI /iscsi-targets create $CEPH_ISCSI_TARGET_IQN - - # now we add the gateway definition - # Didn't find the gateway, so lets create it - sudo $GWCLI /iscsi-targets/$CEPH_ISCSI_TARGET_IQN/gateways create $FQDN $HOST_IP skipchecks=true -} - -function start_ceph_iscsi { - sudo systemctl start tcmu-runner - sudo systemctl start rbd-target-gw - sudo systemctl start rbd-target-api - sleep 10 - - # we have to setup the gateway and api after they start - _post_start_configure_iscsi_gateway -} - -function stop_ceph_iscsi { - GWCLI=$(which gwcli) - FQDN=$(hostname -f) - sudo $GWCLI /iscsi-targets/$CEPH_ISCSI_TARGET_IQN/gateways delete $FQDN confirm=true - sudo $GWCLI /iscsi-targets delete $CEPH_ISCSI_TARGET_IQN - - sudo systemctl stop rbd-target-api - sudo systemctl stop rbd-target-gw - sudo systemctl stop tcmu-runner -} - -function _create_swift_endpoint { - - local swift_service - swift_service=$(get_or_create_service "swift" "object-store" "Swift Service") - - local swift_endpoint - swift_endpoint="$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:${CEPH_RGW_PORT}/swift/v1" - - get_or_create_endpoint $swift_service \ - "$REGION_NAME" $swift_endpoint $swift_endpoint $swift_endpoint -} - -function configure_ceph_embedded_rgw_paths { - if [[ "$CEPH_CONTAINERIZED" == "True" ]]; then - dest=${CEPH_DATA_DIR}/radosgw/$(hostname) - key=client.radosgw.gateway - else - dest=${CEPH_DATA_DIR}/radosgw/ceph-rgw.$(hostname) - key=client.rgw.$(hostname) - fi -} - -function configure_ceph_embedded_rgw { - configure_ceph_embedded_rgw_paths - # keystone endpoint for radosgw - _create_swift_endpoint - - # Create radosgw service user with admin privileges - create_service_user "radosgw" "admin" - - if [ "$CEPH_RGW_KEYSTONE_SSL" = "True" ]; then - # radosgw needs to access keystone's revocation list - sudo mkdir -p ${dest}/nss - sudo openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | \ - sudo certutil -d ${dest}/nss -A -n ca -t "TCu,Cu,Tuw" - - sudo openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | \ - sudo certutil -A -d ${dest}/nss -n signing_cert -t "P,P,P" - fi -} - -function start_ceph_embedded_rgw { - sudo 
systemctl enable ceph-radosgw@rgw.$(hostname) - sudo systemctl start ceph-radosgw@rgw.$(hostname) -} - -function configure_ceph_embedded_glance { - # configure Glance service options, ceph pool, ceph user and ceph key - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool \ - set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID} - fi -} - -# configure_ceph_glance() - Glance config needs to come after Glance is set up -function configure_ceph_glance { - if [[ "$GLANCE_RGW_BACKEND" = "True" && "$ENABLE_CEPH_RGW" = "True" ]]; then - # common glance accounts for swift - create_service_user "glance-swift" "ResellerAdmin" - iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift - - AUTH_URL=$KEYSTONE_SERVICE_URI/v$CEPH_RGW_IDENTITY_API_VERSION - - iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD - iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $AUTH_URL - iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_name $SERVICE_DOMAIN_NAME - iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_name $SERVICE_DOMAIN_NAME - iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version $CEPH_RGW_IDENTITY_API_VERSION - - iniset $GLANCE_API_CONF glance_store default_store swift - iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True - - iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF - iniset $GLANCE_API_CONF glance_store default_swift_reference ref1 - iniset $GLANCE_API_CONF glance_store stores "file, http, swift" - - else - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create \ - ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP} - - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} auth \ - get-or-create client.${GLANCE_CEPH_USER} \ - mon "allow r" \ - osd "allow class-read object_prefix rbd_children, \ - allow rx pool=${CINDER_CEPH_POOL}, \ - allow rx pool=${NOVA_CEPH_POOL}, \ - allow rwx pool=${GLANCE_CEPH_POOL}" | \ - sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring - - sudo chown ${STACK_USER}:$(id -g -n $whoami) \ - ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring - - iniset $GLANCE_API_CONF DEFAULT show_multiple_locations True - iniset $GLANCE_API_CONF glance_store default_store rbd - iniset $GLANCE_API_CONF glance_store stores "file, http, rbd" - iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE - iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER - iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL - fi -} - -function configure_ceph_manila { - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create ${CEPHFS_METADATA_POOL} \ - ${CEPHFS_POOL_PG} - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create ${CEPHFS_DATA_POOL} \ - ${CEPHFS_POOL_PG} - if [[ ${CEPHFS_MULTIPLE_FILESYSTEMS} == 'True' ]]; then - sudo ceph -c ${CEPH_CONF_FILE} fs flag set enable_multiple true \ - --yes-i-really-mean-it - fi - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} fs new ${CEPHFS_FILESYSTEM} ${CEPHFS_METADATA_POOL} \ - ${CEPHFS_DATA_POOL} - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} auth get-or-create \ - client.${MANILA_CEPH_USER} \ - mon "allow *" osd "allow rw" mds "allow *" mgr "allow *" \ - -o ${CEPH_CONF_DIR}/ceph.client.${MANILA_CEPH_USER}.keyring - sudo chown ${STACK_USER}:$(id -g -n $whoami) \ - ${CEPH_CONF_DIR}/ceph.client.${MANILA_CEPH_USER}.keyring - # Enable snapshots in CephFS. 
- sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} fs set ${CEPHFS_FILESYSTEM} allow_new_snaps true \ - --yes-i-really-mean-it - - # Make manila's libcephfs client a root user. - iniset -sudo ${CEPH_CONF_FILE} client.${MANILA_CEPH_USER} "client mount uid" "0" - iniset -sudo ${CEPH_CONF_FILE} client.${MANILA_CEPH_USER} "client mount gid" "0" - - if [ $MANILA_CEPH_DRIVER == 'cephfsnfs' ]; then - configure_nfs_ganesha - start_nfs_ganesha - fi - -# RESTART DOCKER CONTAINER - -} - -function configure_ceph_embedded_manila { - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool set ${CEPHFS_DATA_POOL} \ - crush_ruleset ${RULE_ID} - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool set ${CEPHFS_METADATA_POOL} \ - crush_ruleset ${RULE_ID} - fi -} - -function configure_ceph_embedded_nova { - # configure Nova service options, ceph pool, ceph user and ceph key - - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool \ - set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID} - fi -} - -# configure_ceph_nova() - Nova config needs to come after Nova is set up -function configure_ceph_nova { - - # When REMOTE_CEPH=True is set on subnodes skip the creation of the nova - # pool as it has already been created on the controller that has - # REMOTE_CEPH=False. - if [[ "$REMOTE_CEPH" == "False" ]]; then - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create \ - ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP} - fi - - iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER} - iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID} - iniset $NOVA_CONF libvirt inject_key false - iniset $NOVA_CONF libvirt inject_partition -2 - iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback" - iniset $NOVA_CONF libvirt images_type rbd - iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL} - iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE} - - if ! 
is_ceph_enabled_for_service cinder; then - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} \ - auth get-or-create client.${CINDER_CEPH_USER} \ - mon "allow r" \ - osd "allow class-read object_prefix rbd_children, \ - allow rwx pool=${CINDER_CEPH_POOL}, \ - allow rwx pool=${NOVA_CEPH_POOL}, \ - allow rwx pool=${GLANCE_CEPH_POOL}" | \ - sudo tee \ - ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring \ - > /dev/null - - sudo chown ${STACK_USER}:$(id -g -n $whoami) \ - ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring - fi -} - -function configure_ceph_embedded_cinder { - # Configure Cinder service options, ceph pool, ceph user and ceph key - - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool \ - set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID} - fi -} - -# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up -function configure_ceph_cinder { - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create \ - ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP} - - sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} auth get-or-create \ - client.${CINDER_CEPH_USER} \ - mon "allow r" \ - osd "allow class-read object_prefix rbd_children, \ - allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL}, \ - allow rwx pool=${GLANCE_CEPH_POOL}" | \ - sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring - - sudo chown ${STACK_USER}:$(id -g -n $whoami) \ - ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring - - sudo rbd pool init ${CINDER_CEPH_POOL} -} - -# install_ceph_remote() - Collect source and prepare -function install_ceph_remote { - install_package ceph-common - # ceph-common in Bionic (18.04) installs only the python2 variants of - # required packages, meaning we need to install the python3 variants - # manually. Hopefully this won't be necessary in Focal (20.04) - # https://packages.ubuntu.com/bionic/ceph-common - if python3_enabled; then - install_package python3-cephfs python3-prettytable python3-rados python3-rbd python3-requests - fi - # Since pip10, pip will refuse to uninstall files from packages - # that were created with distutils (rather than more modern - # setuptools). This is because it technically doesn't have a - # manifest of what to remove. However, in most cases, simply - # overwriting works. 
So this hacks around those packages that - # have been dragged in by some other system dependency - if is_ubuntu; then - sudo rm -rf /usr/lib/python3/dist-packages/logutils*.egg-info - fi - if is_fedora; then - sudo rm -rf /usr/lib64/python3*/site-packages/logutils*.egg-info - fi - -} - -function dnf_add_repository_ceph { - local ceph_release=$1 - local package_release=$2 - - cat > ceph.repo < -# - package_release: to override the os_RELEASE variable -function configure_repo_ceph { - - package_release=${1:-$os_RELEASE} - - if is_ubuntu; then - if [[ "${TEST_MASTER}" == "True" ]]; then - repo_file_name="/etc/apt/sources.list.d/ceph-master.list" - sudo wget -c "https://shaman.ceph.com/api/repos/ceph/master/latest/ubuntu/${package_release}/flavors/default/repo" -O ${repo_file_name} - else - wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add - - sudo apt-add-repository -y "deb https://download.ceph.com/debian-${CEPH_RELEASE}/ $package_release main" - fi - sudo apt-get -y update - elif is_fedora; then - package_release="el"${package_release} - if [[ "${TEST_MASTER}" == "True" ]]; then - repo_file_name="/etc/yum.repos.d/ceph-master.repo" - sudo wget -c "https://shaman.ceph.com/api/repos/ceph/master/latest/centos/${package_release}/flavors/default/repo" -O ${repo_file_name} - sudo dnf config-manager --add-repo ${repo_file_name} - else - dnf_add_repository_ceph ${CEPH_RELEASE} ${package_release} - fi - fi -} - -# cleanup_repo_ceph() - Remove Ceph repositories -# Usage: cleanup_repo_ceph -function cleanup_repo_ceph { - if is_ubuntu; then - sudo apt-add-repository -r -y "deb https://download.ceph.com/debian-${CEPH_RELEASE}/ ${os_CODENAME} main" - elif is_fedora; then - sudo rm -rf /etc/yum.repos.d/ceph.repo - fi -} - -function setup_packages_for_manila_on_ubuntu { - CEPH_PACKAGES="${CEPH_PACKAGES} ceph-mds libcephfs2" - - if [ $MANILA_CEPH_DRIVER == 'cephfsnfs' ]; then - install_nfs_ganesha - fi - - if python3_enabled; then - CEPH_PACKAGES="${CEPH_PACKAGES} python3-cephfs" - fi -} - -function setup_packages_for_manila_on_fedora_family { - if [ $MANILA_CEPH_DRIVER == 'cephfsnfs' ]; then - install_nfs_ganesha - fi -} - -function install_ceph { - - if is_ubuntu; then - # NOTE(vkmc) Dependencies for setting up repos - install_package software-properties-common - - CEPH_PACKAGES="ceph libnss3-tools" - if python3_enabled; then - CEPH_PACKAGES="${CEPH_PACKAGES} python3-rados python3-rbd" - fi - - if is_ceph_enabled_for_service manila; then - setup_packages_for_manila_on_ubuntu - fi - - if [ "$ENABLE_CEPH_RGW" = "True" ]; then - CEPH_PACKAGES="${CEPH_PACKAGES} radosgw" - fi - - if [ "$ENABLE_CEPH_ISCSI" = "True" ]; then - # Only Ubuntu Focal have the required packages. Using distro provided - # packages might be more stable for CI. - if [[ $os_CODENAME =~ focal ]]; then - CEPH_PACKAGES="${CEPH_PACKAGES} ceph-iscsi targetcli-fb " - else - die $LINENO "For ubuntu, ceph iscsi only support focal now" - fi - fi - - install_package ${CEPH_PACKAGES} - elif is_fedora; then - override_os_release="" - if ! [[ $os_VENDOR =~ Fedora ]] && [[ $os_RELEASE =~ (31|32) ]]; then - die $LINENO "Supported for Fedora 31 and 32. Not supported for other releases." 
- override_os_release="8" - fi - - # NOTE(lyarwood) Use the py3 el8 packages on Fedora - configure_repo_ceph ${override_os_release} - - CEPH_PACKAGES="ceph" - - if is_ceph_enabled_for_service manila; then - setup_packages_for_manila_on_fedora_family - fi - - if [ "$ENABLE_CEPH_RGW" = "True" ]; then - CEPH_PACKAGES="${CEPH_PACKAGES} ceph-radosgw" - fi - - if [ "$ENABLE_CEPH_ISCSI" = "True" ]; then - # TODO(xinliang): Install shaman ceph iscsi gateway packages like ceph-ansible: - # https://github.com/ceph/ceph-ansible/blob/6dd9b255656b7124f8963cf65a862930fa28d162/roles/ceph-iscsi-gw/tasks/non-container/prerequisites.yml#L2 - die $LINENO "Ceph iscsi gateway is not supported for ${os_VENDOR} distro yet" - fi - - install_package ${CEPH_PACKAGES} - else - die $LINENO "${os_VENDOR} is not supported by the Ceph plugin for Devstack" - fi - # Since pip10, pip will refuse to uninstall files from packages - # that were created with distutils (rather than more modern - # setuptools). This is because it technically doesn't have a - # manifest of what to remove. However, in most cases, simply - # overwriting works. So this hacks around those packages that - # have been dragged in by some other system dependency - if is_ubuntu; then - sudo rm -rf /usr/lib/python3/dist-packages/logutils*.egg-info - fi - if is_fedora; then - sudo rm -rf /usr/lib64/python3*/site-packages/logutils*.egg-info - fi - -} - -# start_ceph() - Start running processes, including screen -function start_ceph { - sudo chown -R ceph. ${CEPH_DATA_DIR} - - sudo systemctl start ceph-mon@$(hostname) - local ceph_version - ceph_version=$(_get_ceph_version mon) - - sudo systemctl start ceph-mgr@${MGR_ID} - # use `tell mgr` as the mgr might not have been activated - # yet to register the python module commands. - if ! sudo ceph -c ${CEPH_CONF_FILE} tell mgr restful create-self-signed-cert; then - echo MGR Restful is not working, perhaps the package is not installed? - fi - for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do - sudo systemctl start ceph-osd@$id - done - if is_ceph_enabled_for_service manila; then - sudo systemctl start ceph-mds@${MDS_ID} - fi - - if [ "$ENABLE_CEPH_ISCSI" = "True" ]; then - start_ceph_iscsi - fi -} - -# stop_ceph() - Stop running processes (non-screen) -function stop_ceph { - - if [ "$ENABLE_CEPH_RGW" = "True" ]; then - sudo systemctl stop ceph-radosgw@rgw.$(hostname) - sudo systemctl disable ceph-radosgw@rgw.$(hostname) - fi - if [ "$ENABLE_CEPH_ISCSI" = "True" ]; then - stop_ceph_iscsi - fi - if is_ceph_enabled_for_service manila; then - if [ $MANILA_CEPH_DRIVER == 'cephfsnfs' ]; then - stop_nfs_ganesha - fi - sudo systemctl stop ceph-mds@${MDS_ID} - sudo systemctl disable ceph-mds@${MDS_ID} - fi - - # if mon is dead or unhealthy we won't get the list - # of osds but should continue anyways. - ids=$(sudo ceph -c ${CEPH_CONF_FILE} osd ls 2>/dev/null --connect-timeout 5) - for id in $ids; do - sudo systemctl stop ceph-osd@$id - sudo systemctl disable ceph-osd@$id - done - - sudo systemctl stop ceph-mgr@${MGR_ID} - sudo systemctl disable ceph-mgr@${MGR_ID} - - # In nautilus we have the new ceph-crash service for monitoring - # Try to stop it. 
If there is no service, stop/disable do nothing - sudo systemctl stop ceph-crash - sudo systemctl disable ceph-crash - - sudo systemctl stop ceph-mon@$(hostname) - sudo systemctl disable ceph-mon@$(hostname) - - # NOTE(vkmc) Cleanup any leftover unit files - sudo rm -f /etc/systemd/system/ceph* - - # Clean up CEPH_DATA_DIR - sudo umount ${CEPH_DATA_DIR} - # devstack create_disk function adds an entry in /etc/fstab - # that doesn't always get cleaned up on restacks so clean it - # up here. Use a pattern that escapes the '/' path delimiters - # in the expresssion given to the sed command. - pattern=$(echo $CEPH_DATA_DIR | sed 's_/_\\/_g') - sudo sed -i "/$pattern/d" /etc/fstab - sudo rm -rf ${CEPH_DATA_DIR}/* -} - -# Restore xtrace -$XTRACE - -## Local variables: -## mode: shell-script -## End: diff --git a/devstack/lib/cephadm b/devstack/lib/cephadm index 85f823dd..fa336f5d 100755 --- a/devstack/lib/cephadm +++ b/devstack/lib/cephadm @@ -383,7 +383,7 @@ function _install_and_configure_clustered_nfs { } function _install_and_configure_standalone_nfs { - source $CEPH_PLUGIN_DIR/lib/common + source $CEPH_PLUGIN_DIR/lib/nfs-ganesha install_nfs_ganesha configure_nfs_ganesha start_nfs_ganesha diff --git a/devstack/lib/common b/devstack/lib/nfs-ganesha similarity index 100% rename from devstack/lib/common rename to devstack/lib/nfs-ganesha diff --git a/devstack/override-defaults b/devstack/override-defaults index 79f09d8d..9ec4d0e4 100644 --- a/devstack/override-defaults +++ b/devstack/override-defaults @@ -8,20 +8,9 @@ ENABLE_CEPH_NOVA=$(trueorfalse True ENABLE_CEPH_NOVA) # Do not enable RGW by default as RGW is not tested in upstream CI. ENABLE_CEPH_RGW=$(trueorfalse False ENABLE_CEPH_RGW) -if [[ $ENABLE_CEPH_CINDER == "True" ]]; then - if [[ $ENABLE_CEPH_ISCSI == "True" ]]; then - CINDER_DRIVER=${CINDER_DRIVER:-ceph_iscsi} - CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-ceph_iscsi} - CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} - else - CINDER_DRIVER=${CINDER_DRIVER:-ceph} - CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-ceph} - fi -fi - +CINDER_DRIVER=${CINDER_DRIVER:-ceph} +CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-ceph} +REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH) if [[ $ENABLE_CEPH_MANILA == "True" ]]; then MANILA_CEPH_DRIVER=${MANILA_CEPH_DRIVER:-cephfsnative} fi - -CEPHADM_DEPLOY=$(trueorfalse False CEPHADM_DEPLOY) -REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index ccd8c3cd..e6a251ab 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -1,184 +1,38 @@ -# ceph.sh - DevStack extras script to install Ceph - +# cephadm.sh - DevStack extras script to install Ceph if [[ "$1" == "source" ]]; then # Initial source - if [[ "$CEPHADM_DEPLOY" = "True" ]]; then - source $TOP_DIR/lib/cephadm - else - source $TOP_DIR/lib/ceph - fi + source $TOP_DIR/lib/cephadm elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then if [[ "$ENABLE_CEPH_RGW" = "True" ]] && (is_service_enabled swift); then die $LINENO \ "You cannot activate both Swift and Ceph Rados Gateway, \ please disable Swift or set ENABLE_CEPH_RGW=False" fi - if [[ "$CEPHADM_DEPLOY" = "True" ]]; then - # Set up system services - echo_summary "[cephadm] Configuring system services ceph" - pre_install_ceph - else - echo_summary "Installing Ceph" - check_os_support_ceph - if [ "$REMOTE_CEPH" = "False" ]; then - if [ "$CEPH_CONTAINERIZED" = "True" ]; then - echo_summary "Configuring and initializing Ceph" - deploy_containerized_ceph - else - install_ceph - 
echo_summary "Configuring Ceph" - configure_ceph - # NOTE (leseb): we do everything here - # because we need to have Ceph started before the main - # OpenStack components. - # Ceph OSD must start here otherwise we can't upload any images. - echo_summary "Initializing Ceph" - start_ceph - fi - else - install_ceph_remote - fi - fi + echo_summary "[cephadm] Configuring system services ceph" + pre_install_ceph elif [[ "$1" == "stack" && "$2" == "install" ]]; then - if [[ "$CEPHADM_DEPLOY" = "True" && "$REMOTE_CEPH" = "False" ]]; then + if [[ "$REMOTE_CEPH" = "False" ]]; then # Perform installation of service source echo_summary "[cephadm] Installing ceph" install_ceph set_min_client_version - elif [[ "$CEPHADM_DEPLOY" = "True" && "$REMOTE_CEPH" = "True" ]]; then + else echo "[CEPHADM] Remote Ceph: Skipping install" get_cephadm - else - # FIXME(melwitt): This is a hack to get around a namespacing issue with - # Paste and PasteDeploy. For stable/queens, we use the Pike UCA packages - # and the Ceph packages in the Pike UCA are pulling in python-paste and - # python-pastedeploy packages. The python-pastedeploy package satisfies the - # upper-constraints but python-paste does not, so devstack pip installs a - # newer version of it, while python-pastedeploy remains. The mismatch - # between the install path of paste and paste.deploy causes Keystone to - # fail to start, with "ImportError: cannot import name deploy." - # TODO(frickler): This is needed for all branches currently. - pip_install -U --force PasteDeploy - install_package python-is-python3 fi elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - if [[ "$CEPHADM_DEPLOY" = "True" ]]; then - # Configure after the other layer 1 and 2 services have been configured - echo_summary "[cephadm] Configuring additional Ceph services" - configure_ceph - else - if is_ceph_enabled_for_service glance; then - echo_summary "Configuring Glance for Ceph" - configure_ceph_glance - fi - if is_ceph_enabled_for_service nova; then - echo_summary "Configuring Nova for Ceph" - configure_ceph_nova - fi - if is_ceph_enabled_for_service cinder; then - echo_summary "Configuring Cinder for Ceph" - configure_ceph_cinder - fi - if is_ceph_enabled_for_service nova || \ - is_ceph_enabled_for_service cinder; then - # NOTE (leseb): the part below is a requirement - # to attach Ceph block devices - echo_summary "Configuring libvirt secret" - import_libvirt_secret_ceph - fi - if is_ceph_enabled_for_service manila; then - echo_summary "Configuring Manila for Ceph" - configure_ceph_manila - fi - - if [ "$REMOTE_CEPH" = "False" ]; then - if is_ceph_enabled_for_service glance; then - echo_summary "Configuring Glance for Ceph" - configure_ceph_embedded_glance - fi - if is_ceph_enabled_for_service nova; then - echo_summary "Configuring Nova for Ceph" - configure_ceph_embedded_nova - fi - if is_ceph_enabled_for_service cinder; then - echo_summary "Configuring Cinder for Ceph" - configure_ceph_embedded_cinder - fi - if is_ceph_enabled_for_service manila; then - echo_summary "Configuring Manila for Ceph" - configure_ceph_embedded_manila - fi - if [ "$ENABLE_CEPH_RGW" = "True" ]; then - echo_summary "Configuring Rados Gateway with Keystone for Swift" - configure_ceph_embedded_rgw - if [ "$CEPH_CONTAINERIZED" = "False" ]; then - start_ceph_embedded_rgw - else - _configure_ceph_rgw_container - fi - fi - fi - fi + echo_summary "[cephadm] Configuring additional Ceph services" + configure_ceph if [[ "$MDS_LOGS" == "True" ]]; then enable_verbose_mds_logging fi elif [[ "$1" == "stack" && 
"$2" == "test-config" ]]; then if is_service_enabled tempest; then iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume False - - # Only enable shelve testing for branches which have the fix for - # nova bug 1653953. - if [[ "$TARGET_BRANCH" =~ stable/(ocata|pike) ]]; then - iniset $TEMPEST_CONFIG compute-feature-enabled shelve False - else - iniset $TEMPEST_CONFIG compute-feature-enabled shelve True - fi - # Attached volume extend support for rbd was introduced in Stein by - # I5698e451861828a8b1240d046d1610d8d37ca5a2 - if [[ "$TARGET_BRANCH" =~ stable/(ocata|pike|queens|rocky) ]]; then - iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume False - else - iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume True - fi - # Volume revert to snapshot support for rbd was introduced in Ussuri by - # If8a5eb3a03e18f9043ff29f7648234c9b46376a0 - if [[ "$TARGET_BRANCH" =~ stable/(ocata|pike|queens|rocky|stein|train) ]]; then - iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert False - else - iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert True - fi - fi -fi - - - -if [[ "$1" == "unstack" ]]; then - if [[ "$CEPHADM_DEPLOY" = "True" ]]; then - cleanup_ceph - else - if [ "$CEPH_CONTAINERIZED" = "False" ]; then - if [ "$REMOTE_CEPH" = "True" ]; then - cleanup_ceph_remote - else - stop_ceph - cleanup_ceph_embedded - fi - else - cleanup_containerized_ceph - fi - cleanup_ceph_general - fi -fi - -if [[ "$1" == "clean" ]]; then - if [[ "$CEPHADM_DEPLOY" = "True" ]]; then - cleanup_ceph - else - if [ "$REMOTE_CEPH" = "True" ]; then - cleanup_ceph_remote - else - cleanup_ceph_embedded - fi - cleanup_ceph_general + iniset $TEMPEST_CONFIG compute-feature-enabled shelve True + iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume True + iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert True fi +elif [[ "$1" == "unstack" || "$1" == "clean" ]]; then + cleanup_ceph fi diff --git a/devstack/settings b/devstack/settings index ef9e4dbc..12a3894d 100644 --- a/devstack/settings +++ b/devstack/settings @@ -5,17 +5,7 @@ CEPH_PLUGIN_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})) # Add ceph plugin specific settings -# NOTE: Currently these are redundant since ceph -# plugin job defn defines them already, but -# once DEVSTACK_GATE_CEPH is removed, these -# won't be redundant, so its ok to have these -# anyways. - -if [ "$ENABLE_CEPH_ISCSI" = "True" ]; then - TEMPEST_STORAGE_PROTOCOL=iSCSI -else - TEMPEST_STORAGE_PROTOCOL=ceph -fi +TEMPEST_STORAGE_PROTOCOL=ceph # VOLUME_BACKING_FILE_SIZE should be sourced from devstack/stackrc but define # a default here if not set already. VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-8GB} @@ -23,13 +13,7 @@ CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$VOLUME_BACKING_FILE_SIZE} # Disable manage/unmanage snapshot tests on Tempest TEMPEST_VOLUME_MANAGE_SNAPSHOT=False -# Source plugin's lib/cephadm or lib/ceph -# depending on chosen deployment method -if [[ "$CEPHADM_DEPLOY" = "True" ]]; then - source $CEPH_PLUGIN_DIR/lib/cephadm -else - source $CEPH_PLUGIN_DIR/lib/ceph -fi +source $CEPH_PLUGIN_DIR/lib/cephadm # Set Manila related global variables used by Manila's DevStack plugin. if (is_ceph_enabled_for_service manila); then