devstack/lib/ceph
Ian Wienand 523f488036 Namespace XTRACE commands
I noticed this when debugging some grenade failures.

An include of grenade/functions stores the current value of XTRACE
(on) and disables xtrace for the rest of the import.

We then include devstack's "functions" library, which now overwrites
the stored value of XTRACE with the current state; i.e. disabled.

When it finishes, it restores the prior state (disabled), and then
grenade restores the same value of XTRACE (disabled).

The result is that xtrace is incorrectly disabled until the next time
it just happens to be turned on.

The solution is to namespace the variable that stores the current
xtrace value, so that when we finish sourcing a file we always restore
tracing to the state it had when we entered.

Some files had already discovered this.  In general there is
inconsistency in how the variable is set, and a lot of obvious
copy-paste.  This brings consistency across all files by using
_XTRACE_* prefixes for the store/restore of tracing values.

Change-Id: Iba7739eada5711d9c269cb4127fa712e9f961695
2015-11-27 15:36:04 +11:00
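
The pattern the commit describes is exactly what the file below does at its top and bottom: each sourced library saves the caller's xtrace state into its own namespaced variable and executes that saved value again on exit. A minimal sketch of the idea (the variable name is illustrative):

    # Save the caller's trace setting under a per-file name
    _XTRACE_LIB_EXAMPLE=$(set +o | grep xtrace)
    set +o xtrace

    # ... library body runs with tracing off ...

    # Restore whatever the caller had ("set -o xtrace" or "set +o xtrace")
    $_XTRACE_LIB_EXAMPLE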


#!/bin/bash
#
# lib/ceph
# Functions to control the configuration and operation of the **Ceph** storage service

# Dependencies:
#
# - ``functions`` file
# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined

# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
#
# - install_ceph
# - configure_ceph
# - init_ceph
# - start_ceph
# - stop_ceph
# - cleanup_ceph

# Save trace setting
_XTRACE_LIB_CEPH=$(set +o | grep xtrace)
set +o xtrace

# Defaults
# --------

# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
# Default is ``/var/lib/ceph``.
CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img

# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/ceph``.
CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}

# DevStack will create a loop-back disk formatted as XFS to store the
# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the disk size, including
# a unit suffix (e.g. ``4G``).
# Default is 4 gigabytes.
CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G
CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}
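
# Because these defaults use ${VAR:-default}, they can be overridden in
# DevStack's ``local.conf`` before ``stack.sh`` runs; for example (value
# purely illustrative):
#
#   CEPH_LOOPBACK_DISK_SIZE=8G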

# Common
CEPH_FSID=$(uuidgen)
CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf

# Glance
GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}

# Nova
NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}

# Cinder
CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}

# Set ``CEPH_REPLICAS`` to configure how many replicas are to be
# configured for your Ceph cluster. By default we configure only one
# replica, since it is far less CPU- and memory-intensive. If you plan
# to test Ceph replication, feel free to increase this value.
CEPH_REPLICAS=${CEPH_REPLICAS:-1}
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
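
# For illustration: with CEPH_REPLICAS=3, ``seq`` makes CEPH_REPLICAS_SEQ
# expand to "1 2 3", so the OSD-creation loop in configure_ceph below
# brings up three OSDs.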

# Connect to an existing Ceph cluster
REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}

# Cinder encrypted volume tests are not supported with a Ceph backend due to
# bug 1463525.
ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False

# Functions
# ---------

function get_ceph_version {
    local ceph_version_str
    ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
    echo $ceph_version_str
}
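
# The result is a major.minor string such as "0.94" (example value assumed
# from the parsing above); configure_ceph below compares it against 0.87
# (Giant) to decide which pool commands to run.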

# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
# so it can connect to the Ceph cluster while attaching a Cinder block device
function import_libvirt_secret_ceph {
    cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>${CINDER_CEPH_UUID}</uuid>
  <usage type='ceph'>
    <name>client.${CINDER_CEPH_USER} secret</name>
  </usage>
</secret>
EOF
    sudo virsh secret-define --file secret.xml
    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
    sudo rm -f secret.xml
}
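
# The secret defined above can be listed with ``sudo virsh secret-list``,
# which is also how undefine_virsh_secret() below locates the UUID to remove.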

# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
function undefine_virsh_secret {
    if is_service_enabled cinder || is_service_enabled nova; then
        local virsh_uuid
        virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
        sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
    fi
}

# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
function check_os_support_ceph {
    if [[ ! ${DISTRO} =~ (trusty|f21|f22|f23) ]]; then
        echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 21 (or higher)"
        if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
            die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
        fi
        NO_UPDATE_REPOS=False
    fi
}

# cleanup_ceph() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ceph_remote {
    # do a proper cleanup from here to avoid leftover on the remote Ceph cluster
    if is_service_enabled glance; then
        sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
    fi
    if is_service_enabled cinder; then
        sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
    fi
    if is_service_enabled c-bak; then
        sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
    fi
    if is_service_enabled nova; then
        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
        sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
    fi
}

function cleanup_ceph_embedded {
    sudo killall -w -9 ceph-mon
    sudo killall -w -9 ceph-osd
    sudo rm -rf ${CEPH_DATA_DIR}/*/*
    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
        sudo umount ${CEPH_DATA_DIR}
    fi
    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
        sudo rm -f ${CEPH_DISK_IMAGE}
    fi
    # purge ceph config file and keys
    sudo rm -rf ${CEPH_CONF_DIR}/*
}

function cleanup_ceph_general {
    undefine_virsh_secret
}

# configure_ceph() - Set config files, create data dirs, etc
function configure_ceph {
    local count=0

    # create a backing file disk
    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}

    # populate ceph directory
    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}

    # create ceph monitor initial key and directory
    sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) \
        --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) \
        --cap mon 'allow *'
    sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)

    # create a default ceph configuration file
    sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
[global]
fsid = ${CEPH_FSID}
mon_initial_members = $(hostname)
mon_host = ${SERVICE_HOST}
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd crush chooseleaf type = 0
osd journal size = 100
EOF

    # bootstrap the ceph monitor
    sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) \
        --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname)

    if is_ubuntu; then
        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart
        sudo initctl emit ceph-mon id=$(hostname)
    else
        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit
        sudo service ceph start mon.$(hostname)
    fi

    # wait for the admin key to come up, otherwise we will not be able to do the actions below
    until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
        echo_summary "Waiting for the Ceph admin key to be ready..."
        count=$(($count + 1))
        if [ $count -eq 3 ]; then
            die $LINENO "Maximum of 3 retries reached"
        fi
        sleep 5
    done

    # pools data and metadata were removed in the Giant release, so depending on the version we apply different commands
    local ceph_version
    ceph_version=$(get_ceph_version)
    # change pool replica size according to the CEPH_REPLICAS set by the user
    if [[ ${ceph_version%%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
    else
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
    fi

    # create a simple rule to take OSDs instead of hosts with CRUSH,
    # then apply this rule to the default pools
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
        RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
    fi

    # create the OSD(s)
    for rep in ${CEPH_REPLICAS_SEQ}; do
        OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
        sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
        sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} \
            mon 'allow profile osd ' osd 'allow *' | \
            sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring

        # ceph's init script parses ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ looking for a file
        # named 'upstart' or 'sysvinit'; thanks to these 'touch'es we are able to control
        # the OSD daemons from the init script.
        if is_ubuntu; then
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
        else
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
        fi
    done
}

function configure_ceph_embedded_glance {
    # configure Glance service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

# configure_ceph_glance() - Glance config needs to come after Glance is set up
function configure_ceph_glance {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} \
        mon "allow r" \
        osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | \
        sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring

    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
    iniset $GLANCE_API_CONF glance_store default_store rbd
    iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
    iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
    iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
    iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
}

function configure_ceph_embedded_nova {
    # configure Nova service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

# configure_ceph_nova() - Nova config needs to come after Nova is set up
function configure_ceph_nova {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
    iniset $NOVA_CONF libvirt inject_key false
    iniset $NOVA_CONF libvirt inject_partition -2
    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
    iniset $NOVA_CONF libvirt images_type rbd
    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}

    if ! is_service_enabled cinder; then
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
            mon "allow r" \
            osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \
            sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
        sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    fi
}

function configure_ceph_embedded_cinder {
    # Configure Cinder service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
function configure_ceph_cinder {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
        mon "allow r" \
        osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \
        sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
}

# init_ceph() - Initialize databases, etc.
function init_ceph {
    # clean up from previous (possibly aborted) runs
    # make sure to kill all ceph processes first
    sudo pkill -f ceph-mon || true
    sudo pkill -f ceph-osd || true
}

# install_ceph() - Collect source and prepare
function install_ceph_remote {
    install_package ceph-common
}

function install_ceph {
    install_package ceph
}

# start_ceph() - Start running processes, including screen
function start_ceph {
    if is_ubuntu; then
        sudo initctl emit ceph-mon id=$(hostname)
        for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
            sudo start ceph-osd id=${id}
        done
    else
        sudo service ceph start
    fi
}

# stop_ceph() - Stop running processes (non-screen)
function stop_ceph {
    if is_ubuntu; then
        sudo service ceph-mon-all stop > /dev/null 2>&1
        sudo service ceph-osd-all stop > /dev/null 2>&1
    else
        sudo service ceph stop > /dev/null 2>&1
    fi
}

# Restore xtrace
$_XTRACE_LIB_CEPH
## Local variables:
## mode: shell-script
## End: