diff --git a/extras.d/60-ceph.sh b/extras.d/60-ceph.sh
new file mode 100644
index 0000000000..5fb34ea89b
--- /dev/null
+++ b/extras.d/60-ceph.sh
@@ -0,0 +1,44 @@
+# ceph.sh - DevStack extras script to install Ceph
+
+if is_service_enabled ceph; then
+    if [[ "$1" == "source" ]]; then
+        # Initial source
+        source $TOP_DIR/lib/ceph
+    elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
+        echo_summary "Installing Ceph"
+        install_ceph
+        echo_summary "Configuring Ceph"
+        configure_ceph
+        # NOTE (leseb): Do everything here because we need to have Ceph started before the main
+        # OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
+        echo_summary "Initializing Ceph"
+        init_ceph
+        start_ceph
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        if is_service_enabled glance; then
+            echo_summary "Configuring Glance for Ceph"
+            configure_ceph_glance
+        fi
+        if is_service_enabled nova; then
+            echo_summary "Configuring Nova for Ceph"
+            configure_ceph_nova
+        fi
+        if is_service_enabled cinder; then
+            echo_summary "Configuring Cinder for Ceph"
+            configure_ceph_cinder
+            # NOTE (leseb): the part below is a requirement from Cinder in order to attach volumes
+            # so we should run the following within the if statement.
+            echo_summary "Configuring libvirt secret"
+            import_libvirt_secret_ceph
+        fi
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_ceph
+        cleanup_ceph
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        cleanup_ceph
+    fi
+fi
diff --git a/files/apts/ceph b/files/apts/ceph
new file mode 100644
index 0000000000..69863abc34
--- /dev/null
+++ b/files/apts/ceph
@@ -0,0 +1,2 @@
+ceph # NOPRIME
+xfsprogs
diff --git a/files/rpms-suse/ceph b/files/rpms-suse/ceph
new file mode 100644
index 0000000000..8d465000e1
--- /dev/null
+++ b/files/rpms-suse/ceph
@@ -0,0 +1,3 @@
+ceph # NOPRIME
+xfsprogs
+lsb
diff --git a/files/rpms/ceph b/files/rpms/ceph
new file mode 100644
index 0000000000..5483735741
--- /dev/null
+++ b/files/rpms/ceph
@@ -0,0 +1,3 @@
+ceph # NOPRIME
+xfsprogs
+redhat-lsb-core
diff --git a/functions b/functions
index ca8ef805cd..cd9e078470 100644
--- a/functions
+++ b/functions
@@ -546,6 +546,40 @@ if ! function_exists echo_nolog; then
     }
 fi
 
+
+# create_disk - Create backing disk
+function create_disk {
+    local node_number
+    local disk_image=${1}
+    local storage_data_dir=${2}
+    local loopback_disk_size=${3}
+
+    # Create a loopback disk and format it to XFS.
+    if [[ -e ${disk_image} ]]; then
+        if egrep -q ${storage_data_dir} /proc/mounts; then
+            sudo umount ${storage_data_dir}/drives/sdb1
+            sudo rm -f ${disk_image}
+        fi
+    fi
+
+    sudo mkdir -p ${storage_data_dir}/drives/images
+
+    sudo truncate -s ${loopback_disk_size} ${disk_image}
+
+    # Make a fresh XFS filesystem. Use bigger inodes so xattr can fit in
+    # a single inode. Keeping the default inode size (256) will result in multiple
+    # inodes being used to store xattr. Retrieving the xattr will be slower
+    # since we have to read multiple inodes. This statement is true for both
+    # Swift and Ceph.
+    sudo mkfs.xfs -f -i size=1024 ${disk_image}
+
+    # Mount the disk with mount options to make it as efficient as possible
+    if ! egrep -q ${storage_data_dir} /proc/mounts; then
+        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \
+            ${disk_image} ${storage_data_dir}
+    fi
+}
+
 # Restore xtrace
 $XTRACE
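For context, a minimal sketch of how the new create_disk helper is expected to be invoked; the arguments mirror the CEPH_DISK_IMAGE, CEPH_DATA_DIR and CEPH_LOOPBACK_DISK_SIZE defaults defined in lib/ceph below, and the standalone call is illustrative only, not part of the patch:

    # Creates a 2G sparse file, formats it as XFS with 1024-byte inodes and
    # mounts it on the given data directory via a loopback device.
    create_disk /var/lib/ceph/drives/images/ceph.img /var/lib/ceph 2G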
diff --git a/lib/ceph b/lib/ceph
new file mode 100644
index 0000000000..32a4760784
--- /dev/null
+++ b/lib/ceph
@@ -0,0 +1,286 @@
+# lib/ceph
+# Functions to control the configuration and operation of the **Ceph** storage service
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined
+
+# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
+#
+# - install_ceph
+# - configure_ceph
+# - init_ceph
+# - start_ceph
+# - stop_ceph
+# - cleanup_ceph
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
+# Default is the common DevStack data directory.
+CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
+CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img
+
+# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
+# Default is ``/etc/ceph``.
+CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}
+
+# DevStack will create a loop-back disk formatted as XFS to store the
+# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the disk size
+# (a ``truncate -s`` size, e.g. ``2G``).
+# Default is 2 gigabytes.
+CEPH_LOOPBACK_DISK_SIZE_DEFAULT=2G
+CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}
+
+# Common
+CEPH_FSID=$(uuidgen)
+CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf
+
+# Glance
+GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
+GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
+GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
+GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}
+
+# Nova
+NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
+NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
+NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}
+
+# Cinder
+CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
+CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
+CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
+CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
+CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
+
+# Set ``CEPH_REPLICAS`` to configure how many replicas are to be configured
+# for your Ceph cluster. By default we configure only one replica since it is
+# far less CPU- and memory-intensive. If you plan to test Ceph replication,
+# feel free to increase this value.
+CEPH_REPLICAS=${CEPH_REPLICAS:-1}
+CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
+
+# Functions
+# ------------
+
+# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
+# so it can connect to the Ceph cluster while attaching a Cinder block device
+function import_libvirt_secret_ceph {
+    cat > secret.xml <<EOF
+<secret ephemeral='no' private='no'>
+  <uuid>${CINDER_CEPH_UUID}</uuid>
+  <usage type='ceph'>
+    <name>client.${CINDER_CEPH_USER} secret</name>
+  </usage>
+</secret>
+EOF
+    sudo virsh secret-define --file secret.xml
+    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
+    sudo rm -f secret.xml
+}
+
+# cleanup_ceph() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_ceph {
+    sudo pkill -f ceph-mon
+    sudo pkill -f ceph-osd
+    sudo rm -rf ${CEPH_DATA_DIR}/*/*
+    sudo rm -rf ${CEPH_CONF_DIR}/*
+    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
+        sudo umount ${CEPH_DATA_DIR}
+    fi
+    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
+        sudo rm -f ${CEPH_DISK_IMAGE}
+    fi
+    uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
+    VIRSH_UUID=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
+    sudo virsh secret-undefine ${VIRSH_UUID} >/dev/null 2>&1
+}
+
+# configure_ceph() - Set config files, create data dirs, etc
+function configure_ceph {
+    local count=0
+
+    # create a backing file disk
+    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}
+
+    # populate ceph directory
+    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}
+
+    # create ceph monitor initial key and directory
+    sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) --create-keyring --name=mon. \
+        --add-key=$(ceph-authtool --gen-print-key) --cap mon 'allow *'
+    sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)
+
+    # create a default ceph configuration file
+    sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
[...]
+
+# stop_ceph() - Stop running processes
+function stop_ceph {
+    if is_ubuntu; then
+        sudo service ceph-mon-all stop > /dev/null 2>&1
+        sudo service ceph-osd-all stop > /dev/null 2>&1
+    else
+        sudo service ceph stop > /dev/null 2>&1
+    fi
+}
+
+
+# Restore xtrace
+$XTRACE
+
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
new file mode 100644
index 0000000000..e9d2a02ad3
--- /dev/null
+++ b/lib/cinder_backends/ceph
@@ -0,0 +1,79 @@
+# lib/cinder_backends/ceph
+# Configure the ceph backend
+
+# Enable with:
+#
+#   CINDER_ENABLED_BACKENDS+=,ceph:ceph
+#
+# Optional parameters:
+#   CINDER_BAK_CEPH_POOL=
+#   CINDER_BAK_CEPH_USER=
+#   CINDER_BAK_CEPH_POOL_PG=
+#   CINDER_BAK_CEPH_POOL_PGP=
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_cinder_backend_ceph - called from configure_cinder()
+
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
+CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
+CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
+CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_backend_ceph - Set config files, create data dirs, etc
+# configure_cinder_backend_ceph $name
+function configure_cinder_backend_ceph {
+    local be_name=$1
+
+    iniset $CINDER_CONF $be_name volume_backend_name $be_name
+    iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver"
+    iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE"
+    iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL"
+    iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER"
+    iniset $CINDER_CONF $be_name rbd_uuid "$CINDER_CEPH_UUID"
+    iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False
+    iniset $CINDER_CONF $be_name rbd_max_clone_depth 5
+    iniset $CINDER_CONF DEFAULT glance_api_version 2
+
+    if is_service_enabled c-bak; then
+        # Configure the Cinder backup service options: ceph pool, ceph user and ceph key
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
+        if [[ $CEPH_REPLICAS -ne 1 ]]; then
+            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+        fi
+        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+        sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+
+        iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
+        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
+        iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
+        iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
+        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
+        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0
+        iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True
+    fi
+}
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
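To exercise the change end to end, a hedged local.conf/localrc sketch, assuming DevStack's standard enable_service mechanism; the 8G size is an illustrative override, and every variable shown is defined by this patch:

    enable_service ceph                    # consumed by extras.d/60-ceph.sh
    enable_service c-bak                   # also configures the Ceph backup driver
    CEPH_LOOPBACK_DISK_SIZE=8G             # override the 2G loopback default
    CEPH_REPLICAS=1                        # single replica keeps CPU/memory use low
    CINDER_ENABLED_BACKENDS+=,ceph:ceph    # Cinder RBD backend from lib/cinder_backends/ceph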