# lib/ceph
# Functions to control the configuration and operation of the **Ceph** storage service

# Dependencies:
#
# - ``functions`` file
# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined

# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
#
# - install_ceph
# - configure_ceph
# - init_ceph
# - start_ceph
# - stop_ceph
# - cleanup_ceph

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
# Default is ``/var/lib/ceph``.
CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img

# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/ceph``.
CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}

# DevStack will create a loop-back disk formatted as XFS to store the
# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the disk size, using a
# unit suffix such as ``G``.
# Default is 2 gigabytes.
CEPH_LOOPBACK_DISK_SIZE_DEFAULT=2G
CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}

# Common
CEPH_FSID=$(uuidgen)
CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf

# Glance
GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}

# Nova
NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}

# Cinder
CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}

# Set ``CEPH_REPLICAS`` to the number of replicas to configure for your
# Ceph cluster. By default only one replica is configured, since that is
# far less CPU- and memory-intensive. If you plan to test Ceph
# replication, increase this value.
CEPH_REPLICAS=${CEPH_REPLICAS:-1}
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
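
# The defaults above take effect only when the corresponding variable is
# not already set, so each of them can be overridden before this file is
# sourced, for example from DevStack's ``local.conf``/``localrc``.
# A minimal sketch (the values below are purely illustrative, not
# recommended settings):
#
#   CEPH_LOOPBACK_DISK_SIZE=8G
#   CEPH_DATA_DIR=/opt/stack/data/ceph
#   GLANCE_CEPH_POOL=glance-images
#   CINDER_CEPH_POOL=cinder-volumes
#   CEPH_REPLICAS=2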


# Functions
# ---------

# import_libvirt_secret_ceph() - Import the Cinder user key into libvirt
# so it can connect to the Ceph cluster while attaching a Cinder block device
function import_libvirt_secret_ceph {
    cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>${CINDER_CEPH_UUID}</uuid>
  <usage type='ceph'>
    <name>client.${CINDER_CEPH_USER} secret</name>
  </usage>
</secret>
EOF
    sudo virsh secret-define --file secret.xml
    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 "$(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})"
    sudo rm -f secret.xml
}

# cleanup_ceph() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ceph {
    # stop any running daemons, then wipe data and configuration
    sudo pkill -f ceph-mon
    sudo pkill -f ceph-osd
    sudo rm -rf ${CEPH_DATA_DIR}/*/*
    sudo rm -rf ${CEPH_CONF_DIR}/*
    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
        sudo umount ${CEPH_DATA_DIR}
    fi
    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
        sudo rm -f ${CEPH_DISK_IMAGE}
    fi
    uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
    # remove the libvirt secret created by import_libvirt_secret_ceph
    VIRSH_UUID=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
    sudo virsh secret-undefine ${VIRSH_UUID} >/dev/null 2>&1
}

# configure_ceph() - Set config files, create data dirs, etc
function configure_ceph {
    local count=0

    # create a backing file disk
    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}

    # populate ceph directory
    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}

    # create ceph monitor initial key and directory
    sudo ceph-authtool ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname) --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) --cap mon 'allow *'
    sudo mkdir ${CEPH_DATA_DIR}/mon/ceph-$(hostname)

    # create a default ceph configuration file
    sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
[global]
fsid = ${CEPH_FSID}
mon_initial_members = $(hostname)
mon_host = ${SERVICE_HOST}
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd journal size = 100
EOF
}

# stop_ceph() - Stop running Ceph daemons
function stop_ceph {
    if is_ubuntu; then
        sudo service ceph-mon-all stop > /dev/null 2>&1
        sudo service ceph-osd-all stop > /dev/null 2>&1
    else
        sudo service ceph stop > /dev/null 2>&1
    fi
}


# Restore xtrace
$XTRACE

## Local variables:
## mode: shell-script
## End:
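
# Optional sanity check (illustrative only, assuming the cluster and the
# libvirt secret were created by the functions above): after ``stack.sh``
# completes, something like the following can be run by hand to confirm
# that the secret was imported and that the cluster responds:
#
#   sudo virsh secret-list
#   sudo virsh secret-get-value ${CINDER_CEPH_UUID}
#   sudo ceph -c ${CEPH_CONF_FILE} -s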