From 4eb04a5f9e378fa67175056ab94b5803db2be875 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Thu, 4 Dec 2014 16:22:41 +0100
Subject: [PATCH] Ability to use a remote Ceph cluster
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Sometimes we want to run benchmarks on virtual machines that are backed
by a Ceph cluster. The first idea that comes to mind is to use DevStack
to quickly get an OpenStack environment up and running, but how do we
configure DevStack to use a remote Ceph cluster?

With this commit it is now possible to use an already existing Ceph
cluster. In this case DevStack just needs two things:

* the location of the Ceph config file (by default DevStack will look
  for /etc/ceph/ceph.conf)
* the admin key of the remote Ceph cluster (by default DevStack will
  look for /etc/ceph/ceph.client.admin.keyring)

DevStack will then create the necessary pools, users and keys, and will
connect the OpenStack environment as usual. During the unstack phase,
all pools, users and keys will be deleted on the remote cluster, while
local files and the ceph-common package will be removed from the
DevStack host.

To enable this mode simply add REMOTE_CEPH=True to your localrc file;
a short localrc sketch follows the diff below.

Change-Id: I1a4b6fd676d50b6a41a09e7beba9b11f8d1478f7
Signed-off-by: Sébastien Han
---
 extras.d/60-ceph.sh      |  52 +++++++++++++++----
 lib/ceph                 | 108 ++++++++++++++++++++++++++++-----------
 lib/cinder_backends/ceph |  10 ++--
 3 files changed, 126 insertions(+), 44 deletions(-)

diff --git a/extras.d/60-ceph.sh b/extras.d/60-ceph.sh
index 50bdfaee3b..38b901b767 100644
--- a/extras.d/60-ceph.sh
+++ b/extras.d/60-ceph.sh
@@ -6,14 +6,19 @@ if is_service_enabled ceph; then
         source $TOP_DIR/lib/ceph
     elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
         echo_summary "Installing Ceph"
-        install_ceph
-        echo_summary "Configuring Ceph"
-        configure_ceph
-        # NOTE (leseb): Do everything here because we need to have Ceph started before the main
-        # OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
-        echo_summary "Initializing Ceph"
-        init_ceph
-        start_ceph
+        check_os_support_ceph
+        if [ "$REMOTE_CEPH" = "False" ]; then
+            install_ceph
+            echo_summary "Configuring Ceph"
+            configure_ceph
+            # NOTE (leseb): Do everything here because we need to have Ceph started before the main
+            # OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
+ echo_summary "Initializing Ceph" + init_ceph + start_ceph + else + install_ceph_remote + fi elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then if is_service_enabled glance; then echo_summary "Configuring Glance for Ceph" @@ -32,14 +37,39 @@ if is_service_enabled ceph; then echo_summary "Configuring libvirt secret" import_libvirt_secret_ceph fi + + if [ "$REMOTE_CEPH" = "False" ]; then + if is_service_enabled glance; then + echo_summary "Configuring Glance for Ceph" + configure_ceph_embedded_glance + fi + if is_service_enabled nova; then + echo_summary "Configuring Nova for Ceph" + configure_ceph_embedded_nova + fi + if is_service_enabled cinder; then + echo_summary "Configuring Cinder for Ceph" + configure_ceph_embedded_cinder + fi + fi fi if [[ "$1" == "unstack" ]]; then - stop_ceph - cleanup_ceph + if [ "$REMOTE_CEPH" = "True" ]; then + cleanup_ceph_remote + else + cleanup_ceph_embedded + stop_ceph + fi + cleanup_ceph_general fi if [[ "$1" == "clean" ]]; then - cleanup_ceph + if [ "$REMOTE_CEPH" = "True" ]; then + cleanup_ceph_remote + else + cleanup_ceph_embedded + fi + cleanup_ceph_general fi fi diff --git a/lib/ceph b/lib/ceph index 521430626b..d5c916f084 100644 --- a/lib/ceph +++ b/lib/ceph @@ -68,6 +68,11 @@ CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)} CEPH_REPLICAS=${CEPH_REPLICAS:-1} CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS}) +# Connect to an existing Ceph cluster +REMOTE_CEPH=$(trueorfalse False $REMOTE_CEPH) +REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring} + + # Functions # ------------ @@ -92,29 +97,69 @@ EOF sudo rm -f secret.xml } +# undefine_virsh_secret() - Undefine Cinder key secret from libvirt +function undefine_virsh_secret { + if is_service_enabled cinder || is_service_enabled nova; then + local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }') + sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1 + fi +} + + +# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph +function check_os_support_ceph { + if [[ ! ${DISTRO} =~ (trusty|f20|f21) ]]; then + echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. 
+        if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
+            die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
+        fi
+        NO_UPDATE_REPOS=False
+    fi
+}
+
 # cleanup_ceph() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_ceph {
+function cleanup_ceph_remote {
+    # do a proper cleanup from here to avoid leftover on the remote Ceph cluster
+    if is_service_enabled glance; then
+        sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+        sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
+    fi
+    if is_service_enabled cinder; then
+        sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+        sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
+    fi
+    if is_service_enabled c-bak; then
+        sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+        sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
+    fi
+    if is_service_enabled nova; then
+        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
+        sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+    fi
+}
+
+function cleanup_ceph_embedded {
     sudo pkill -f ceph-mon
     sudo pkill -f ceph-osd
     sudo rm -rf ${CEPH_DATA_DIR}/*/*
-    sudo rm -rf ${CEPH_CONF_DIR}/*
     if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
         sudo umount ${CEPH_DATA_DIR}
     fi
     if [[ -e ${CEPH_DISK_IMAGE} ]]; then
         sudo rm -f ${CEPH_DISK_IMAGE}
     fi
-    uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
-    if is_service_enabled cinder || is_service_enabled nova; then
-        local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
-        sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
-    fi
-    if is_service_enabled nova; then
-        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
-    fi
 }
 
+function cleanup_ceph_general {
+    undefine_virsh_secret
+    uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
+
+    # purge ceph config file and keys
+    sudo rm -rf ${CEPH_CONF_DIR}/*
+}
+
+
 # configure_ceph() - Set config files, create data dirs, etc
 function configure_ceph {
     local count=0
@@ -130,7 +175,7 @@ function configure_ceph {
     sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)
 
     # create a default ceph configuration file
-    sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
+    sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
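
A minimal localrc sketch for the remote mode described in the commit
message above. The variable names and default paths come from lib/ceph
in this patch; the one assumption not part of the patch is that the
remote cluster's ceph.conf and client.admin keyring have already been
copied to /etc/ceph on the DevStack host:

    # localrc on the DevStack host
    REMOTE_CEPH=True
    # Optional: only needed if the admin keyring is somewhere other
    # than the default location checked by lib/ceph
    REMOTE_CEPH_ADMIN_KEY_PATH=/etc/ceph/ceph.client.admin.keyring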