Clean up stack.sh config

* Clean up interactive configuration
* Complete moving initialization of service-specific variables into the
  service lib/* files.
* Cosmetic cleanups

Change-Id: Iea14359bd224dd5533201d4c7cb1437d5382c4d1
Dean Troyer 2013-03-18 16:07:56 -05:00
parent 71404ed5a4
commit b7490da972
6 changed files with 71 additions and 51 deletions
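
As a rough sketch of the pattern this change completes, a service's lib/* file now owns its own defaults instead of stack.sh. The file name and variables below are illustrative only, not part of this commit:

# lib/example -- hypothetical service library sourced by stack.sh
# A value set in localrc wins; otherwise the default on the right applies
EXAMPLE_DIR=$DEST/example
EXAMPLE_SERVICE_PORT=${EXAMPLE_SERVICE_PORT:-8888}
# Boolean knobs are normalized with DevStack's trueorfalse helper
EXAMPLE_SECURE_DELETE=$(trueorfalse True $EXAMPLE_SECURE_DELETE)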

View File

@@ -53,6 +53,11 @@ fi
# Support for multi lvm backend configuration (default is no support)
CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND)
# Should cinder perform secure deletion of volumes?
# Defaults to true, can be set to False to avoid this bug when testing:
# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
# Name of the lvm volume groups to use/create for iscsi volumes
# VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True
VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
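
The Cinder knobs above use that same override-or-default idiom, so they can be preset before stack.sh runs; a minimal localrc sketch (values are illustrative only):

# localrc -- illustrative values, set only what should differ from the defaults
CINDER_MULTI_LVM_BACKEND=True      # second LVM backend; uses VOLUME_GROUP2 per the comment above
CINDER_SECURE_DELETE=False         # skip secure delete to dodge the kernel bug linked above while testing
VOLUME_GROUP=stack-volumes
VOLUME_GROUP2=stack-volumes2       # illustrative name for the second volume group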

View File

@@ -59,6 +59,9 @@ KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001}
KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
# Set the tenant for service accounts in Keystone
SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
# Entry Points
# ------------
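
Note that ``KEYSTONE_SERVICE_PROTOCOL`` above inherits its default from ``SERVICE_PROTOCOL``, so one localrc setting can flip every service that follows this pattern while a per-service override still wins; a small illustrative sketch:

# Chained defaults (illustrative, mirrors the line above)
SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
# localrc examples:
#   SERVICE_PROTOCOL=https              -> Keystone endpoint defaults to https too
#   KEYSTONE_SERVICE_PROTOCOL=http      -> overrides just Keystone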

View File

@@ -65,6 +65,9 @@ SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
QEMU_CONF=/etc/libvirt/qemu.conf
NOVNC_DIR=$DEST/noVNC
SPICE_DIR=$DEST/spice-html5
# Nova Network Configuration
# --------------------------

View File

@@ -28,6 +28,7 @@ set +o xtrace
SWIFT_DIR=$DEST/swift
SWIFTCLIENT_DIR=$DEST/python-swiftclient
SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift}
SWIFT3_DIR=$DEST/swift3
# TODO: add logging to different location.
@@ -40,6 +41,12 @@ SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift}
# TODO(dtroyer): remove SWIFT_CONFIG_DIR after cutting stable/grizzly
SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-${SWIFT_CONFIG_DIR:-/etc/swift}}
if is_service_enabled s-proxy && is_service_enabled swift3; then
# If we are using swift3, we can default the s3 port to swift instead
# of nova-objectstore
S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
fi
# DevStack will create a loop-back disk formatted as XFS to store the
# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in
# kilobytes.
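
Per the comment above, the intent is for the S3 API port to default to the Swift proxy (8080) when swift3 is enabled, and otherwise fall back to nova-objectstore's 3333 set in stackrc (last file in this diff). A standalone sketch of that default-if-unset behaviour, separate from DevStack's real source order (the helper function is illustrative only):

# Illustration of the ${VAR:-default} idiom used above
s3_port_for() {
    local s3_port=$1                  # simulates an operator override from localrc ("" = unset)
    if [[ "$2" == "swift3" ]]; then
        s3_port=${s3_port:-8080}      # Swift proxy answers S3 requests
    fi
    echo "${s3_port:-3333}"           # nova-objectstore port otherwise
}
s3_port_for ""   swift3    # -> 8080
s3_port_for ""   ""        # -> 3333
s3_port_for 9999 swift3    # -> 9999 (an explicit setting always wins)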

View File

@@ -269,14 +269,12 @@ source $TOP_DIR/lib/ldap
# Set the destination directories for OpenStack projects
HORIZON_DIR=$DEST/horizon
OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
NOVNC_DIR=$DEST/noVNC
SPICE_DIR=$DEST/spice-html5
SWIFT3_DIR=$DEST/swift3
# Should cinder perform secure deletion of volumes?
# Defaults to true, can be set to False to avoid this bug when testing:
# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
# Interactive Configuration
# -------------------------
# Do all interactive config up front before the logging spew begins
# Generic helper to configure passwords
function read_password {
@@ -322,7 +320,6 @@ function read_password {
# Database Configuration
# ----------------------
# To select between database backends, add the following to ``localrc``:
#
@@ -335,8 +332,7 @@ function read_password {
initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled"
# RabbitMQ or Qpid
# --------------------------
# Queue Configuration
# Rabbit connection info
if is_service_enabled rabbit; then
@@ -344,25 +340,10 @@ if is_service_enabled rabbit; then
read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
fi
if is_service_enabled s-proxy; then
# If we are using swift3, we can default the s3 port to swift instead
# of nova-objectstore
if is_service_enabled swift3;then
S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
fi
# We only ask for Swift Hash if we have enabled swift service.
# ``SWIFT_HASH`` is a random unique string for a swift cluster that
# can never change.
read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
fi
# Set default port for nova-objectstore
S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
# Keystone
# --------
if is_service_enabled key; then
# The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is
# just a string and is not a 'real' Keystone token.
read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
@@ -370,27 +351,34 @@ read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN
read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
# Horizon currently truncates usernames and passwords at 20 characters
read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
# Keystone can now optionally install OpenLDAP by adding ldap to the list
# of enabled services in the localrc file (e.g. ENABLED_SERVICES=key,ldap).
# If OpenLDAP has already been installed but you need to clear out
# the Keystone contents of LDAP set KEYSTONE_CLEAR_LDAP to yes
# (e.g. KEYSTONE_CLEAR_LDAP=yes ) in the localrc file. To enable the
# Keystone Identity Driver (keystone.identity.backends.ldap.Identity)
# set KEYSTONE_IDENTITY_BACKEND to ldap (e.g. KEYSTONE_IDENTITY_BACKEND=ldap)
# in the localrc file.
# Keystone can now optionally install OpenLDAP by enabling the ``ldap``
# service in ``localrc`` (e.g. ``enable_service ldap``).
# To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP``
# to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``localrc``. To enable the
# Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``)
# set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g.
# ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``localrc``.
# only request ldap password if the service is enabled
if is_service_enabled ldap; then
read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP"
fi
# Set the tenant for service accounts in Keystone
SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
fi
# Log files
# ---------
# Swift
if is_service_enabled s-proxy; then
# We only ask for Swift Hash if we have enabled swift service.
# ``SWIFT_HASH`` is a random unique string for a swift cluster that
# can never change.
read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
fi
# Configure logging
# -----------------
# Draw a spinner so the user knows something is happening
function spinner() {
@@ -638,14 +626,15 @@ fi
echo_summary "Configuring OpenStack projects"
# Set up our checkouts so they are installed into python path
# allowing ``import nova`` or ``import glance.client``
# Set up our checkouts so they are installed in the python path
configure_keystoneclient
configure_novaclient
setup_develop $OPENSTACKCLIENT_DIR
if is_service_enabled key g-api n-api s-proxy; then
configure_keystone
fi
if is_service_enabled s-proxy; then
configure_swift
configure_swiftclient
@@ -653,6 +642,7 @@ if is_service_enabled s-proxy; then
setup_develop $SWIFT3_DIR
fi
fi
if is_service_enabled g-api n-api; then
configure_glance
fi
@@ -666,17 +656,21 @@ if is_service_enabled nova; then
cleanup_nova
configure_nova
fi
if is_service_enabled horizon; then
configure_horizon
fi
if is_service_enabled quantum; then
setup_quantumclient
setup_quantum
fi
if is_service_enabled heat; then
configure_heat
configure_heatclient
fi
if is_service_enabled cinder; then
configure_cinder
fi
@@ -698,6 +692,7 @@ if is_service_enabled tls-proxy; then
# don't be naive and add to existing line!
fi
# Syslog
# ------
@@ -992,6 +987,7 @@ if is_service_enabled nova && is_baremetal; then
fi
fi
# Launch Services
# ===============
@@ -1081,6 +1077,7 @@ if is_service_enabled heat; then
start_heat
fi
# Create account rc files
# =======================
@@ -1191,6 +1188,7 @@ fi
# Check the status of running services
service_check
# Fin
# ===
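
The read_password helper shown above only prompts for secrets that have not been supplied already, so presetting them avoids the interactive stops. A localrc sketch with placeholder values (pick real secrets in practice):

# localrc -- placeholder secrets; presetting these skips the prompts above
ADMIN_PASSWORD=nomoresecrete                  # Horizon/Keystone password, 20 chars or less
SERVICE_PASSWORD=Ukee6questahghia
SERVICE_TOKEN=a-long-random-admin-token
RABBIT_PASSWORD=guest-no-more
SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5   # random per-cluster value that must never change
LDAP_PASSWORD=ldap-secret                     # only read when the ldap service is enabled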

View File

@@ -201,6 +201,10 @@ VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
# Set default port for nova-objectstore
S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
# Common network names
PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"}