diff --git a/exercise.sh b/exercise.sh index ce694fba66..19c9d80451 100755 --- a/exercise.sh +++ b/exercise.sh @@ -2,7 +2,7 @@ # **exercise.sh** -# Keep track of the current devstack directory. +# Keep track of the current DevStack directory. TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions @@ -14,11 +14,11 @@ source $TOP_DIR/stackrc # Run everything in the exercises/ directory that isn't explicitly disabled # comma separated list of script basenames to skip -# to refrain from exercising euca.sh use SKIP_EXERCISES=euca +# to refrain from exercising euca.sh use ``SKIP_EXERCISES=euca`` SKIP_EXERCISES=${SKIP_EXERCISES:-""} # comma separated list of script basenames to run -# to run only euca.sh use RUN_EXERCISES=euca +# to run only euca.sh use ``RUN_EXERCISES=euca`` basenames=${RUN_EXERCISES:-""} EXERCISE_DIR=$TOP_DIR/exercises @@ -27,7 +27,7 @@ if [[ -z "${basenames}" ]]; then # Locate the scripts we should run basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done) else - # If RUN_EXERCISES was specified, ignore SKIP_EXERCISES. + # If ``RUN_EXERCISES`` was specified, ignore ``SKIP_EXERCISES``. SKIP_EXERCISES= fi @@ -56,7 +56,7 @@ for script in $basenames; do fi done -# output status of exercise run +# Output status of exercise run echo "=====================================================================" for script in $skips; do echo SKIP $script diff --git a/functions b/functions index 9adbfe7cf6..5bc8456281 100644 --- a/functions +++ b/functions @@ -439,7 +439,7 @@ function check_path_perm_sanity { echo "*** DEST path element" echo "*** ${rebuilt_path}" echo "*** appears to have 0700 permissions." - echo "*** This is very likely to cause fatal issues for devstack daemons." + echo "*** This is very likely to cause fatal issues for DevStack daemons." if [[ -n "$SKIP_PATH_SANITY" ]]; then return @@ -526,8 +526,8 @@ function setup_colorized_logging { } # These functions are provided for basic fall-back functionality for -# projects that include parts of devstack (grenade). stack.sh will -# override these with more specific versions for devstack (with fancy +# projects that include parts of DevStack (Grenade). stack.sh will +# override these with more specific versions for DevStack (with fancy # spinners, etc). We never override an existing version if ! function_exists echo_summary; then function echo_summary { diff --git a/functions-common b/functions-common index 0f80e98f43..f1aca29b30 100644 --- a/functions-common +++ b/functions-common @@ -971,7 +971,7 @@ function get_packages { # # Only packages required for enabled and collected plugins will included. # -# The same metadata used in the main devstack prerequisite files may be used +# The same metadata used in the main DevStack prerequisite files may be used # in these prerequisite files, see get_packages() for more info. 
function get_plugin_packages { local xtrace=$(set +o | grep xtrace) @@ -1471,7 +1471,7 @@ function fetch_plugins { return fi - echo "Fetching devstack plugins" + echo "Fetching DevStack plugins" for plugin in ${plugins//,/ }; do git_clone_by_name $plugin done diff --git a/gate/updown.sh b/gate/updown.sh index d2d7351a2f..f46385cfe1 100755 --- a/gate/updown.sh +++ b/gate/updown.sh @@ -4,7 +4,7 @@ # # Note: this is expected to start running as jenkins -# Step 1: give back sudoers permissions to devstack +# Step 1: give back sudoers permissions to DevStack TEMPFILE=`mktemp` echo "stack ALL=(root) NOPASSWD:ALL" >$TEMPFILE chmod 0440 $TEMPFILE diff --git a/lib/ceilometer b/lib/ceilometer index 7b2215c3d3..81353093b2 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -4,7 +4,7 @@ # Install and start **Ceilometer** service # To enable a minimal set of Ceilometer services, add the following to the -# localrc section of local.conf: +# ``localrc`` section of ``local.conf``: # # enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api # @@ -17,14 +17,11 @@ # of Ceilometer (see within for additional settings): # # CEILOMETER_USE_MOD_WSGI: When True, run the api under mod_wsgi. -# CEILOMETER_PIPELINE_INTERVAL: The number of seconds between pipeline processing -# runs. Default 600. -# CEILOMETER_BACKEND: The database backend (e.g. 'mysql', 'mongodb', 'es') -# CEILOMETER_COORDINATION_URL: The URL for a group membership service provided -# by tooz. +# CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. Default 600. +# CEILOMETER_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'es') +# CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz. # CEILOMETER_EVENTS: Enable event collection - # Dependencies: # # - functions @@ -94,7 +91,7 @@ function is_ceilometer_enabled { return 1 } -# create_ceilometer_accounts() - Set up common required ceilometer accounts +# create_ceilometer_accounts() - Set up common required Ceilometer accounts # # Project User Roles # ------------------------------------------------------------------ @@ -117,14 +114,14 @@ function create_ceilometer_accounts { "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" fi if is_service_enabled swift; then - # Ceilometer needs ResellerAdmin role to access swift account stats. + # Ceilometer needs ResellerAdmin role to access Swift account stats. 
get_or_add_user_project_role "ResellerAdmin" "ceilometer" $SERVICE_TENANT_NAME fi fi } -# _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file +# _cleanup_keystone_apache_wsgi() - Remove WSGI files, disable and remove Apache vhost file function _cleanup_ceilometer_apache_wsgi { sudo rm -f $CEILOMETER_WSGI_DIR/* sudo rm -f $(apache_site_config_for ceilometer) @@ -149,7 +146,7 @@ function _config_ceilometer_apache_wsgi { local ceilometer_apache_conf=$(apache_site_config_for ceilometer) local apache_version=$(get_apache_version) - # copy proxy vhost and wsgi file + # Copy proxy vhost and wsgi file sudo cp $CEILOMETER_DIR/ceilometer/api/app.wsgi $CEILOMETER_WSGI_DIR/app sudo cp $FILES/apache-ceilometer.template $ceilometer_apache_conf @@ -189,9 +186,9 @@ function configure_ceilometer { sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml fi - # the compute and central agents need these credentials in order to - # call out to other services' public APIs - # the alarm evaluator needs these options to call ceilometer APIs + # The compute and central agents need these credentials in order to + # call out to other services' public APIs. + # The alarm evaluator needs these options to call ceilometer APIs iniset $CEILOMETER_CONF service_credentials os_username ceilometer iniset $CEILOMETER_CONF service_credentials os_password $SERVICE_PASSWORD iniset $CEILOMETER_CONF service_credentials os_tenant_name $SERVICE_TENANT_NAME @@ -237,7 +234,7 @@ function configure_ceilometer { } function configure_mongodb { - # server package is the same on all + # Server package is the same on all local packages=mongodb-server if is_fedora; then @@ -250,13 +247,13 @@ function configure_mongodb { install_package ${packages} if is_fedora; then - # ensure smallfiles selected to minimize freespace requirements + # Ensure smallfiles is selected to minimize freespace requirements sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod restart_service mongod fi - # give mongodb time to start-up + # Give mongodb time to start-up sleep 5 } @@ -347,7 +344,7 @@ function start_ceilometer { run_process ceilometer-acompute "ceilometer-agent-compute --config-file $CEILOMETER_CONF" fi - # only die on API if it was actually intended to be turned on + # Only die on API if it was actually intended to be turned on if is_service_enabled ceilometer-api; then echo "Waiting for ceilometer-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/v2/; then diff --git a/lib/dstat b/lib/dstat index 740e48f9e0..c8faa6578c 100644 --- a/lib/dstat +++ b/lib/dstat @@ -41,7 +41,7 @@ function start_dstat { # stop_dstat() stop dstat process function stop_dstat { # dstat runs as a console, not as a service, and isn't trackable - # via the normal mechanisms for devstack. So lets just do a + # via the normal mechanisms for DevStack. So lets just do a # killall and move on. 
killall dstat || /bin/true } diff --git a/lib/horizon b/lib/horizon index c6e3692d47..63a9d0fe46 100644 --- a/lib/horizon +++ b/lib/horizon @@ -129,7 +129,7 @@ function init_horizon { fi enable_apache_site horizon - # Remove old log files that could mess with how devstack detects whether Horizon + # Remove old log files that could mess with how DevStack detects whether Horizon # has been successfully started (see start_horizon() and functions::screen_it()) # and run_process sudo rm -f /var/log/$APACHE_NAME/horizon_* diff --git a/lib/ironic b/lib/ironic index a7738bc14e..fcf1a543a9 100644 --- a/lib/ironic +++ b/lib/ironic @@ -53,7 +53,7 @@ IRONIC_HW_EPHEMERAL_DISK=${IRONIC_HW_EPHEMERAL_DISK:-0} # The file is composed of multiple lines, each line includes four field # separated by white space: IPMI address, MAC address, IPMI username # and IPMI password. -# An example: +# # 192.168.110.107 00:1e:67:57:50:4c root otc123 IRONIC_IPMIINFO_FILE=${IRONIC_IPMIINFO_FILE:-$IRONIC_DATA_DIR/hardware_info} @@ -99,10 +99,10 @@ IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-http://tarballs.openstack.org IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe_image-oem.cpio.gz} # Which deploy driver to use - valid choices right now -# are 'pxe_ssh', 'pxe_ipmitool', 'agent_ssh' and 'agent_ipmitool'. +# are ``pxe_ssh``, ``pxe_ipmitool``, ``agent_ssh`` and ``agent_ipmitool``. IRONIC_DEPLOY_DRIVER=${IRONIC_DEPLOY_DRIVER:-pxe_ssh} -#TODO(agordeev): replace 'ubuntu' with host distro name getting +# TODO(agordeev): replace 'ubuntu' with host distro name getting IRONIC_DEPLOY_FLAVOR=${IRONIC_DEPLOY_FLAVOR:-ubuntu $IRONIC_DEPLOY_ELEMENT} # Support entry points installation of console scripts diff --git a/lib/lvm b/lib/lvm index d0322c76b3..6c59937b0c 100644 --- a/lib/lvm +++ b/lib/lvm @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/lvm # Configure the default LVM volume group used by Cinder and Nova @@ -32,8 +34,8 @@ DEFAULT_VOLUME_GROUP_NAME=$VOLUME_GROUP_NAME-default BACKING_FILE_SUFFIX=-backing-file -# Entry Points -# ------------ +# Functions +# --------- # _clean_lvm_volume_group removes all default LVM volumes # @@ -52,7 +54,7 @@ function _clean_lvm_volume_group { function _clean_lvm_backing_file { local backing_file=$1 - # if the backing physical device is a loop device, it was probably setup by devstack + # If the backing physical device is a loop device, it was probably setup by DevStack if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then local vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}') sudo losetup -d $vg_dev diff --git a/lib/nova b/lib/nova index 8e1b2f7b9d..385da4e44b 100644 --- a/lib/nova +++ b/lib/nova @@ -55,8 +55,9 @@ NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} # NOVA_API_VERSION valid options -# - default - setup API end points as nova does out of the box -# - v21default - make v21 the default on /v2 +# - default - setup API end points as nova does out of the box +# - v21default - make v21 the default on /v2 +# # NOTE(sdague): this is for transitional testing of the Nova v21 API. # Expect to remove in L or M. 
NOVA_API_VERSION=${NOVA_API_VERSION-default} @@ -77,7 +78,7 @@ EC2_SERVICE_PORT=${EC2_SERVICE_PORT:-8773} EC2_SERVICE_PORT_INT=${EC2_SERVICE_PORT_INT:-18773} # Option to enable/disable config drive -# NOTE: Set FORCE_CONFIG_DRIVE="False" to turn OFF config drive +# NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"True"} # Nova supports pluggable schedulers. The default ``FilterScheduler`` @@ -89,11 +90,11 @@ QEMU_CONF=/etc/libvirt/qemu.conf # Set default defaults here as some hypervisor drivers override these PUBLIC_INTERFACE_DEFAULT=br100 FLAT_NETWORK_BRIDGE_DEFAULT=br100 -# set the GUEST_INTERFACE_DEFAULT to some interface on the box so that -# the default isn't completely crazy. This will match eth*, em*, or -# the new p* interfaces, then basically picks the first +# Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that +# the default isn't completely crazy. This will match ``eth*``, ``em*``, or +# the new ``p*`` interfaces, then basically picks the first # alphabetically. It's probably wrong, however it's less wrong than -# always using 'eth0' which doesn't exist on new Linux distros at all. +# always using ``eth0`` which doesn't exist on new Linux distros at all. GUEST_INTERFACE_DEFAULT=$(ip link \ | grep 'state UP' \ | awk '{print $2}' \ @@ -101,8 +102,8 @@ GUEST_INTERFACE_DEFAULT=$(ip link \ | grep ^[ep] \ | head -1) -# $NOVA_VNC_ENABLED can be used to forcibly enable vnc configuration. -# In multi-node setups allows compute hosts to not run n-novnc. +# ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration. +# In multi-node setups this allows compute hosts to not run ``n-novnc``. NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED) # Get hypervisor configuration @@ -144,7 +145,7 @@ FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} # running the VM - removing a SPOF and bandwidth bottleneck. MULTI_HOST=$(trueorfalse False MULTI_HOST) -# ``NOVA_ALLOW_MOVE_TO_SAME_HOST` can be set to False in multi node devstack, +# ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi-node DevStack, # where there are at least two nova-computes. NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST) diff --git a/lib/oslo b/lib/oslo index 86efb60a4e..d9688a01cd 100644 --- a/lib/oslo +++ b/lib/oslo @@ -2,7 +2,7 @@ # # lib/oslo # -# Functions to install oslo libraries from git +# Functions to install **Oslo** libraries from git # # We need this to handle the fact that projects would like to use # pre-released versions of oslo libraries. @@ -46,8 +46,9 @@ GITDIR["tooz"]=$DEST/tooz # Support entry points installation of console scripts OSLO_BIN_DIR=$(get_python_exec_prefix) -# Entry Points -# ------------ + +# Functions +# --------- function _do_install_oslo_lib { local name=$1 diff --git a/lib/rpc_backend b/lib/rpc_backend index 3033cbe08e..d82af6de6e 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -1,8 +1,7 @@ #!/bin/bash # # lib/rpc_backend -# Interface for interactig with different rpc backend -# rpc backend settings +# Interface for interacting with different RPC backends # Dependencies: # @@ -27,10 +26,10 @@ RPC_MESSAGING_PROTOCOL=${RPC_MESSAGING_PROTOCOL:-0.9} # messaging server as a service, which it really isn't for multi host QPID_HOST=${QPID_HOST:-} + # Functions # --------- - # Make sure we only have one rpc backend enabled. # Also check the specified rpc backend is available on your platform.
function check_rpc_backend { diff --git a/lib/stack b/lib/stack index 11dd87ca28..47e8ce2a22 100644 --- a/lib/stack +++ b/lib/stack @@ -2,15 +2,18 @@ # # lib/stack # -# These functions are code snippets pulled out of stack.sh for easier +# These functions are code snippets pulled out of ``stack.sh`` for easier # re-use by Grenade. They can assume the same environment is available -# as in the lower part of stack.sh, namely a valid stackrc has been sourced -# as well as all of the lib/* files for the services have been sourced. +# as in the lower part of ``stack.sh``, namely a valid stackrc has been sourced +# as well as all of the ``lib/*`` files for the services have been sourced. # # For clarity, all functions declared here that came from ``stack.sh`` # shall be named with the prefix ``stack_``. +# Functions +# --------- + # Generic service install handles venv creation if confgured for service # stack_install_service service function stack_install_service { diff --git a/lib/swift b/lib/swift index 28ef7de1f1..07068bb104 100644 --- a/lib/swift +++ b/lib/swift @@ -38,7 +38,6 @@ fi # Set up default directories GITDIR["python-swiftclient"]=$DEST/python-swiftclient - SWIFT_DIR=$DEST/swift SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift} @@ -59,7 +58,7 @@ SWIFT_DISK_IMAGE=${SWIFT_DATA_DIR}/drives/images/swift.img SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-/etc/swift} if is_service_enabled s-proxy && is_service_enabled swift3; then - # If we are using swift3, we can default the s3 port to swift instead + # If we are using ``swift3``, we can default the S3 port to swift instead # of nova-objectstore S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080} fi @@ -137,11 +136,12 @@ ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012} SWIFT_ENABLE_TEMPURLS=${SWIFT_ENABLE_TEMPURLS:-False} SWIFT_TEMPURL_KEY=${SWIFT_TEMPURL_KEY:-} +# Toggle for deploying Swift under HTTPD + mod_wsgi +SWIFT_USE_MOD_WSGI=${SWIFT_USE_MOD_WSGI:-False} + # Tell Tempest this project is present TEMPEST_SERVICES+=,swift -# Toggle for deploying Swift under HTTPD + mod_wsgi -SWIFT_USE_MOD_WSGI=${SWIFT_USE_MOD_WSGI:-False} # Functions # --------- @@ -303,7 +303,6 @@ function generate_swift_config_services { sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} } - # configure_swift() - Set config files, create data dirs and loop image function configure_swift { local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}" @@ -374,12 +373,9 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT key_file "$SWIFT_SSL_KEY" fi - # Devstack is commonly run in a small slow environment, so bump the - # timeouts up. - # node_timeout is how long between read operations a node takes to - # respond to the proxy server - # conn_timeout is all about how long it takes a connect() system call to - # return + # DevStack is commonly run in a small slow environment, so bump the timeouts up. + # ``node_timeout`` is the node read operation response time to the proxy server + # ``conn_timeout`` is how long it takes a connect() system call to return iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120 iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20 @@ -394,10 +390,10 @@ function configure_swift { SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" fi - # Restrict the length of auth tokens in the swift proxy-server logs. 
+ # Restrict the length of auth tokens in the Swift ``proxy-server`` logs. iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH} - # By default Swift will be installed with keystone and tempauth middleware + # By default Swift will be installed with Keystone and tempauth middleware # and add the swift3 middleware if its configured for it. The token for # tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the # token for keystoneauth would have the standard reseller_prefix `AUTH_` @@ -413,17 +409,13 @@ function configure_swift { sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER} - iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true - - # Configure Crossdomain iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:crossdomain use "egg:swift#crossdomain" - - # This causes the authtoken middleware to use the same python logging - # adapter provided by the swift proxy-server, so that request transaction + # Configure authtoken middleware to use the same Python logging + # adapter provided by the Swift ``proxy-server``, so that request transaction # IDs will included in all of its log messages. iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift @@ -436,7 +428,7 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use "egg:swift#keystoneauth" iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin" - # Configure Tempauth. In the sample config file, Keystoneauth is commented + # Configure Tempauth. In the sample config file Keystoneauth is commented # out. Make sure we uncomment Tempauth after we uncomment Keystoneauth # otherwise, this code also sets the reseller_prefix for Keystoneauth. iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate @@ -579,7 +571,8 @@ function create_swift_disk { sudo chown -R ${STACK_USER}: ${node} done } -# create_swift_accounts() - Set up standard swift accounts and extra + +# create_swift_accounts() - Set up standard Swift accounts and extra # one for tests we do this by attaching all words in the account name # since we want to make it compatible with tempauth which use # underscores for separators. @@ -593,9 +586,9 @@ function create_swift_disk { # swifttenanttest4 swiftusertest4 admin swift_test function create_swift_accounts { - # Defines specific passwords used by tools/create_userrc.sh - # As these variables are used by create_userrc.sh, they must be exported - # The _password suffix is expected by create_userrc.sh + # Defines specific passwords used by ``tools/create_userrc.sh`` + # As these variables are used by ``create_userrc.sh,`` they must be exported + # The _password suffix is expected by ``create_userrc.sh``. export swiftusertest1_password=testing export swiftusertest2_password=testing2 export swiftusertest3_password=testing3 @@ -725,8 +718,8 @@ function start_swift { # By default with only one replica we are launching the proxy, # container, account and object server in screen in foreground and - # other services in background. If we have SWIFT_REPLICAS set to something - # greater than one we first spawn all the swift services then kill the proxy + # other services in background. 
If we have ``SWIFT_REPLICAS`` set to something + # greater than one we first spawn all the Swift services then kill the proxy # service so we can run it in foreground in screen. ``swift-init ... # {stop|restart}`` exits with '1' if no servers are running, ignore it just # in case @@ -762,7 +755,7 @@ function stop_swift { swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0 fi - # screen normally killed by unstack.sh + # screen normally killed by ``unstack.sh`` if type -p swift-init >/dev/null; then swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi diff --git a/lib/tempest b/lib/tempest index 8672a14338..d86ee27fd0 100644 --- a/lib/tempest +++ b/lib/tempest @@ -62,13 +62,11 @@ BUILD_INTERVAL=1 # The default is set to 196 seconds. BUILD_TIMEOUT=${BUILD_TIMEOUT:-196} - # This must be False on stable branches, as master tempest # deps do not match stable branch deps. Set this to True to -# have tempest installed in devstack by default. +# have tempest installed in DevStack by default. INSTALL_TEMPEST=${INSTALL_TEMPEST:-"True"} - BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-${CIRROS_VERSION}" BOTO_CONF=/etc/boto.cfg @@ -83,6 +81,7 @@ TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PR IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED) IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED) + # Functions # --------- @@ -168,8 +167,8 @@ function configure_tempest { esac fi - # Create tempest.conf from tempest.conf.sample - # copy every time, because the image UUIDS are going to change + # Create ``tempest.conf`` from ``tempest.conf.sample`` + # Copy every time because the image UUIDS are going to change sudo install -d -o $STACK_USER $TEMPEST_CONFIG_DIR install -m 644 $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG @@ -179,8 +178,8 @@ function configure_tempest { # the cloud. We don't always want to so that we can ensure Tempest # would work on a public cloud. TEMPEST_HAS_ADMIN=$(trueorfalse True TEMPEST_HAS_ADMIN) - # See files/keystone_data.sh and stack.sh where admin, demo and alt_demo - # user and tenant are set up... + + # See ``lib/keystone`` where these users and tenants are set up ADMIN_USERNAME=${ADMIN_USERNAME:-admin} ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin} ADMIN_DOMAIN_NAME=${ADMIN_DOMAIN_NAME:-Default} @@ -191,13 +190,13 @@ function configure_tempest { ADMIN_TENANT_ID=$(openstack project list | awk "/ admin / { print \$2 }") if is_service_enabled nova; then - # If the ``DEFAULT_INSTANCE_TYPE`` not declared, use the new behavior - # Tempest creates instane types for himself + # If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior + # Tempest creates its own instance types if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then available_flavors=$(nova flavor-list) if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then if is_arch "ppc64"; then - # qemu needs at least 128MB of memory to boot on ppc64 + # Qemu needs at least 128MB of memory to boot on ppc64 nova flavor-create m1.nano 42 128 0 1 else nova flavor-create m1.nano 42 64 0 1 @@ -214,8 +213,7 @@ function configure_tempest { fi flavor_ref_alt=84 else - # Check Nova for existing flavors and, if set, look for the - # ``DEFAULT_INSTANCE_TYPE`` and use that. + # Check Nova for existing flavors, if ``DEFAULT_INSTANCE_TYPE`` is set use it. 
boto_instance_type=$DEFAULT_INSTANCE_TYPE flavor_lines=`nova flavor-list` IFS=$'\r\n' @@ -240,8 +238,8 @@ function configure_tempest { flavor_ref=${flavors[0]} flavor_ref_alt=$flavor_ref - # ensure flavor_ref and flavor_ref_alt have different values - # some resize instance in tempest tests depends on this. + # Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values. + # Some resize instance tests in Tempest depend on this. for f in ${flavors[@]:1}; do if [[ $f -ne $flavor_ref ]]; then flavor_ref_alt=$f @@ -266,7 +264,7 @@ function configure_tempest { public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \ awk '{print $2}') if [ "$Q_USE_NAMESPACE" == "False" ]; then - # If namespaces are disabled, devstack will create a single + # If namespaces are disabled, DevStack will create a single # public router that tempest should be configured to use. public_router_id=$(neutron router-list | awk "/ $Q_ROUTER_NAME / \ { print \$2 }") @@ -274,6 +272,7 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG + # Oslo iniset $TEMPEST_CONFIG oslo_concurrency lock_path $TEMPEST_STATE_PATH mkdir -p $TEMPEST_STATE_PATH @@ -309,15 +308,13 @@ function configure_tempest { fi # Image - # for the gate we want to be able to override this variable so we aren't - # doing an HTTP fetch over the wide internet for this test + # We want to be able to override this variable in the gate to avoid + # doing an external HTTP fetch for this test. if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE fi # Auth - # - # TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN} iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} iniset $TEMPEST_CONFIG auth tempest_roles "Member" @@ -336,7 +333,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method # Compute Features - # Run verify_tempest_config -ur to retrieve enabled extensions on API endpoints + # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints # NOTE(mtreinish): This must be done after auth settings are added to the tempest config local tmp_cfg_file=$(mktemp) cd $TEMPEST_DIR @@ -417,11 +414,11 @@ function configure_tempest { iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} # Telemetry - # Ceilometer API optimization happened in juno that allows to run more tests in tempest. + # A Ceilometer API optimization in Juno allows more tests to run in Tempest. # Once Tempest retires support for icehouse this flag can be removed. iniset $TEMPEST_CONFIG telemetry too_slow_to_test "False" - # Object storage + # Object Store local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"} if [[ !
-z "$DISABLE_OBJECT_STORAGE_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint @@ -445,7 +442,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume-feature-enabled backup False fi - # Using CINDER_ENABLED_BACKENDS + # Using ``CINDER_ENABLED_BACKENDS`` if [[ -n "$CINDER_ENABLED_BACKENDS" ]] && [[ $CINDER_ENABLED_BACKENDS =~ .*,.* ]]; then iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True" local i=1 @@ -470,7 +467,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG dashboard dashboard_url "http://$SERVICE_HOST/" iniset $TEMPEST_CONFIG dashboard login_url "http://$SERVICE_HOST/auth/login/" - # cli + # CLI iniset $TEMPEST_CONFIG cli cli_dir $NOVA_BIN_DIR # Baremetal @@ -495,7 +492,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled suspend False fi - # service_available + # ``service_available`` for service in ${TEMPEST_SERVICES//,/ }; do if is_service_enabled $service ; then iniset $TEMPEST_CONFIG service_available $service "True" @@ -505,7 +502,7 @@ function configure_tempest { done if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then - # Use the BOTO_CONFIG environment variable to point to this file + # Use the ``BOTO_CONFIG`` environment variable to point to this file iniset $BOTO_CONF Boto ca_certificates_file $SSL_BUNDLE_FILE sudo chown $STACK_USER $BOTO_CONF fi @@ -520,7 +517,6 @@ function configure_tempest { # ------------------------------------------------------------------ # alt_demo alt_demo Member -# Migrated from keystone_data.sh function create_tempest_accounts { if is_service_enabled tempest; then # Tempest has some tests that validate various authorization checks @@ -531,13 +527,13 @@ function create_tempest_accounts { fi } -# install_tempest_lib() - Collect source, prepare, and install tempest-lib +# install_tempest_lib() - Collect source, prepare, and install ``tempest-lib`` function install_tempest_lib { if use_library_from_git "tempest-lib"; then git_clone_by_name "tempest-lib" setup_dev_lib "tempest-lib" - # NOTE(mtreinish) For testing tempest-lib from git with tempest we need - # put the git version of tempest-lib in the tempest job's tox venv + # NOTE(mtreinish) For testing ``tempest-lib`` from git with Tempest we need to + # put the git version of ``tempest-lib`` in the Tempest job's tox venv export PIP_VIRTUAL_ENV=${PROJECT_VENV["tempest"]} setup_dev_lib "tempest-lib" unset PIP_VIRTUAL_ENV @@ -555,7 +551,7 @@ function install_tempest { popd } -# init_tempest() - Initialize ec2 images +# init_tempest() - Initialize EC2 images function init_tempest { local base_image_name=cirros-${CIRROS_VERSION}-${CIRROS_ARCH} # /opt/stack/devstack/files/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec @@ -564,7 +560,7 @@ function init_tempest { local ramdisk="$image_dir/${base_image_name}-initrd" local disk_image="$image_dir/${base_image_name}-blank.img" if is_service_enabled nova; then - # if the cirros uec downloaded and the system is uec capable + # If the CirrOS uec downloaded and the system is UEC capable if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a "$VIRT_DRIVER" != "openvz" \ -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then echo "Prepare aki/ari/ami Images" diff --git a/lib/tls b/lib/tls index 677895b9b2..09f1c2dfdd 100644 --- a/lib/tls +++ b/lib/tls @@ -32,6 +32,7 @@ # - is_ssl_enabled_service # - enable_mod_ssl + # Defaults # -------- @@ -92,7 +93,6 @@ function create_CA_base { cp 
/dev/null $ca_dir/index.txt } - # Create a new CA configuration file # create_CA_config ca-dir common-name function create_CA_config { @@ -248,7 +248,6 @@ function init_cert { fi } - # make_cert creates and signs a new certificate with the given commonName and CA # make_cert ca-dir cert-name "common-name" ["alt-name" ...] function make_cert { @@ -287,7 +286,6 @@ function make_cert { fi } - # Make an intermediate CA to sign everything else # make_int_CA ca-dir signing-ca-dir function make_int_CA { @@ -362,17 +360,16 @@ function is_ssl_enabled_service { return 1 } - # Ensure that the certificates for a service are in place. This function does # not check that a service is SSL enabled, this should already have been # completed. # # The function expects to find a certificate, key and CA certificate in the -# variables {service}_SSL_CERT, {service}_SSL_KEY and {service}_SSL_CA. For -# example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and -# KEYSTONE_SSL_CA. +# variables ``{service}_SSL_CERT``, ``{service}_SSL_KEY`` and ``{service}_SSL_CA``. For +# example for keystone this would be ``KEYSTONE_SSL_CERT``, ``KEYSTONE_SSL_KEY`` and +# ``KEYSTONE_SSL_CA``. # -# If it does not find these certificates then the devstack-issued server +# If it does not find these certificates then the DevStack-issued server # certificate, key and CA certificate will be associated with the service. # # If only some of the variables are provided then the function will quit. @@ -437,14 +434,12 @@ function start_tls_proxy { # Cleanup Functions # ================= - # Stops all stud processes. This should be done only after all services # using tls configuration are down. function stop_tls_proxy { killall stud } - # Remove CA along with configuration, as well as the local server certificate function cleanup_CA { rm -rf "$DATA_DIR/CA" "$DEVSTACK_CERT" diff --git a/lib/trove b/lib/trove index 5dd4f23611..b0a96100c2 100644 --- a/lib/trove +++ b/lib/trove @@ -21,6 +21,7 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace + # Defaults # -------- if is_service_enabled neutron; then @@ -80,7 +81,7 @@ function setup_trove_logging { fi } -# create_trove_accounts() - Set up common required trove accounts +# create_trove_accounts() - Set up common required Trove accounts # Tenant User Roles # ------------------------------------------------------------------ @@ -115,7 +116,6 @@ function cleanup_trove { rm -fr $TROVE_CONF_DIR/* } - # configure_trove() - Set config files, create data dirs, etc function configure_trove { setup_develop $TROVE_DIR diff --git a/run_tests.sh b/run_tests.sh index 3ba7e1023d..c6b7da64c0 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -11,9 +11,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# -# -# this runs a series of unit tests for devstack to ensure it's functioning + +# This runs a series of unit tests for DevStack to ensure it's functioning PASSES="" FAILURES="" diff --git a/samples/local.conf b/samples/local.conf index 63000b65ba..bd0cd9c0db 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -1,7 +1,6 @@ # Sample ``local.conf`` for user-configurable variables in ``stack.sh`` -# NOTE: Copy this file to the root ``devstack`` directory for it to -# work properly. +# NOTE: Copy this file to the root DevStack directory for it to work properly. # ``local.conf`` is a user-maintained settings file that is sourced from ``stackrc``. 
# This gives it the ability to override any variables set in ``stackrc``. diff --git a/samples/local.sh b/samples/local.sh index 664cb663fe..634f6ddb17 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -3,15 +3,14 @@ # Sample ``local.sh`` for user-configurable tasks to run automatically # at the successful conclusion of ``stack.sh``. -# NOTE: Copy this file to the root ``devstack`` directory for it to -# work properly. +# NOTE: Copy this file to the root DevStack directory for it to work properly. # This is a collection of some of the things we have found to be useful to run # after ``stack.sh`` to tweak the OpenStack configuration that DevStack produces. # These should be considered as samples and are unsupported DevStack code. -# Keep track of the devstack directory +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions @@ -50,7 +49,7 @@ if is_service_enabled nova; then source $TOP_DIR/openrc admin admin # Name of new flavor - # set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` + # set in ``local.conf`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` MI_NAME=m1.micro # Create micro flavor if not present diff --git a/stack.sh b/stack.sh index 090d527328..8ab82348f3 100755 --- a/stack.sh +++ b/stack.sh @@ -16,18 +16,11 @@ # (14.04 Trusty or newer), **Fedora** (F20 or newer), or **CentOS/RHEL** # (7 or newer) machine. (It may work on other platforms but support for those # platforms is left to those who added them to DevStack.) It should work in -# a VM or physical server. Additionally, we maintain a list of ``apt`` and +# a VM or physical server. Additionally, we maintain a list of ``deb`` and # ``rpm`` dependencies and other configuration files in this repo. # Learn more and get the most recent version at http://devstack.org -# check if someone has invoked with "sh" -if [[ "${POSIXLY_CORRECT}" == "y" ]]; then - echo "You appear to be running bash in POSIX compatibility mode." - echo "devstack uses bash features. \"./stack.sh\" should do the right thing" - exit 1 -fi - # Make sure custom grep options don't get in the way unset GREP_OPTIONS @@ -44,7 +37,7 @@ umask 022 # Not all distros have sbin in PATH for regular users. PATH=$PATH:/usr/local/sbin:/usr/sbin:/sbin -# Keep track of the devstack directory +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) # Check for uninitialized variables, a big cause of bugs @@ -53,6 +46,10 @@ if [[ -n "$NOUNSET" ]]; then set -o nounset fi + +# Configuration +# ============= + # Sanity Checks # ------------- @@ -61,7 +58,7 @@ if [[ -r $TOP_DIR/.stackenv ]]; then rm $TOP_DIR/.stackenv fi -# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config +# ``stack.sh`` keeps the list of ``deb`` and ``rpm`` dependencies, config # templates and other useful files in the ``files`` subdirectory FILES=$TOP_DIR/files if [ ! -d $FILES ]; then @@ -69,12 +66,23 @@ if [ ! -d $FILES ]; then fi # ``stack.sh`` keeps function libraries here +# Make sure ``$TOP_DIR/inc`` directory is present +if [ ! -d $TOP_DIR/inc ]; then + die $LINENO "missing devstack/inc" +fi + +# ``stack.sh`` keeps project libraries here # Make sure ``$TOP_DIR/lib`` directory is present if [ ! -d $TOP_DIR/lib ]; then die $LINENO "missing devstack/lib" fi -# Check if run as root +# Check if run in POSIX shell +if [[ "${POSIXLY_CORRECT}" == "y" ]]; then + echo "You are running POSIX compatibility mode, DevStack requires bash 4.2 or newer." 
+ exit 1 +fi + # OpenStack is designed to be run as a non-root user; Horizon will fail to run # as **root** since Apache will not serve content from **root** user). # ``stack.sh`` must not be run as **root**. It aborts and suggests one course of @@ -89,8 +97,6 @@ if [[ $EUID -eq 0 ]]; then exit 1 fi -# Print the kernel version -uname -a # Prepare the environment # ----------------------- @@ -112,6 +118,7 @@ source $TOP_DIR/lib/stack # and ``DISTRO`` GetDistro + # Global Settings # --------------- @@ -134,7 +141,6 @@ if [[ -r $TOP_DIR/local.conf ]]; then done fi - # ``stack.sh`` is customizable by setting environment variables. Override a # default setting via export:: # @@ -145,18 +151,20 @@ fi # # DATABASE_PASSWORD=simple ./stack.sh # -# Persistent variables can be placed in a ``localrc`` file:: +# Persistent variables can be placed in a ``local.conf`` file:: # +# [[local|localrc]] # DATABASE_PASSWORD=anothersecret # DATABASE_USER=hellaroot # # We try to have sensible defaults, so you should be able to run ``./stack.sh`` -# in most cases. ``localrc`` is not distributed with DevStack and will never +# in most cases. ``local.conf`` is not distributed with DevStack and will never # be overwritten by a DevStack update. # # DevStack distributes ``stackrc`` which contains locations for the OpenStack # repositories, branches to configure, and other configuration defaults. -# ``stackrc`` sources ``localrc`` to allow you to safely override those settings. +# ``stackrc`` sources the ``localrc`` section of ``local.conf`` to allow you to +# safely override those settings. if [[ ! -r $TOP_DIR/stackrc ]]; then die $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" @@ -188,34 +196,27 @@ fi # Make sure the proxy config is visible to sub-processes export_proxy_variables -# Remove services which were negated in ENABLED_SERVICES +# Remove services which were negated in ``ENABLED_SERVICES`` # using the "-" prefix (e.g., "-rabbit") instead of # calling disable_service(). 
disable_negated_services -# Look for obsolete stuff -# if [[ ,${ENABLED_SERVICES}, =~ ,"swift", ]]; then -# echo "FATAL: 'swift' is not supported as a service name" -# echo "FATAL: Use the actual swift service names to enable them as required:" -# echo "FATAL: s-proxy s-object s-container s-account" -# exit 1 -# fi # Configure sudo # -------------- -# We're not **root**, make sure ``sudo`` is available +# We're not running as **root**, so make sure ``sudo`` is available is_package_installed sudo || install_package sudo # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers -# Set up devstack sudoers +# Set up DevStack sudoers TEMPFILE=`mktemp` echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE -# Some binaries might be under /sbin or /usr/sbin, so make sure sudo will -# see them by forcing PATH +# Some binaries might be under ``/sbin`` or ``/usr/sbin``, so make sure sudo will +# see them by forcing ``PATH`` echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE echo "Defaults:$STACK_USER !requiretty" >> $TEMPFILE chmod 0440 $TEMPFILE @@ -226,7 +227,7 @@ sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh # Configure Distro Repositories # ----------------------------- -# For debian/ubuntu make apt attempt to retry network ops on it's own +# For Debian/Ubuntu make apt attempt to retry network ops on its own if is_ubuntu; then echo 'APT::Acquire::Retries "20";' | sudo tee /etc/apt/apt.conf.d/80retry >/dev/null fi @@ -237,7 +238,7 @@ fi if is_fedora && [[ $DISTRO == "rhel7" ]]; then # RHEL requires EPEL for many Open Stack dependencies - # note we always remove and install latest -- some environments + # NOTE: We always remove and install latest -- some environments # use snapshot images, and if EPEL version updates they break # unless we update them to latest version. if sudo yum repolist enabled epel | grep -q 'epel'; then @@ -248,7 +249,7 @@ if is_fedora && [[ $DISTRO == "rhel7" ]]; then # repo, then removes itself (as epel-release installed the # "real" repo). # - # you would think that rather than this, you could use + # You would think that rather than this, you could use # $releasever directly in .repo file we create below. However # RHEL gives a $releasever of "6Server" which breaks the path; # see https://bugzilla.redhat.com/show_bug.cgi?id=1150759 @@ -265,7 +266,7 @@ EOF sudo yum-config-manager --enable epel-bootstrap yum_install epel-release || \ die $LINENO "Error installing EPEL repo, cannot continue" - # epel rpm has installed it's version + # EPEL rpm has installed its version sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo # ... and also optional to be enabled @@ -300,7 +301,7 @@ sudo mkdir -p $DEST safe_chown -R $STACK_USER $DEST safe_chmod 0755 $DEST -# a basic test for $DEST path permissions (fatal on error unless skipped) +# Basic test for ``$DEST`` path permissions (fatal on error unless skipped) check_path_perm_sanity ${DEST} # Destination path for service data @@ -488,6 +489,9 @@ set -o errexit # an error. It is also useful for following along as the install occurs. set -o xtrace +# Print the kernel version +uname -a + # Reset the bundle of CA certificates SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem" rm -f $SSL_BUNDLE_FILE @@ -500,7 +504,7 @@ source $TOP_DIR/lib/rpc_backend # and the specified rpc backend is available on your platform.
check_rpc_backend -# Service to enable with SSL if USE_SSL is True +# Service to enable with SSL if ``USE_SSL`` is True SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron" if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then @@ -514,7 +518,7 @@ fi # defaults before other services are run run_phase override_defaults -# Import apache functions +# Import Apache functions source $TOP_DIR/lib/apache # Import TLS functions @@ -598,8 +602,9 @@ function read_password { # Database Configuration +# ---------------------- -# To select between database backends, add the following to ``localrc``: +# To select between database backends, add the following to ``local.conf``: # # disable_service mysql # enable_service postgresql @@ -611,9 +616,10 @@ initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || # Queue Configuration +# ------------------- # Rabbit connection info -# In multi node devstack, second node needs RABBIT_USERID, but rabbit +# In multi node DevStack, second node needs ``RABBIT_USERID``, but rabbit # isn't enabled. RABBIT_USERID=${RABBIT_USERID:-stackrabbit} if is_service_enabled rabbit; then @@ -623,6 +629,7 @@ fi # Keystone +# -------- if is_service_enabled keystone; then # The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is @@ -634,14 +641,14 @@ if is_service_enabled keystone; then read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." # Keystone can now optionally install OpenLDAP by enabling the ``ldap`` - # service in ``localrc`` (e.g. ``enable_service ldap``). + # service in ``local.conf`` (e.g. ``enable_service ldap``). # To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP`` - # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``localrc``. To enable the + # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``local.conf``. To enable the # Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``) # set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g. - # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``localrc``. + # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``local.conf``. - # only request ldap password if the service is enabled + # Only request LDAP password if the service is enabled if is_service_enabled ldap; then read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP" fi @@ -649,6 +656,7 @@ fi # Swift +# ----- if is_service_enabled s-proxy; then # We only ask for Swift Hash if we have enabled swift service. 
@@ -672,14 +680,14 @@ fi echo_summary "Installing package prerequisites" source $TOP_DIR/tools/install_prereqs.sh -# Configure an appropriate python environment +# Configure an appropriate Python environment if [[ "$OFFLINE" != "True" ]]; then PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh fi TRACK_DEPENDS=${TRACK_DEPENDS:-False} -# Install python packages into a virtualenv so that we can track them +# Install Python packages into a virtualenv so that we can track them if [[ $TRACK_DEPENDS = True ]]; then echo_summary "Installing Python packages into a virtualenv $DEST/.venv" pip_install -U virtualenv @@ -728,10 +736,10 @@ echo_summary "Installing OpenStack project source" # Install required infra support libraries install_infra -# Install oslo libraries that have graduated +# Install Oslo libraries install_oslo -# Install clients libraries +# Install client libraries install_keystoneclient install_glanceclient install_cinderclient @@ -749,7 +757,6 @@ fi # Install middleware install_keystonemiddleware - if is_service_enabled keystone; then if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then stack_install_service keystone @@ -766,7 +773,7 @@ if is_service_enabled s-proxy; then # swift3 middleware to provide S3 emulation to Swift if is_service_enabled swift3; then - # replace the nova-objectstore port by the swift port + # Replace the nova-objectstore port by the swift port S3_SERVICE_PORT=8080 git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH setup_develop $SWIFT3_DIR @@ -774,23 +781,25 @@ if is_service_enabled s-proxy; then fi if is_service_enabled g-api n-api; then - # image catalog service + # Image catalog service stack_install_service glance configure_glance fi if is_service_enabled cinder; then + # Block volume service stack_install_service cinder configure_cinder fi if is_service_enabled neutron; then + # Network service stack_install_service neutron install_neutron_third_party fi if is_service_enabled nova; then - # compute service + # Compute service stack_install_service nova cleanup_nova configure_nova @@ -822,18 +831,18 @@ if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then configure_CA init_CA init_cert - # Add name to /etc/hosts - # don't be naive and add to existing line! + # Add name to ``/etc/hosts``. + # Don't be naive and add to existing line! fi + # Extras Install # -------------- # Phase: install run_phase stack install - -# install the OpenStack client, needed for most setup commands +# Install the OpenStack client, needed for most setup commands if use_library_from_git "python-openstackclient"; then git_clone_by_name "python-openstackclient" setup_dev_lib "python-openstackclient" @@ -841,7 +850,6 @@ else pip_install 'python-openstackclient>=1.0.2' fi - if [[ $TRACK_DEPENDS = True ]]; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip if ! 
diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then @@ -934,7 +942,7 @@ if [[ "$USE_SCREEN" == "True" ]]; then screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true fi -# Clear screen rc file +# Clear ``screenrc`` file SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc if [[ -e $SCREENRC ]]; then rm -f $SCREENRC @@ -943,14 +951,16 @@ fi # Initialize the directory for service status check init_service_check + +# Start Services +# ============== + # Dstat -# ------- +# ----- # A better kind of sysstat, with the top process per time slice start_dstat -# Start Services -# ============== # Keystone # -------- @@ -972,7 +982,7 @@ if is_service_enabled keystone; then SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0 fi - # Setup OpenStackclient token-flow auth + # Setup OpenStackClient token-endpoint auth export OS_TOKEN=$SERVICE_TOKEN export OS_URL=$SERVICE_ENDPOINT @@ -994,10 +1004,10 @@ if is_service_enabled keystone; then create_heat_accounts fi - # Begone token-flow auth + # Begone token auth unset OS_TOKEN OS_URL - # Set up password-flow auth creds now that keystone is bootstrapped + # Set up password auth credentials now that Keystone is bootstrapped export OS_AUTH_URL=$SERVICE_ENDPOINT export OS_TENANT_NAME=admin export OS_USERNAME=admin @@ -1042,7 +1052,7 @@ if is_service_enabled neutron; then echo_summary "Configuring Neutron" configure_neutron - # Run init_neutron only on the node hosting the neutron API server + # Run init_neutron only on the node hosting the Neutron API server if is_service_enabled $DATABASE_BACKENDS && is_service_enabled q-svc; then init_neutron fi @@ -1118,6 +1128,7 @@ if is_service_enabled nova; then init_nova_cells fi + # Extras Configuration # ==================== @@ -1128,7 +1139,7 @@ run_phase stack post-config # Local Configuration # =================== -# Apply configuration from local.conf if it exists for layer 2 services +# Apply configuration from ``local.conf`` if it exists for layer 2 services # Phase: post-config merge_config_group $TOP_DIR/local.conf post-config @@ -1150,18 +1161,16 @@ if is_service_enabled glance; then start_glance fi + # Install Images # ============== -# Upload an image to glance. +# Upload an image to Glance. # -# The default image is cirros, a small testing image which lets you login as **root** -# cirros has a ``cloud-init`` analog supporting login via keypair and sending +# The default image is CirrOS, a small testing image which lets you login as **root** +# CirrOS has a ``cloud-init`` analog supporting login via keypair and sending # scripts as userdata. -# See https://help.ubuntu.com/community/CloudInit for more on cloud-init -# -# Override ``IMAGE_URLS`` with a comma-separated list of UEC images. 
-# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz +# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init`` if is_service_enabled g-reg; then TOKEN=$(keystone token-get | grep ' id ' | get_field 2) @@ -1179,7 +1188,7 @@ if is_service_enabled g-reg; then done fi -# Create an access key and secret key for nova ec2 register image +# Create an access key and secret key for Nova EC2 register image if is_service_enabled keystone && is_service_enabled swift3 && is_service_enabled nova; then eval $(openstack ec2 credentials create --user nova --project $SERVICE_TENANT_NAME -f shell -c access -c secret) iniset $NOVA_CONF DEFAULT s3_access_key "$access" @@ -1242,7 +1251,7 @@ if is_service_enabled ceilometer; then start_ceilometer fi -# Configure and launch heat engine, api and metadata +# Configure and launch Heat engine, api and metadata if is_service_enabled heat; then # Initialize heat echo_summary "Configuring Heat" @@ -1287,30 +1296,34 @@ for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ done -# Local Configuration -# =================== +# Wrapup configuration +# ==================== -# Apply configuration from local.conf if it exists for layer 2 services +# local.conf extra +# ---------------- + +# Apply configuration from ``local.conf`` if it exists for layer 2 services # Phase: extra merge_config_group $TOP_DIR/local.conf extra # Run extras -# ========== +# ---------- # Phase: extra run_phase stack extra -# Local Configuration -# =================== -# Apply configuration from local.conf if it exists for layer 2 services +# local.conf post-extra +# --------------------- + +# Apply late configuration from ``local.conf`` if it exists for layer 2 services # Phase: post-extra merge_config_group $TOP_DIR/local.conf post-extra # Run local script -# ================ +# ---------------- # Run ``local.sh`` if it exists to perform user-managed tasks if [[ -x $TOP_DIR/local.sh ]]; then @@ -1338,6 +1351,7 @@ if is_service_enabled cinder; then fi fi + # Fin # === @@ -1354,11 +1368,12 @@ fi # Using the cloud -# --------------- +# =============== echo "" echo "" echo "" +echo "This is your host ip: $HOST_IP" # If you installed Horizon on this server you should be able # to access the site using your browser. 
@@ -1368,15 +1383,11 @@ fi # If Keystone is present you can point ``nova`` cli to this server if is_service_enabled keystone; then - echo "Keystone is serving at $KEYSTONE_SERVICE_URI/v2.0/" - echo "Examples on using novaclient command line is in exercise.sh" + echo "Keystone is serving at $KEYSTONE_SERVICE_URI/" echo "The default users are: admin and demo" echo "The password: $ADMIN_PASSWORD" fi -# Echo ``HOST_IP`` - useful for ``build_uec.sh``, which uses dhcp to give the instance an address -echo "This is your host ip: $HOST_IP" - # Warn that a deprecated feature was used if [[ -n "$DEPRECATED_TEXT" ]]; then echo_summary "WARNING: $DEPRECATED_TEXT" diff --git a/stackrc b/stackrc index 143298c347..c27ead3c24 100644 --- a/stackrc +++ b/stackrc @@ -5,7 +5,7 @@ # Find the other rc files RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) -# Source required devstack functions and globals +# Source required DevStack functions and globals source $RC_DIR/functions # Destination path for installation @@ -41,20 +41,20 @@ REGION_NAME=${REGION_NAME:-RegionOne} # enable_service q-dhcp # enable_service q-l3 # enable_service q-meta -# # Optional, to enable tempest configuration as part of devstack +# # Optional, to enable tempest configuration as part of DevStack # enable_service tempest -# this allows us to pass ENABLED_SERVICES +# This allows us to pass ``ENABLED_SERVICES`` if ! isset ENABLED_SERVICES ; then - # core compute (glance / keystone / nova (+ nova-network)) + # Compute (Glance / Keystone / Nova (+ nova-network)) ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,n-sch,n-novnc,n-xvnc,n-cauth - # cinder + # Cinder ENABLED_SERVICES+=,c-sch,c-api,c-vol - # heat + # Heat ENABLED_SERVICES+=,h-eng,h-api,h-api-cfn,h-api-cw - # dashboard + # Dashboard ENABLED_SERVICES+=,horizon - # additional services + # Additional services ENABLED_SERVICES+=,rabbit,tempest,mysql fi @@ -79,7 +79,7 @@ ENABLE_HTTPD_MOD_WSGI_SERVICES=True # Tell Tempest which services are available. The default is set here as # Tempest falls late in the configuration sequence. This differs from # ``ENABLED_SERVICES`` in that the project names are used here rather than -# the service names, i.e.: TEMPEST_SERVICES="key,glance,nova" +# the service names, i.e.: ``TEMPEST_SERVICES="key,glance,nova"`` TEMPEST_SERVICES="" # Set the default Nova APIs to enable @@ -145,6 +145,7 @@ GIT_TIMEOUT=${GIT_TIMEOUT:-0} # but pass through any extras) REQUIREMENTS_MODE=${REQUIREMENTS_MODE:-strict} + # Repositories # ------------ @@ -155,16 +156,17 @@ GIT_BASE=${GIT_BASE:-git://git.openstack.org} # Which libraries should we install from git instead of using released # versions on pypi? # -# By default devstack is now installing libraries from pypi instead of +# By default DevStack is now installing libraries from pypi instead of # from git repositories by default. This works great if you are # developing server components, but if you want to develop libraries -# and see them live in devstack you need to tell devstack it should +# and see them live in DevStack you need to tell DevStack it should # install them from git. # # ex: LIBS_FROM_GIT=python-keystoneclient,oslo.config # # Will install those 2 libraries from git, the rest from pypi. 
+
 ##############
 #
 # OpenStack Server Components
@@ -231,6 +233,7 @@ SWIFT_BRANCH=${SWIFT_BRANCH:-master}
 TROVE_REPO=${TROVE_REPO:-${GIT_BASE}/openstack/trove.git}
 TROVE_BRANCH=${TROVE_BRANCH:-master}
+
 ##############
 #
 # Testing Components
@@ -306,6 +309,7 @@ GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-master}
 # this doesn't exist in a lib file, so set it here
 GITDIR["python-openstackclient"]=$DEST/python-openstackclient
+
 ###################
 #
 # Oslo Libraries
@@ -396,6 +400,7 @@ GITBRANCH["tooz"]=${TOOZ_BRANCH:-master}
 GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
 GITBRANCH["pbr"]=${PBR_BRANCH:-master}
+
 ##################
 #
 # Libraries managed by OpenStack programs (non oslo)
@@ -453,6 +458,7 @@ OCC_BRANCH=${OCC_BRANCH:-master}
 ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git}
 ORC_BRANCH=${ORC_BRANCH:-master}
+
 #################
 #
 # 3rd Party Components (non pip installable)
@@ -474,7 +480,6 @@ SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.gi
 SPICE_BRANCH=${SPICE_BRANCH:-master}
-
 # Nova hypervisor configuration. We default to libvirt with **kvm** but will
 # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
 # also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core
@@ -641,7 +646,7 @@ ENABLE_DEBUG_LOG_LEVEL=$(trueorfalse True ENABLE_DEBUG_LOG_LEVEL)
 # Set fixed and floating range here so we can make sure not to use addresses
 # from either range when attempting to guess the IP to use for the host.
-# Note that setting FIXED_RANGE may be necessary when running DevStack
+# Note that setting ``FIXED_RANGE`` may be necessary when running DevStack
 # in an OpenStack cloud that uses either of these address ranges internally.
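As a purely illustrative example of the note above, a host that itself runs inside a cloud using 10.0.0.0/24 internally could pick non-overlapping ranges in the ``localrc`` section of ``local.conf``; the values below are made up for the sketch:

    [[local|localrc]]
    FIXED_RANGE=10.254.1.0/24
    FLOATING_RANGE=172.24.5.0/24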
 FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
 FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
@@ -669,9 +674,10 @@ LOG_COLOR=$(trueorfalse True LOG_COLOR)
 # Set to 0 to disable shallow cloning
 GIT_DEPTH=${GIT_DEPTH:-0}
-# Use native SSL for servers in SSL_ENABLED_SERVICES
+# Use native SSL for servers in ``SSL_ENABLED_SERVICES``
 USE_SSL=$(trueorfalse False USE_SSL)
+
 # Following entries need to be last items in file
 # Compatibility bits required by other callers like Grenade
@@ -693,7 +699,6 @@ USE_SSL=$(trueorfalse False USE_SSL)
 # For compat, if SCREEN_LOGDIR is set, it will be used to create back-compat symlinks to the LOGDIR
 # symlinks to SCREEN_LOGDIR (compat)
-
 # Set up new logging defaults
 if [[ -z "${LOGDIR:-}" ]]; then
     default_logdir=$DEST/logs
@@ -718,8 +723,8 @@ if [[ -z "${LOGDIR:-}" ]]; then
     unset default_logdir logfile
 fi
-# LOGDIR is always set at this point so it is not useful as a 'enable' for service logs
-# SCREEN_LOGDIR may be set, it is useful to enable the compat symlinks
+# ``LOGDIR`` is always set at this point so it is not useful as an 'enable' for service logs
+# ``SCREEN_LOGDIR`` may be set; it is useful to enable the compat symlinks
 # Local variables:
 # mode: shell-script
diff --git a/tools/build_docs.sh b/tools/build_docs.sh
index 2aa0a0ac04..fda86c05cd 100755
--- a/tools/build_docs.sh
+++ b/tools/build_docs.sh
@@ -2,8 +2,8 @@
 # **build_docs.sh** - Build the docs for DevStack
 #
-# - Install shocco if not found on PATH and INSTALL_SHOCCO is set
-# - Clone MASTER_REPO branch MASTER_BRANCH
+# - Install shocco if not found on ``PATH`` and ``INSTALL_SHOCCO`` is set
+# - Clone ``MASTER_REPO`` branch ``MASTER_BRANCH``
 # - Re-creates ``doc/build/html`` directory from existing repo + new generated script docs
 # Usage:
@@ -16,7 +16,7 @@
 HTML_BUILD=doc/build/html
-# Keep track of the devstack directory
+# Keep track of the DevStack directory
 TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
 # Uses this shocco branch: https://github.com/dtroyer/shocco/tree/rst_support
diff --git a/tools/build_venv.sh b/tools/build_venv.sh
index 11d1d35208..cfa39a82e0 100755
--- a/tools/build_venv.sh
+++ b/tools/build_venv.sh
@@ -4,11 +4,12 @@
 #
 # build_venv.sh venv-path [package [...]]
 #
+# Installs basic common prereq packages that require compilation
+# to allow quick copying of resulting venv as a baseline
+#
 # Assumes:
 # - a useful pip is installed
 # - virtualenv will be installed by pip
-# - installs basic common prereq packages that require compilation
-# to allow quick copying of resulting venv as a baseline
 VENV_DEST=${1:-.venv}
@@ -16,14 +17,14 @@ shift
 MORE_PACKAGES="$@"
-# If TOP_DIR is set we're being sourced rather than running stand-alone
+# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
 # or in a sub-shell
 if [[ -z "$TOP_DIR" ]]; then
     set -o errexit
     set -o nounset
-    # Keep track of the devstack directory
+    # Keep track of the DevStack directory
     TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
     FILES=$TOP_DIR/files
diff --git a/tools/build_wheels.sh b/tools/build_wheels.sh
index f1740dfbd6..c57568fa64 100755
--- a/tools/build_wheels.sh
+++ b/tools/build_wheels.sh
@@ -4,21 +4,22 @@
 #
 # build_wheels.sh [package [...]]
 #
-# System package prerequisites listed in files/*/devlibs will be installed
+# System package prerequisites listed in ``files/*/devlibs`` will be installed
 #
 # Builds wheels for all virtual env requirements listed in
 # ``venv-requirements.txt`` plus any supplied on the command line.
 #
-# Assumes ``tools/install_pip.sh`` has been run and a suitable pip/setuptools is available.
+# Assumes:
+# - ``tools/install_pip.sh`` has been run and a suitable ``pip/setuptools`` is available.
-# If TOP_DIR is set we're being sourced rather than running stand-alone
+# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
 # or in a sub-shell
 if [[ -z "$TOP_DIR" ]]; then
     set -o errexit
     set -o nounset
-    # Keep track of the devstack directory
+    # Keep track of the DevStack directory
     TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
     FILES=$TOP_DIR/files
@@ -59,7 +60,7 @@ virtualenv $TMP_VENV_PATH
 # Install modern pip and wheel
 PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install -U pip wheel
-# VENV_PACKAGES is a list of packages we want to pre-install
+# ``VENV_PACKAGES`` is a list of packages we want to pre-install
 VENV_PACKAGE_FILE=$FILES/venv-requirements.txt
 if [[ -r $VENV_PACKAGE_FILE ]]; then
     VENV_PACKAGES=$(grep -v '^#' $VENV_PACKAGE_FILE)
diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh
index 9c29ecd901..b49164b22a 100755
--- a/tools/create-stack-user.sh
+++ b/tools/create-stack-user.sh
@@ -17,7 +17,7 @@
 set -o errexit
-# Keep track of the devstack directory
+# Keep track of the DevStack directory
 TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
 # Import common functions
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index f8edd16ecd..2efb4e0987 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -17,7 +17,7 @@
 # - uninstall firewalld (f20 only)
-# If TOP_DIR is set we're being sourced rather than running stand-alone
+# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
 # or in a sub-shell
 if [[ -z "$TOP_DIR" ]]; then
     set -o errexit
@@ -27,7 +27,7 @@ if [[ -z "$TOP_DIR" ]]; then
     TOOLS_DIR=$(cd $(dirname "$0") && pwd)
     TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
-    # Change dir to top of devstack
+    # Change dir to top of DevStack
     cd $TOP_DIR
     # Import common functions
@@ -38,7 +38,7 @@ fi
 # Keystone Port Reservation
 # -------------------------
-# Reserve and prevent $KEYSTONE_AUTH_PORT and $KEYSTONE_AUTH_PORT_INT from
+# Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from
 # being used as ephemeral ports by the system. The default(s) are 35357 and
 # 35358 which are in the Linux defined ephemeral port range (in disagreement
 # with the IANA ephemeral port range). This is a workaround for bug #1253482
@@ -47,9 +47,9 @@ fi
 # exception into the Kernel for the Keystone AUTH ports.
 keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358}
-# only do the reserved ports when available, on some system (like containers)
-# where it's not exposed we are almost pretty sure these ports would be
-# exclusive for our devstack.
+# Only reserve the ports when the sysctl is available; on some systems (like
+# containers) where it is not exposed, we can be reasonably sure these ports
+# will be exclusive to our DevStack.
 if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then
     # Get any currently reserved ports, strip off leading whitespace
     reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //')
@@ -59,7 +59,7 @@ if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then
         sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports}
     else
         # If there are currently reserved ports, keep those and also reserve the
-        # keystone specific ports. Duplicate reservations are merged into a single
+        # Keystone specific ports. Duplicate reservations are merged into a single
         # reservation (or range) automatically by the kernel.
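To make the merge behaviour concrete, a hedged sketch of checking the reservation after the commands below have run; the exact formatting of the sysctl output depends on the kernel:

    # Illustrative only: inspect the reserved ports once stack.sh has run
    sysctl net.ipv4.ip_local_reserved_ports
    # e.g. net.ipv4.ip_local_reserved_ports = 35357-35358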
         sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports}
     fi
diff --git a/tools/image_list.sh b/tools/image_list.sh
index 88c1d09379..204280704e 100755
--- a/tools/image_list.sh
+++ b/tools/image_list.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
-# Keep track of the devstack directory
+# Keep track of the DevStack directory
 TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
 source $TOP_DIR/functions
diff --git a/tools/info.sh b/tools/info.sh
index a8f9544073..433206e8ae 100755
--- a/tools/info.sh
+++ b/tools/info.sh
@@ -2,7 +2,7 @@
 # **info.sh**
-# Produce a report on the state of devstack installs
+# Produce a report on the state of DevStack installs
 #
 # Output fields are separated with '|' chars
 # Output types are git,localrc,os,pip,pkg:
@@ -14,7 +14,7 @@
 # pkg||
 function usage {
-    echo "$0 - Report on the devstack configuration"
+    echo "$0 - Report on the DevStack configuration"
     echo ""
     echo "Usage: $0"
     exit 1
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index b7b40c7486..0f7c962b2b 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -16,7 +16,7 @@ set -o xtrace
 TOOLS_DIR=$(cd $(dirname "$0") && pwd)
 TOP_DIR=`cd $TOOLS_DIR/..; pwd`
-# Change dir to top of devstack
+# Change dir to top of DevStack
 cd $TOP_DIR
 # Import common functions
@@ -42,11 +42,11 @@ function get_versions {
 function install_get_pip {
-    # the openstack gate and others put a cached version of get-pip.py
+    # The OpenStack gate and others put a cached version of get-pip.py
     # for this to find, explicitly to avoid download issues.
     #
-    # However, if devstack *did* download the file, we want to check
-    # for updates; people can leave thier stacks around for a long
+    # However, if DevStack *did* download the file, we want to check
+    # for updates; people can leave their stacks around for a long
     # time and in the mean-time pip might get upgraded.
     #
     # Thus we use curl's "-z" feature to always check the modified
@@ -74,7 +74,7 @@ function configure_pypi_alternative_url {
         touch $PIP_CONFIG_FILE
     fi
     if ! ini_has_option "$PIP_CONFIG_FILE" "global" "index-url"; then
-        #it means that the index-url does not exist
+        # It means that the index-url does not exist
         iniset "$PIP_CONFIG_FILE" "global" "index-url" "$PYPI_OVERRIDE"
     fi
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index 917980ccc5..a07e58d3e6 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -18,10 +18,10 @@ while getopts ":f" opt; do
     esac
 done
-# If TOP_DIR is set we're being sourced rather than running stand-alone
+# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
 # or in a sub-shell
 if [[ -z "$TOP_DIR" ]]; then
-    # Keep track of the devstack directory
+    # Keep track of the DevStack directory
     TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
     # Import common functions
@@ -65,7 +65,7 @@ PACKAGES=$(get_packages general $ENABLED_SERVICES)
 PACKAGES="$PACKAGES $(get_plugin_packages)"
 if is_ubuntu && echo $PACKAGES | grep -q dkms ; then
-    # ensure headers for the running kernel are installed for any DKMS builds
+    # Ensure headers for the running kernel are installed for any DKMS builds
     PACKAGES="$PACKAGES linux-headers-$(uname -r)"
 fi
diff --git a/tools/ironic/scripts/create-node b/tools/ironic/scripts/create-node
index 25b53d47f3..b018acddc9 100755
--- a/tools/ironic/scripts/create-node
+++ b/tools/ironic/scripts/create-node
@@ -6,13 +6,13 @@
 set -ex
-# Keep track of the devstack directory
+# Keep track of the DevStack directory
 TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
 NAME=$1
 CPU=$2
 MEM=$(( 1024 * $3 ))
-# extra G to allow fuzz for partition table : flavor size and registered size
+# Extra G to allow fuzz for the partition table: flavor size and registered size
 # need to be different to actual size.
 DISK=$(( $4 + 1))
diff --git a/tools/ironic/scripts/setup-network b/tools/ironic/scripts/setup-network
index e326bf8ccd..83308ed416 100755
--- a/tools/ironic/scripts/setup-network
+++ b/tools/ironic/scripts/setup-network
@@ -9,7 +9,7 @@ set -exu
 LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
-# Keep track of the devstack directory
+# Keep track of the DevStack directory
 TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
 BRIDGE_SUFFIX=${1:-''}
 BRIDGE_NAME=brbm$BRIDGE_SUFFIX
@@ -19,7 +19,7 @@ export VIRSH_DEFAULT_CONNECT_URI="$LIBVIRT_CONNECT_URI"
 # Only add bridge if missing
 (sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME}
-# remove bridge before replacing it.
+# Remove bridge before replacing it.
 (virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME}
 (virsh net-list --inactive | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME}
diff --git a/tools/outfilter.py b/tools/outfilter.py
index 9686a387c2..f82939be1d 100755
--- a/tools/outfilter.py
+++ b/tools/outfilter.py
@@ -14,8 +14,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-# This is an output filter to filter and timestamp the logs from grenade and
-# devstack. Largely our awk filters got beyond the complexity level which were
+# This is an output filter to filter and timestamp the logs from Grenade and
+# DevStack. Our awk filters grew beyond the complexity level that was
 # sustainable, so this provides us much more control in a single place.
 #
 # The overhead of running python should be less than execing `date` a million
@@ -32,7 +32,7 @@ HAS_DATE = re.compile('^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|')
 def get_options():
     parser = argparse.ArgumentParser(
-        description='Filter output by devstack and friends')
+        description='Filter output by DevStack and friends')
     parser.add_argument('-o', '--outfile',
                         help='Output file for content',
                         default=None)
@@ -52,7 +52,7 @@ def main():
     if opts.outfile:
         outfile = open(opts.outfile, 'a', 0)
-    # otherwise fileinput reprocess args as files
+    # Otherwise fileinput reprocesses args as files
     sys.argv = []
     while True:
         line = sys.stdin.readline()
@@ -63,9 +63,9 @@ def main():
         if skip_line(line):
             continue
-        # this prevents us from nesting date lines, because
-        # we'd like to pull this in directly in grenade and not double
-        # up on devstack lines
+        # This prevents us from nesting date lines, because
+        # we'd like to pull this in directly in Grenade and not double
+        # up on DevStack lines
         if HAS_DATE.search(line) is None:
             now = datetime.datetime.utcnow()
             line = ("%s | %s" % (
diff --git a/unstack.sh b/unstack.sh
index c45af7400c..30981fd3c6 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -19,7 +19,7 @@ while getopts ":a" opt; do
     esac
 done
-# Keep track of the current devstack directory.
+# Keep track of the current DevStack directory.
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 FILES=$TOP_DIR/files
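For completeness, ``unstack.sh`` is normally run from the top of the DevStack checkout; the ``-a`` option is accepted by the getopts loop shown above, though its effect is outside this excerpt:

    # Tear down what stack.sh started
    ./unstack.sh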