# lib/nova
# Functions to control the configuration and operation of the **Nova** service

# Dependencies:
# ``functions`` file
# ``DEST``, ``DATA_DIR`` must be defined
# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
# ``LIBVIRT_TYPE`` must be defined
# ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined
# ``KEYSTONE_TOKEN_FORMAT`` must be defined

# ``stack.sh`` calls the entry points in this order:
#
# install_nova
# configure_nova
# create_nova_conf
# init_nova
# start_nova
# stop_nova
# cleanup_nova

# Save trace setting so the caller's xtrace state can be restored at the
# bottom of this file.
XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Set up default directories
NOVA_DIR=$DEST/nova
NOVACLIENT_DIR=$DEST/python-novaclient
NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova}
# INSTANCES_PATH is the previous name for this
NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}}
NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova}

NOVA_CONF_DIR=/etc/nova
NOVA_CONF=$NOVA_CONF_DIR/nova.conf
NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}

# Public facing bits
NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST}
NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774}
NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}

# Support entry points installation of console scripts
if [[ -d $NOVA_DIR/bin ]]; then
    NOVA_BIN_DIR=$NOVA_DIR/bin
else
    NOVA_BIN_DIR=/usr/local/bin
fi

# Set the paths of certain binaries
NOVA_ROOTWRAP=$(get_rootwrap_location nova)

# Allow rate limiting to be turned off for testing, like for Tempest
# NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting
API_RATE_LIMIT=${API_RATE_LIMIT:-"True"}

# Nova supports pluggable schedulers.  The default ``FilterScheduler``
# should work in most cases.
SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}

QEMU_CONF=/etc/libvirt/qemu.conf


# Entry Points
# ------------

# Append a single ``key=value`` line to ``NOVA_CONF``
function add_nova_opt {
    echo "$1" >>$NOVA_CONF
}

# Helper to clean iptables rules: dump the ruleset, strip packet/byte
# counters, keep only nova-created entries and replay them as deletions.
function clean_iptables() {
    # Delete rules
    sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash
    # Delete nat rules
    sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables -t nat",$0}' | bash
    # Delete chains
    sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables",$0}' | bash
    # Delete nat chains
    sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables -t nat",$0}' | bash
}

# cleanup_nova() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_nova() {
    if is_service_enabled n-cpu; then
        # Clean iptables from previous runs
        clean_iptables

        # Destroy old instances whose names match our prefix
        instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
        if [ ! "$instances" = "" ]; then
            echo $instances | xargs -n1 sudo virsh destroy || true
            echo $instances | xargs -n1 sudo virsh undefine || true
        fi

        # Logout and delete iscsi sessions
        sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | xargs sudo iscsiadm --mode node --logout || true
        # BUGFIX: the target names must be passed as arguments via xargs;
        # the original piped them into iscsiadm's stdin, which ignores them.
        sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | xargs sudo iscsiadm --mode node --op delete || true

        # Clean out the instances directory.
        sudo rm -rf $NOVA_INSTANCES_PATH/*
    fi
}

# configure_novaclient() - Set config files, create data dirs, etc
function configure_novaclient() {
    setup_develop $NOVACLIENT_DIR
}

# configure_nova_rootwrap() - configure Nova's rootwrap
function configure_nova_rootwrap() {
    # Deploy new rootwrap filters files (owned by root).
    # Wipe any existing rootwrap.d files first
    if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then
        sudo rm -rf $NOVA_CONF_DIR/rootwrap.d
    fi
    # Deploy filters to /etc/nova/rootwrap.d
    sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d
    sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d
    sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d
    sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/*
    # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d
    sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/
    sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf
    sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf
    sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf
    # Specify rootwrap.conf as first parameter to nova-rootwrap
    ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *"

    # Set up the rootwrap sudoers for nova
    TEMPFILE=`mktemp`
    echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
    chmod 0440 $TEMPFILE
    sudo chown root:root $TEMPFILE
    sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap
}

# configure_nova() - Set config files, create data dirs, etc
function configure_nova() {
    setup_develop $NOVA_DIR

    # Put config files in ``/etc/nova`` for everyone to find
    if [[ ! -d $NOVA_CONF_DIR ]]; then
        sudo mkdir -p $NOVA_CONF_DIR
    fi
    sudo chown `whoami` $NOVA_CONF_DIR

    cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR

    configure_nova_rootwrap

    if is_service_enabled n-api; then
        # Use the sample http middleware configuration supplied in the
        # Nova sources.  This paste config adds the configuration required
        # for Nova to validate Keystone tokens.

        # Remove legacy paste config if present
        rm -f $NOVA_DIR/bin/nova-api-paste.ini

        # Get the sample configuration file in place
        cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR

        # Rewrite the authtoken configuration for our Keystone service.
        # This is a bit defensive to allow the sample file some variance.
        sed -e "
            /^admin_token/i admin_tenant_name = $SERVICE_TENANT_NAME
            /admin_tenant_name/s/^.*$/admin_tenant_name = $SERVICE_TENANT_NAME/;
            /admin_user/s/^.*$/admin_user = nova/;
            /admin_password/s/^.*$/admin_password = $SERVICE_PASSWORD/;
            s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
            s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g;
        " -i $NOVA_API_PASTE_INI
        iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $SERVICE_HOST
        if is_service_enabled tls-proxy; then
            iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $SERVICE_PROTOCOL
        fi
    fi

    iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR

    if is_service_enabled n-cpu; then
        # Force IP forwarding on, just on case
        sudo sysctl -w net.ipv4.ip_forward=1

        # Attempt to load modules: network block device - used to manage qcow images
        sudo modprobe nbd || true

        # Check for kvm (hardware based virtualization).  If unable to initialize
        # kvm, we drop back to the slower emulation mode (qemu).  Note: many systems
        # come with hardware virtualization disabled in BIOS.
        if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then
            sudo modprobe kvm || true
            if [ ! -e /dev/kvm ]; then
                echo "WARNING: Switching to QEMU"
                LIBVIRT_TYPE=qemu
                if which selinuxenabled 2>&1 > /dev/null && selinuxenabled; then
                    # https://bugzilla.redhat.com/show_bug.cgi?id=753589
                    sudo setsebool virt_use_execmem on
                fi
            fi
        fi

        # Install and configure **LXC** if specified.  LXC is another approach to
        # splitting a system into many smaller parts.  LXC uses cgroups and chroot
        # to simulate multiple systems.
        if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
            if is_ubuntu; then
                if [[ ! "$DISTRO" > natty ]]; then
                    cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
                    sudo mkdir -p /cgroup
                    if ! grep -q cgroup /etc/fstab; then
                        echo "$cgline" | sudo tee -a /etc/fstab
                    fi
                    if ! mount -n | grep -q cgroup; then
                        sudo mount /cgroup
                    fi
                fi
            fi
        fi

        if is_service_enabled quantum && is_quantum_ovs_base_plugin "$Q_PLUGIN" && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then
            # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
            # NOTE(review): this heredoc was garbled in the file as received;
            # restored from the upstream DevStack device ACL list -- confirm.
            cat <<EOF | sudo tee -a $QEMU_CONF
cgroup_device_acl = [
    "/dev/null", "/dev/full", "/dev/zero",
    "/dev/random", "/dev/urandom",
    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
    "/dev/rtc", "/dev/hpet","/dev/net/tun",
]
EOF
        fi

        # NOTE(review): restored -- ``restart_service $LIBVIRT_DAEMON`` below
        # requires this assignment, which was lost in the garbled source.
        if is_ubuntu; then
            LIBVIRT_DAEMON=libvirt-bin
        else
            LIBVIRT_DAEMON=libvirtd
        fi

        if is_fedora; then
            # Grant members of the libvirtd group management access to
            # libvirt via a local polkit authority rule.
            sudo bash -c 'cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
[libvirt Management Access]
Identity=unix-group:libvirtd
Action=org.libvirt.unix.manage
ResultAny=yes
ResultInactive=yes
ResultActive=yes
EOF'
        elif is_suse; then
            # Work around the fact that polkit-default-privs overrules pklas
            # with 'unix-group:$group'.
            sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
[libvirt Management Access]
Identity=unix-user:$USER
Action=org.libvirt.unix.manage
ResultAny=yes
ResultInactive=yes
ResultActive=yes
EOF"
        fi

        # The user that nova runs as needs to be member of **libvirtd** group otherwise
        # nova-compute will be unable to use libvirt.
        if ! getent group libvirtd >/dev/null; then
            sudo groupadd libvirtd
        fi
        add_user_to_group `whoami` libvirtd

        # libvirt detects various settings on startup, as we potentially changed
        # the system configuration (modules, filesystems), we need to restart
        # libvirt to detect those changes.
        restart_service $LIBVIRT_DAEMON


        # Instance Storage
        # ----------------

        # Nova stores each instance in its own directory.
        mkdir -p $NOVA_INSTANCES_PATH

        # You can specify a different disk to be mounted and used for backing the
        # virtual machines.  If there is a partition labeled nova-instances we
        # mount it (ext filesystems can be labeled via e2label).
        if [ -L /dev/disk/by-label/nova-instances ]; then
            if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then
                sudo mount -L nova-instances $NOVA_INSTANCES_PATH
                sudo chown -R `whoami` $NOVA_INSTANCES_PATH
            fi
        fi

        # Clean up old instances
        cleanup_nova
    fi
}

# create_nova_accounts() - Set up common required nova accounts

# Tenant               User       Roles
# ------------------------------------------------------------------
# service              nova       admin, [ResellerAdmin (swift only)]

# Migrated from keystone_data.sh
create_nova_accounts() {

    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")

    # Nova
    if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
        NOVA_USER=$(keystone user-create \
            --name=nova \
            --pass="$SERVICE_PASSWORD" \
            --tenant_id $SERVICE_TENANT \
            --email=nova@example.com \
            | grep " id " | get_field 2)
        keystone user-role-add \
            --tenant_id $SERVICE_TENANT \
            --user_id $NOVA_USER \
            --role_id $ADMIN_ROLE
        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
            NOVA_SERVICE=$(keystone service-create \
                --name=nova \
                --type=compute \
                --description="Nova Compute Service" \
                | grep " id " | get_field 2)
            keystone endpoint-create \
                --region RegionOne \
                --service_id $NOVA_SERVICE \
                --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
                --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
                --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"
        fi
    fi
}

# create_nova_conf() - Create a new nova.conf file
function create_nova_conf() {
    # Remove legacy ``nova.conf``
    rm -f $NOVA_DIR/bin/nova.conf

    # (Re)create ``nova.conf``
    rm -f $NOVA_CONF
    add_nova_opt "[DEFAULT]"
    add_nova_opt "verbose=True"
    add_nova_opt "auth_strategy=keystone"
    add_nova_opt "allow_resize_to_same_host=True"
    add_nova_opt "api_paste_config=$NOVA_API_PASTE_INI"
    add_nova_opt "rootwrap_config=$NOVA_CONF_DIR/rootwrap.conf"
    add_nova_opt "compute_scheduler_driver=$SCHEDULER"
    add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF"
    add_nova_opt "force_dhcp_release=True"
    add_nova_opt "fixed_range=$FIXED_RANGE"
    add_nova_opt "s3_host=$SERVICE_HOST"
    add_nova_opt "s3_port=$S3_SERVICE_PORT"
    add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions"
    add_nova_opt "my_ip=$HOST_IP"
    # BUGFIX: ``local dburl`` and the ``database_connection_url`` call were
    # fused onto one line in the source, leaving ``dburl`` empty.
    local dburl
    database_connection_url dburl nova
    add_nova_opt "sql_connection=$dburl"
    add_nova_opt "libvirt_type=$LIBVIRT_TYPE"
    add_nova_opt "libvirt_cpu_mode=none"
    add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x"
    if is_service_enabled n-api; then
        add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS"
        if is_service_enabled tls-proxy; then
            # Set the service port for a proxy to take the original
            add_nova_opt "osapi_compute_listen_port=$NOVA_SERVICE_PORT_INT"
        fi
    fi
    if is_service_enabled cinder; then
        add_nova_opt "volume_api_class=nova.volume.cinder.API"
    fi
    if [ -n "$NOVA_STATE_PATH" ]; then
        add_nova_opt "state_path=$NOVA_STATE_PATH"
        add_nova_opt "lock_path=$NOVA_STATE_PATH"
    fi
    if [ -n "$NOVA_INSTANCES_PATH" ]; then
        add_nova_opt "instances_path=$NOVA_INSTANCES_PATH"
    fi
    if [ "$MULTI_HOST" != "False" ]; then
        add_nova_opt "multi_host=True"
        add_nova_opt "send_arp_for_ha=True"
    fi
    if [ "$SYSLOG" != "False" ]; then
        add_nova_opt "use_syslog=True"
    fi
    if [ "$API_RATE_LIMIT" != "True" ]; then
        add_nova_opt "api_rate_limit=False"
    fi
    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
        # Add color to logging output
        add_nova_opt "logging_context_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s"
        add_nova_opt "logging_default_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s"
        add_nova_opt "logging_debug_format_suffix=from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d"
        add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s TRACE %(name)s %(instance)s"
    else
        # Show user_name and project_name instead of user_id and project_id
        add_nova_opt "logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
    fi
    if is_service_enabled ceilometer; then
        add_nova_opt "instance_usage_audit=True"
        add_nova_opt "instance_usage_audit_period=hour"
        # notification_driver is a multi-valued option; both lines are kept
        add_nova_opt "notification_driver=nova.openstack.common.notifier.rabbit_notifier"
        add_nova_opt "notification_driver=ceilometer.compute.nova_notifier"
    fi

    # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS``
    if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then
        EXTRA_OPTS=$EXTRA_FLAGS
    fi

    # Define extra nova conf flags by defining the array ``EXTRA_OPTS``.
    # For Example: ``EXTRA_OPTS=(foo=true bar=2)``
    for I in "${EXTRA_OPTS[@]}"; do
        # Attempt to convert flags to options
        add_nova_opt ${I//--}
    done
}

# init_nova() - Initialize databases, etc.
function init_nova() {
    # Nova Database
    # -------------

    # All nova components talk to a central database.  We will need to do this step
    # only once for an entire cluster.
    if is_service_enabled $DATABASE_BACKENDS && is_service_enabled nova; then
        # (Re)create nova database
        # Explicitly use latin1: to avoid lp#829209, nova expects the database to
        # use latin1 by default, and then upgrades the database to utf8 (see the
        # 082_essex.py in nova)
        recreate_database nova latin1

        # (Re)create nova database
        $NOVA_BIN_DIR/nova-manage db sync
    fi

    # Create cache dir
    sudo mkdir -p $NOVA_AUTH_CACHE_DIR
    sudo chown `whoami` $NOVA_AUTH_CACHE_DIR
}

# install_novaclient() - Collect source and prepare
function install_novaclient() {
    git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
}

# install_nova() - Collect source and prepare
function install_nova() {
    if is_service_enabled n-cpu; then
        if is_ubuntu; then
            install_package libvirt-bin
        elif is_fedora || is_suse; then
            install_package libvirt
        else
            exit_distro_not_supported "libvirt installation"
        fi

        # Install and configure **LXC** if specified.  LXC is another approach to
        # splitting a system into many smaller parts.  LXC uses cgroups and chroot
        # to simulate multiple systems.
        if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
            if is_ubuntu; then
                if [[ "$DISTRO" > natty ]]; then
                    install_package cgroup-lite
                fi
            else
                ### FIXME(dtroyer): figure this out
                echo "RPM-based cgroup not implemented yet"
                yum_install libcgroup-tools
            fi
        fi
    fi

    git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
}

# start_nova_api() - Start the API process ahead of other things
function start_nova_api() {
    # Get right service port for testing
    local service_port=$NOVA_SERVICE_PORT
    if is_service_enabled tls-proxy; then
        service_port=$NOVA_SERVICE_PORT_INT
    fi

    screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
    echo "Waiting for nova-api to start..."
    if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
        echo "nova-api did not start"
        exit 1
    fi

    # Start proxies if enabled
    if is_service_enabled tls-proxy; then
        start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT &
    fi
}

# start_nova() - Start running processes, including screen
function start_nova() {
    # The group **libvirtd** is added to the current user in this script.
    # Use 'sg' to execute nova-compute as a member of the **libvirtd** group.
    # ``screen_it`` checks ``is_service_enabled``, it is not needed here
    screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor"
    screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute"
    screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert"
    screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network"
    screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler"
    screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF --web ."
    screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF"
    screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth"
}

# stop_nova() - Stop running processes (non-screen)
function stop_nova() {
    # Kill the nova screen windows
    for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-cond; do
        screen -S $SCREEN_NAME -p $serv -X kill
    done
}

# Restore xtrace
$XTRACE