Merge "Replace screen_it() with run_process() throughout"

Jenkins 2014-09-13 12:38:34 +00:00 committed by Gerrit Code Review
commit efa18c73ab
21 changed files with 136 additions and 95 deletions
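The conversion follows one mechanical pattern across all the touched files; a minimal sketch of it, using lines taken from the glance hunks later in this diff:

# Old style: screen_it ran the string through a shell inside a screen window,
# so callers had to spell out the leading "cd" and any sg wrapping themselves.
screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"

# New style: run_process takes a single command (no shell metacharacters) plus
# an optional group, and only uses screen when USE_SCREEN=True.
run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"

# The matching teardown call replaces screen_stop:
stop_process g-api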

View File

@ -1136,10 +1136,13 @@ function zypper_install {
# files to produce the same logs as screen_it(). The log filename is derived
# from the service name and global-and-now-misnamed ``SCREEN_LOGDIR``
# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
# _old_run_process service "command-line"
# If an optional group is provided sg will be used to set the group of
# the command.
# _run_process service "command-line" [group]
function _run_process {
local service=$1
local command="$2"
local group=$3
# Undo logging redirections and close the extra descriptors
exec 1>&3
@ -1148,8 +1151,8 @@ function _run_process {
exec 6>&-
if [[ -n ${SCREEN_LOGDIR} ]]; then
exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1
ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
exec 1>&${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log 2>&1
ln -sf ${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${service}.log
# TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
export PYTHONUNBUFFERED=1
@ -1157,7 +1160,11 @@ function _run_process {
# Run under ``setsid`` to force the process to become a session and group leader.
# The pid saved can be used with pkill -g to get the entire process group.
setsid $command & echo $! >$SERVICE_DIR/$SCREEN_NAME/$1.pid
if [[ -n "$group" ]]; then
setsid sg $group "$command" & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
else
setsid $command & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
fi
# Just silently exit this process
exit 0
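When ``SCREEN_LOGDIR`` is set, the redirection above writes each run to a timestamped file and refreshes a stable symlink, so the latest run is always reachable under a fixed name. A small illustration (service name and timestamp are hypothetical):

# Hypothetical layout under $SCREEN_LOGDIR after starting n-api:
#   screen-n-api.2014-09-13-123834.log                        per-run log named with CURRENT_LOG_TIME
#   screen-n-api.log -> screen-n-api.2014-09-13-123834.log    symlink refreshed by the ln -sf above
tail -f ${SCREEN_LOGDIR}/screen-n-api.log   # always follows the most recent run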
@ -1190,17 +1197,20 @@ function is_running {
# Run a single service under screen or directly
# If the command includes shell metacharacters (;<>*) it must be run using a shell
# run_process service "command-line"
# If an optional group is provided sg will be used to run the
# command as that group.
# run_process service "command-line" [group]
function run_process {
local service=$1
local command="$2"
local group=$3
if is_service_enabled $service; then
if [[ "$USE_SCREEN" = "True" ]]; then
screen_service "$service" "$command"
screen_service "$service" "$command" "$group"
else
# Spawn directly without screen
_run_process "$service" "$command" &
_run_process "$service" "$command" "$group" &
fi
fi
}
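Typical calls, taken from hunks later in this change, with and without the new optional group argument:

# Plain service, no group (keystone hunk):
run_process key "$KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug"

# With the optional third argument the command is run via sg as that group
# (nova-compute hunk):
run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP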
@ -1208,11 +1218,13 @@ function run_process {
# Helper to launch a service in a named screen
# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_NAME``, ``SCREEN_LOGDIR``,
# ``SERVICE_DIR``, ``USE_SCREEN``
# screen_service service "command-line"
# Run a command in a shell in a screen window
# screen_service service "command-line" [group]
# Run a command in a shell in a screen window. If an optional group
# is provided, use sg to set the group of the command.
function screen_service {
local service=$1
local command="$2"
local group=$3
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
@ -1242,8 +1254,11 @@ function screen_service {
# - the server process is brought back to the foreground
# - if the server process exits prematurely the fg command errors
# and a message is written to stdout and the service failure file
# The pid saved can be used in screen_stop() as a process group
# The pid saved can be used in stop_process() as a process group
# id to kill off all child processes
if [[ -n "$group" ]]; then
command="sg $group '$command'"
fi
screen -S $SCREEN_NAME -p $service -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${service}.pid; fg || echo \"$service failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${service}.failure\"$NL"
fi
}
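As the comment above notes, the saved pid is a session/process-group leader, so a service started this way can be torn down by process group. A minimal sketch, with an illustrative service name (stop_process itself is not part of this hunk):

# Kill the whole process group of a service using the pid file written above.
pkill -g "$(cat $SERVICE_DIR/$SCREEN_NAME/n-api.pid)"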
@ -1281,7 +1296,7 @@ function screen_rc {
# If screen is being used kill the screen window; this will catch processes
# that did not leave a PID behind
# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``, ``USE_SCREEN``
# screen_stop service
# screen_stop_service service
function screen_stop_service {
local service=$1
@ -1350,6 +1365,17 @@ function service_check {
fi
}
# Tail a log file in a screen if USE_SCREEN is true.
function tail_log {
local service=$1
local logfile=$2
USE_SCREEN=$(trueorfalse True $USE_SCREEN)
if [[ "$USE_SCREEN" = "True" ]]; then
screen_service "$service" "sudo tail -f $logfile"
fi
}
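Callers that previously wrapped ``sudo tail -f`` in screen_it now go through this helper, as the horizon and keystone hunks below show:

# Follow Apache-managed logs in screen windows only when USE_SCREEN=True
# (a no-op otherwise); lines taken from hunks in this change.
tail_log horizon /var/log/$APACHE_NAME/horizon_error.log
tail_log key /var/log/$APACHE_NAME/keystone.log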
# Deprecated Functions
# --------------------
@ -1707,6 +1733,7 @@ function is_service_enabled {
# are implemented
[[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0
[[ ${service} == n-cpu-* && ${ENABLED_SERVICES} =~ "n-cpu" ]] && enabled=0
[[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0
[[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0
[[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0

View File

@ -224,18 +224,18 @@ function install_ceilometerclient {
# start_ceilometer() - Start running processes, including screen
function start_ceilometer {
screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF"
screen_it ceilometer-anotification "cd ; ceilometer-agent-notification --config-file $CEILOMETER_CONF"
screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF"
screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
run_process ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF"
run_process ceilometer-anotification "ceilometer-agent-notification --config-file $CEILOMETER_CONF"
run_process ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF"
run_process ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
# Start the compute agent last to allow time for the collector to
# fully wake up and connect to the message bus. See bug #1355809
if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP 'ceilometer-agent-compute --config-file $CEILOMETER_CONF'"
run_process ceilometer-acompute "sg $LIBVIRT_GROUP 'ceilometer-agent-compute --config-file $CEILOMETER_CONF'"
fi
if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
screen_it ceilometer-acompute "cd ; ceilometer-agent-compute --config-file $CEILOMETER_CONF"
run_process ceilometer-acompute "ceilometer-agent-compute --config-file $CEILOMETER_CONF"
fi
# only die on API if it was actually intended to be turned on
@ -246,15 +246,15 @@ function start_ceilometer {
fi
fi
screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
screen_it ceilometer-alarm-evaluator "cd ; ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF"
run_process ceilometer-alarm-notifier "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
run_process ceilometer-alarm-evaluator "ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF"
}
# stop_ceilometer() - Stop running processes
function stop_ceilometer {
# Kill the ceilometer screen windows
for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
screen_stop $serv
stop_process $serv
done
}

View File

@ -459,7 +459,7 @@ function stop_cinder {
# Kill the cinder screen windows
local serv
for serv in c-api c-bak c-sch c-vol; do
screen_stop $serv
stop_process $serv
done
if is_service_enabled c-vol; then

View File

@ -77,14 +77,14 @@ function install_ganttclient {
# start_gantt() - Start running processes, including screen
function start_gantt {
if is_service_enabled gantt; then
screen_it gantt "cd $GANTT_DIR && $GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF"
run_process gantt "$GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF"
fi
}
# stop_gantt() - Stop running processes
function stop_gantt {
echo "Stop Gantt"
screen_stop gantt
stop_process gantt
}
# Restore xtrace

View File

@ -279,8 +279,8 @@ function install_glance {
# start_glance() - Start running processes, including screen
function start_glance {
screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
die $LINENO "g-api did not start"
@ -290,8 +290,8 @@ function start_glance {
# stop_glance() - Stop running processes
function stop_glance {
# Kill the Glance screen windows
screen_stop g-api
screen_stop g-reg
stop_process g-api
stop_process g-reg
}

View File

@ -189,10 +189,10 @@ function install_heat_other {
# start_heat() - Start running processes, including screen
function start_heat {
screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF"
screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF"
screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-file=$HEAT_CONF"
screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-file=$HEAT_CONF"
run_process h-eng "$HEAT_DIR/bin/heat-engine --config-file=$HEAT_CONF"
run_process h-api "$HEAT_DIR/bin/heat-api --config-file=$HEAT_CONF"
run_process h-api-cfn "$HEAT_DIR/bin/heat-api-cfn --config-file=$HEAT_CONF"
run_process h-api-cw "$HEAT_DIR/bin/heat-api-cloudwatch --config-file=$HEAT_CONF"
}
# stop_heat() - Stop running processes
@ -200,7 +200,7 @@ function stop_heat {
# Kill the screen windows
local serv
for serv in h-eng h-api h-api-cfn h-api-cw; do
screen_stop $serv
stop_process $serv
done
}

View File

@ -152,6 +152,7 @@ function init_horizon {
# Remove old log files that could mess with how devstack detects whether Horizon
# has been successfully started (see start_horizon() and functions::screen_it())
# and run_process
sudo rm -f /var/log/$APACHE_NAME/horizon_*
}
@ -173,7 +174,7 @@ function install_horizon {
# start_horizon() - Start running processes, including screen
function start_horizon {
restart_apache_server
screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log"
tail_log horizon /var/log/$APACHE_NAME/horizon_error.log
}
# stop_horizon() - Stop running processes (non-screen)

View File

@ -381,7 +381,7 @@ function start_ironic {
# start_ironic_api() - Used by start_ironic().
# Starts Ironic API server.
function start_ironic_api {
screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE"
run_process ir-api "$IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE"
echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..."
if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then
die $LINENO "ir-api did not start"
@ -391,7 +391,7 @@ function start_ironic_api {
# start_ironic_conductor() - Used by start_ironic().
# Starts Ironic conductor.
function start_ironic_conductor {
screen_it ir-cond "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE"
run_process ir-cond "$IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE"
# TODO(romcheg): Find a way to check whether the conductor has started.
}

View File

@ -474,11 +474,11 @@ function start_keystone {
if [ "$KEYSTONE_USE_MOD_WSGI" == "True" ]; then
restart_apache_server
screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone.log"
screen_it key-access "sudo tail -f /var/log/$APACHE_NAME/keystone_access.log"
tail_log key /var/log/$APACHE_NAME/keystone.log
tail_log key-access /var/log/$APACHE_NAME/keystone_access.log
else
# Start Keystone in a screen window
screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug"
run_process key "$KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug"
fi
echo "Waiting for keystone to start..."
@ -499,7 +499,7 @@ function start_keystone {
# stop_keystone() - Stop running processes
function stop_keystone {
# Kill the Keystone screen window
screen_stop key
stop_process key
# Cleanup the WSGI files and VHOST
_cleanup_keystone_apache_wsgi
}

View File

@ -591,7 +591,7 @@ function install_neutron_agent_packages {
function start_neutron_service_and_check {
local cfg_file_options="$(determine_config_files neutron-server)"
# Start the Neutron service
screen_it q-svc "cd $NEUTRON_DIR && python $NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
run_process q-svc "python $NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
echo "Waiting for Neutron to start..."
if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then
die $LINENO "Neutron did not start"
@ -601,8 +601,8 @@ function start_neutron_service_and_check {
# Start running processes, including screen
function start_neutron_agents {
# Start up the neutron agents if enabled
screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
run_process q-agt "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
if is_provider_network; then
sudo ovs-vsctl add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE
@ -612,24 +612,24 @@ function start_neutron_agents {
fi
if is_service_enabled q-vpn; then
screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)"
run_process q-vpn "$AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)"
else
screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
run_process q-l3 "python $AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
fi
screen_it q-meta "cd $NEUTRON_DIR && python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE"
run_process q-meta "python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE"
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
# For XenServer, start an agent for the domU openvswitch
screen_it q-domua "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
fi
if is_service_enabled q-lbaas; then
screen_it q-lbaas "cd $NEUTRON_DIR && python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
fi
if is_service_enabled q-metering; then
screen_it q-metering "cd $NEUTRON_DIR && python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
fi
}

View File

@ -28,12 +28,14 @@ functions to be implemented
git clone xxx
* ``start_<third_party>``:
start running processes, including screen
start running processes, including screen if USE_SCREEN=True
e.g.
screen_it XXXX "cd $XXXXY_DIR && $XXXX_DIR/bin/XXXX-bin"
run_process XXXX "$XXXX_DIR/bin/XXXX-bin"
* ``stop_<third_party>``:
stop running processes (non-screen)
e.g.
stop_process XXXX
* ``check_<third_party>``:
verify that the integration between neutron server and third-party components is sane

View File

@ -64,7 +64,7 @@ function install_ryu {
}
function start_ryu {
screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --config-file $RYU_CONF"
run_process ryu "$RYU_DIR/bin/ryu-manager --config-file $RYU_CONF"
}
function stop_ryu {

View File

@ -39,6 +39,7 @@ NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova}
NOVA_CONF_DIR=/etc/nova
NOVA_CONF=$NOVA_CONF_DIR/nova.conf
NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf
NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf
NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell}
NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
@ -654,7 +655,7 @@ function start_nova_api {
service_port=$NOVA_SERVICE_PORT_INT
fi
screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
run_process n-api "$NOVA_BIN_DIR/nova-api"
echo "Waiting for nova-api to start..."
if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
die $LINENO "nova-api did not start"
@ -676,18 +677,24 @@ function start_nova_compute {
if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
# The group **$LIBVIRT_GROUP** is added to the current user in this script.
# Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
# 'sg' will be used in run_process to execute nova-compute as a member of the
# **$LIBVIRT_GROUP** group.
run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP
elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
local i
for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file <(echo -e '[DEFAULT]\nhost=${HOSTNAME}${i}')"
# Avoid the process substitution previously used to pass the fake host
# configuration by creating or modifying real config files. Each fake
# gets its own configuration and its own log file.
local fake_conf="${NOVA_FAKE_CONF}-${i}"
iniset $fake_conf DEFAULT host "${HOSTNAME}${i}"
run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file $fake_conf"
done
else
if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
start_nova_hypervisor
fi
screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf"
run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf"
fi
}
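Each fake compute now reads a real config file instead of the old inline process substitution. A hypothetical rendering of what one generated file contains, for i=2 on a host named "devstack" (hostname and index are illustrative):

# ${NOVA_FAKE_CONF}-2 (i.e. /etc/nova/nova-fake.conf-2) written by the iniset above:
#   [DEFAULT]
#   host = devstack2
# This mirrors what the removed inline form set via
#   --config-file <(echo -e '[DEFAULT]\nhost=${HOSTNAME}${i}')
cat "${NOVA_FAKE_CONF}-2"   # inspect the generated per-fake configuration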
@ -700,25 +707,25 @@ function start_nova_rest {
local compute_cell_conf=$NOVA_CONF
fi
# ``screen_it`` checks ``is_service_enabled``, it is not needed here
screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf"
screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"
# ``run_process`` checks ``is_service_enabled``, it is not needed here
run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf"
run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"
screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf"
screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
run_process n-crt "$NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf"
run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
# Starting the nova-objectstore only if swift3 service is not enabled.
# Swift will act as s3 objectstore.
is_service_enabled swift3 || \
screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf"
run_process n-obj "$NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf"
}
function start_nova {
@ -727,7 +734,7 @@ function start_nova {
}
function stop_nova_compute {
screen_stop n-cpu
stop_process n-cpu
if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
stop_nova_hypervisor
fi
@ -738,7 +745,7 @@ function stop_nova_rest {
# Some services are listed here twice since more than one instance
# of a service may be running in certain configs.
for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do
screen_stop $serv
stop_process $serv
done
}

View File

@ -139,6 +139,8 @@ function start_opendaylight {
# The flags to ODL have the following meaning:
# -of13: runs ODL using OpenFlow 1.3 protocol support.
# -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support
# NOTE(chdent): Leaving this as screen_it instead of run_process until
# the right thing for this service is determined.
screen_it odl-server "cd $ODL_DIR/opendaylight && JAVA_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
# Sleep a bit to let OpenDaylight finish starting up
@ -147,7 +149,7 @@ function start_opendaylight {
# stop_opendaylight() - Stop running processes (non-screen)
function stop_opendaylight {
screen_stop odl-server
stop_process odl-server
}
# stop_opendaylight-compute() - Remove OVS bridges

View File

@ -168,7 +168,7 @@ function install_python_saharaclient {
# start_sahara() - Start running processes, including screen
function start_sahara {
screen_it sahara "cd $SAHARA_DIR && $SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE"
run_process sahara "$SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE"
}
# stop_sahara() - Stop running processes

View File

@ -659,10 +659,10 @@ function start_swift {
if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
restart_apache_server
swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
screen_it s-proxy "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/proxy-server"
tail_log s-proxy /var/log/$APACHE_NAME/proxy-server
if [[ ${SWIFT_REPLICAS} == 1 ]]; then
for type in object container account; do
screen_it s-${type} "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/${type}-server-1"
tail_log s-${type} /var/log/$APACHE_NAME/${type}-server-1
done
fi
return 0
@ -683,10 +683,10 @@ function start_swift {
for type in proxy ${todo}; do
swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
done
screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
run_process s-proxy "$SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
if [[ ${SWIFT_REPLICAS} == 1 ]]; then
for type in object container account; do
screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
run_process s-${type} "$SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
done
fi
@ -708,9 +708,9 @@ function stop_swift {
swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
fi
# Dump all of the servers
# Maintain the iteration as screen_stop() has some desirable side-effects
# Maintain the iteration as stop_process() has some desirable side-effects
for type in proxy object container account; do
screen_stop s-${type}
stop_process s-${type}
done
# Blast out any stragglers
pkill -f swift-

View File

@ -75,13 +75,17 @@ function install_XXXX {
# start_XXXX() - Start running processes, including screen
function start_XXXX {
# screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin"
# The quoted command must be a single command and not include any
# shell metacharacters, redirections or shell builtins.
# run_process XXXX "$XXXX_DIR/bin/XXXX-bin"
:
}
# stop_XXXX() - Stop running processes (non-screen)
function stop_XXXX {
# FIXME(dtroyer): stop only our screen window?
# for serv in serv-a serv-b; do
# stop_process $serv
# done
:
}

View File

@ -228,9 +228,9 @@ function init_trove {
# start_trove() - Start running processes, including screen
function start_trove {
screen_it tr-api "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1"
screen_it tr-tmgr "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1"
screen_it tr-cond "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1"
run_process tr-api "$TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug"
run_process tr-tmgr "$TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug"
run_process tr-cond "$TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug"
}
# stop_trove() - Stop running processes
@ -238,7 +238,7 @@ function stop_trove {
# Kill the trove screen windows
local serv
for serv in tr-api tr-tmgr tr-cond; do
screen_stop $serv
stop_process $serv
done
}

View File

@ -162,9 +162,9 @@ function install_zaqarclient {
# start_zaqar() - Start running processes, including screen
function start_zaqar {
if [[ "$USE_SCREEN" = "False" ]]; then
screen_it zaqar-server "zaqar-server --config-file $ZAQAR_CONF --daemon"
run_process zaqar-server "zaqar-server --config-file $ZAQAR_CONF --daemon"
else
screen_it zaqar-server "zaqar-server --config-file $ZAQAR_CONF"
run_process zaqar-server "zaqar-server --config-file $ZAQAR_CONF"
fi
echo "Waiting for Zaqar to start..."

View File

@ -37,7 +37,6 @@ umask 022
# Keep track of the devstack directory
TOP_DIR=$(cd $(dirname "$0") && pwd)
# Sanity Checks
# -------------
@ -74,7 +73,6 @@ if [[ $EUID -eq 0 ]]; then
exit 1
fi
# Prepare the environment
# -----------------------
@ -1210,7 +1208,7 @@ fi
if is_service_enabled zeromq; then
echo_summary "Starting zermomq receiver"
screen_it zeromq "cd $NOVA_DIR && $OSLO_BIN_DIR/oslo-messaging-zmq-receiver"
run_process zeromq "$OSLO_BIN_DIR/oslo-messaging-zmq-receiver"
fi
# Launch the nova-api and wait for it to answer before continuing
@ -1318,7 +1316,7 @@ if is_service_enabled nova && is_baremetal; then
fi
# ensure callback daemon is running
sudo pkill nova-baremetal-deploy-helper || true
screen_it baremetal "cd ; nova-baremetal-deploy-helper"
run_process baremetal "nova-baremetal-deploy-helper"
fi
# Save some values we generated for later use

View File

@ -1,9 +1,9 @@
#!/bin/bash
# tests/exec.sh - Test DevStack screen_it() and screen_stop()
# tests/exec.sh - Test DevStack run_process() and stop_process()
#
# exec.sh start|stop|status
#
# Set USE_SCREEN to change the default
# Set USE_SCREEN True|False to change use of screen.
#
# This script emulates the basic exec environment in ``stack.sh`` to test
# the process spawn and kill operations.
@ -94,12 +94,12 @@ fi
if [[ "$1" == "start" ]]; then
echo "Start service"
setup_screen
screen_it fake-service "$TOP_DIR/tests/fake-service.sh"
run_process fake-service "$TOP_DIR/tests/fake-service.sh"
sleep 1
status
elif [[ "$1" == "stop" ]]; then
echo "Stop service"
screen_stop fake-service
stop_process fake-service
status
elif [[ "$1" == "status" ]]; then
status
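Exercising the renamed helpers through the test script, per the usage line in its header (USE_SCREEN is set in the environment as the header suggests):

USE_SCREEN=False ./tests/exec.sh start    # spawns fake-service.sh via run_process
./tests/exec.sh status
./tests/exec.sh stop                      # tears it down via stop_process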