From 74e965f0dbdce7807b4e9146eaa8de3b5bd75838 Mon Sep 17 00:00:00 2001
From: Jesse Andrews
Date: Fri, 16 Sep 2011 14:19:46 -0700
Subject: [PATCH 1/6] more updates to how images are installed

---
 stack.sh | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/stack.sh b/stack.sh
index b47729e190..660a52981f 100755
--- a/stack.sh
+++ b/stack.sh
@@ -418,14 +418,13 @@ if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
 
     # extract ami-tty/image, aki-tty/image & ari-tty/image
     mkdir -p $FILES/images
-    cd $FILES/images
-    tar -zxf $DEST/tty.tgz
+    tar -zxf $FILES/tty.tgz -C $FILES/images
 
     # add images to glance
     # FIXME: kernel/ramdisk is hardcoded - use return result from add
-    glance add name="tty-kernel" is_public=true container_format=aki disk_format=aki < aki-tty/image
-    glance add name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < ari-tty/image
-    glance add name="tty" is_public=true container_format=ami disk_format=ami kernel_id=1 ramdisk_id=2 < ami-tty/image
+    glance add name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image
+    glance add name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image
+    glance add name="tty" is_public=true container_format=ami disk_format=ami kernel_id=1 ramdisk_id=2 < $FILES/images/ami-tty/image
 fi
 
 # Using the cloud

From eba18fbfa52b38fd1497cc87c03dd8674a9a1fa8 Mon Sep 17 00:00:00 2001
From: Jesse Andrews
Date: Fri, 16 Sep 2011 14:35:14 -0700
Subject: [PATCH 2/6] add note to fix the process of adding user to group

---
 stack.sh | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/stack.sh b/stack.sh
index 660a52981f..55ef2b512d 100755
--- a/stack.sh
+++ b/stack.sh
@@ -320,6 +320,8 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
     sudo modprobe nbd || true
     sudo modprobe kvm || true
     # user needs to be member of libvirtd group for nova-compute to use libvirt
+    ## FIXME: this doesn't affect the current shell so you end up with a failed
+    ## launch of nova-compute
     sudo usermod -a -G libvirtd `whoami`
     # if kvm wasn't running before we need to restart libvirt to enable it
     sudo /etc/init.d/libvirt-bin restart

From 23761c3553165e4a3c2ef1e15613be0d495e1f4d Mon Sep 17 00:00:00 2001
From: Anthony Young
Date: Fri, 16 Sep 2011 14:36:11 -0700
Subject: [PATCH 3/6] add multi_host option

---
 stack.sh | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/stack.sh b/stack.sh
index 55ef2b512d..a093b24a17 100755
--- a/stack.sh
+++ b/stack.sh
@@ -308,6 +308,9 @@ add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
 if [ -n "$FLAT_INTERFACE" ]; then
     add_nova_flag "--flat_interface=$FLAT_INTERFACE"
 fi
+if [ -n "$MULTI_HOST" ]; then
+    add_nova_flag "--multi_host=$MULTI_HOST"
+fi
 
 # create a new named screen to store things in
 screen -d -m -S nova -t nova
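The MULTI_HOST handling in PATCH 3/6 mirrors the existing FLAT_INTERFACE pattern just above it: the nova flag is only written when the variable is set, so single-node installs are unaffected. A rough sketch, not part of the patch, of how a compute node might invoke stack.sh with it (the values shown are simply the defaults from build_lxc_multi.sh later in this series):

    # hypothetical multi-node invocation: the head node's services are reused,
    # and MULTI_HOST=1 is passed through to nova as --multi_host=1
    MYSQL_HOST=192.168.1.52 RABBIT_HOST=192.168.1.52 \
        GLANCE_HOSTPORT=192.168.1.52:9292 NET_MAN=FlatDHCPManager \
        FLAT_INTERFACE=eth0 MULTI_HOST=1 ./stack.sh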
From 23761c3553165e4a3c2ef1e15613be0d495e1f4d Mon Sep 17 00:00:00 2001
From: Anthony Young
Date: Fri, 16 Sep 2011 14:54:20 -0700
Subject: [PATCH 4/6] floating ip support, and cleanup functionality

---
 build_lxc.sh       |  5 +++++
 build_lxc_multi.sh | 21 ++++++++++++++-------
 2 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/build_lxc.sh b/build_lxc.sh
index c1ab995b0c..787c4bdf85 100755
--- a/build_lxc.sh
+++ b/build_lxc.sh
@@ -69,6 +69,11 @@ fi
 
 # Destroy the old container
 lxc-destroy -n $CONTAINER
 
+# If this call is to TERMINATE the container then exit
+if [ "$TERMINATE" = "1" ]; then
+    exit
+fi
+
 # Create the container
 lxc-create -n $CONTAINER -t natty -f $LXC_CONF

diff --git a/build_lxc_multi.sh b/build_lxc_multi.sh
index efa7deb8c8..50be4f574f 100755
--- a/build_lxc_multi.sh
+++ b/build_lxc_multi.sh
@@ -4,16 +4,21 @@ HEAD_HOST=${HEAD_HOST:-192.168.1.52}
 COMPUTE_HOSTS=${COMPUTE_HOSTS:-192.168.1.53,192.168.1.54}
 
 # Networking params
-NAMESERVER=${NAMESERVER:-192.168.2.1}
+NAMESERVER=${NAMESERVER:-192.168.1.1}
 GATEWAY=${GATEWAY:-192.168.1.1}
+NETMASK=${NETMASK:-255.255.255.0}
+FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30}
+
+# Setting this to 1 shuts down and destroys our containers without relaunching.
+TERMINATE=${TERMINATE:-0}
 
 # Variables common amongst all hosts in the cluster
-COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NET_MAN=FlatDHCPManager FLAT_INTERFACE=eth0"
+COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NET_MAN=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1"
 
 # Helper to launch containers
 function run_lxc {
     # For some reason container names with periods can cause issues :/
-    CONTAINER=$1 CONTAINER_IP=$2 CONTAINER_GATEWAY=$GATEWAY NAMESERVER=$NAMESERVER STACKSH_PARAMS="$COMMON_VARS $3" ./build_lxc.sh
+    CONTAINER=$1 CONTAINER_IP=$2 CONTAINER_NETMASK=$NETMASK CONTAINER_GATEWAY=$GATEWAY NAMESERVER=$NAMESERVER TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $3" ./build_lxc.sh
 }
 
 # Launch the head node - headnode uses a non-ip domain name,
@@ -21,10 +26,12 @@ function run_lxc {
 run_lxc STACKMASTER $HEAD_HOST "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,dash,mysql,rabbit"
 
 # Wait till the head node is up
-while ! wget -q -O- http://$HEAD_HOST | grep -q username; do
-    echo "Waiting for head node ($HEAD_HOST) to start..."
-    sleep 5
-done
+if [ ! "$TERMINATE" = "1" ]; then
+    while ! wget -q -O- http://$HEAD_HOST | grep -q username; do
+        echo "Waiting for head node ($HEAD_HOST) to start..."
+        sleep 5
+    done
+fi
 
 # Launch the compute hosts
 for compute_host in ${COMPUTE_HOSTS//,/ }; do
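With PATCH 4/6 applied, the same pair of scripts can both build and tear down the LXC cluster: build_lxc.sh destroys the old container and, when TERMINATE is set, exits before recreating it, while build_lxc_multi.sh skips the wait for the head node. A usage sketch (host defaults are the ones baked into the script):

    # bring up the head node and the compute containers
    ./build_lxc_multi.sh

    # later: shut down and destroy every container without relaunching it
    TERMINATE=1 ./build_lxc_multi.sh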
From e30432f6251edf61ad04dd40a7efa43da239f3a1 Mon Sep 17 00:00:00 2001
From: Jesse Andrews
Date: Fri, 16 Sep 2011 14:54:48 -0700
Subject: [PATCH 5/6] attempt to fix group issue

---
 stack.sh | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/stack.sh b/stack.sh
index a093b24a17..ba88a01b4f 100755
--- a/stack.sh
+++ b/stack.sh
@@ -322,9 +322,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
     # device - used to manage qcow images)
     sudo modprobe nbd || true
     sudo modprobe kvm || true
-    # user needs to be member of libvirtd group for nova-compute to use libvirt
-    ## FIXME: this doesn't affect the current shell so you end up with a failed
-    ## launch of nova-compute
+    # User needs to be member of libvirtd group for nova-compute to use libvirt.
     sudo usermod -a -G libvirtd `whoami`
     # if kvm wasn't running before we need to restart libvirt to enable it
     sudo /etc/init.d/libvirt-bin restart
@@ -404,7 +402,10 @@ screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.con
 screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf"
 screen_it key "$KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF"
 screen_it n-api "$NOVA_DIR/bin/nova-api"
-screen_it n-cpu "$NOVA_DIR/bin/nova-compute"
+# launch nova-compute with a new bash, since user won't be a member of libvirtd
+# group in the current shell context (due to how linux works).
+# TODO: newgrp might work instead...
+screen_it n-cpu "bash -c $NOVA_DIR/bin/nova-compute"
 screen_it n-net "$NOVA_DIR/bin/nova-network"
 screen_it n-sch "$NOVA_DIR/bin/nova-scheduler"
 # nova-vncproxy binds a privileged port, and so needs sudo

From 1f7176011008c77bdf0a0ec8138755dfb0a769e3 Mon Sep 17 00:00:00 2001
From: Jesse Andrews
Date: Fri, 16 Sep 2011 15:18:53 -0700
Subject: [PATCH 6/6] use newgrp to launch nova-compute in a new context

---
 stack.sh | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/stack.sh b/stack.sh
index ba88a01b4f..e9218b8d2b 100755
--- a/stack.sh
+++ b/stack.sh
@@ -389,9 +389,9 @@ fi
 # so send the start command by forcing text into the window.
 # Only run the services specified in ``ENABLED_SERVICES``
 
-NL=`echo -ne '\015'`
-
+# our screen helper to launch a service in a hidden named screen
 function screen_it {
+    NL=`echo -ne '\015'`
     if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then
         screen -S nova -X screen -t $1
         screen -S nova -p $1 -X stuff "$2$NL"
@@ -402,10 +402,13 @@ screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.con
 screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf"
 screen_it key "$KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF"
 screen_it n-api "$NOVA_DIR/bin/nova-api"
-# launch nova-compute with a new bash, since user won't be a member of libvirtd
-# group in the current shell context (due to how linux works).
-# TODO: newgrp might work instead...
-screen_it n-cpu "bash -c $NOVA_DIR/bin/nova-compute"
+# Launching nova-compute should be as simple as running ``nova-compute`` but
+# have to do a little more than that in our script. Since we add the group
+# ``libvirtd`` to our user in this script, when nova-compute is run it is
+# within the context of our original shell (so our groups won't be updated).
+# We can send the command nova-compute to the ``newgrp`` command to execute
+# in a specific context.
+screen_it n-cpu "echo $NOVA_DIR/bin/nova-compute | newgrp libvirtd"
 screen_it n-net "$NOVA_DIR/bin/nova-network"
 screen_it n-sch "$NOVA_DIR/bin/nova-scheduler"
 # nova-vncproxy binds a privileged port, and so needs sudo
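The final form in PATCH 6/6 works because ``newgrp libvirtd`` starts a fresh shell whose group set includes libvirtd (the earlier usermod only takes effect for newly started sessions, not the shell already running stack.sh), and a command piped into newgrp's stdin is executed by that fresh shell. A quick sanity check of the idea, assuming the current user has already been added to the libvirtd group:

    # the inner shell started by newgrp should report libvirtd among its groups,
    # even though the shell running this line does not have it yet
    echo "id -nG" | newgrp libvirtd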