Merge pull request #167 from cloudbuilders/trunk-fixes
make some changes prepping for trunk branch
commit 08c999d598

files/nova-api-paste.ini (new file, 127 lines)
@@ -0,0 +1,127 @@
+#######
+# EC2 #
+#######
+
+[composite:ec2]
+use = egg:Paste#urlmap
+/: ec2versions
+/services/Cloud: ec2cloud
+/services/Admin: ec2admin
+/latest: ec2metadata
+/2007-01-19: ec2metadata
+/2007-03-01: ec2metadata
+/2007-08-29: ec2metadata
+/2007-10-10: ec2metadata
+/2007-12-15: ec2metadata
+/2008-02-01: ec2metadata
+/2008-09-01: ec2metadata
+/2009-04-04: ec2metadata
+/1.0: ec2metadata
+
+[pipeline:ec2cloud]
+pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
+
+[pipeline:ec2admin]
+pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
+
+[pipeline:ec2metadata]
+pipeline = logrequest ec2md
+
+[pipeline:ec2versions]
+pipeline = logrequest ec2ver
+
+[filter:logrequest]
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
+
+[filter:ec2lockout]
+paste.filter_factory = nova.api.ec2:Lockout.factory
+
+[filter:totoken]
+paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory
+
+[filter:ec2noauth]
+paste.filter_factory = nova.api.ec2:NoAuth.factory
+
+[filter:authenticate]
+paste.filter_factory = nova.api.ec2:Authenticate.factory
+
+[filter:cloudrequest]
+controller = nova.api.ec2.cloud.CloudController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:adminrequest]
+controller = nova.api.ec2.admin.AdminController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:authorizer]
+paste.filter_factory = nova.api.ec2:Authorizer.factory
+
+[app:ec2executor]
+paste.app_factory = nova.api.ec2:Executor.factory
+
+[app:ec2ver]
+paste.app_factory = nova.api.ec2:Versions.factory
+
+[app:ec2md]
+paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory
+
+#############
+# Openstack #
+#############
+
+[composite:osapi]
+use = egg:Paste#urlmap
+/: osversions
+/v1.0: openstackapi10
+/v1.1: openstackapi11
+
+[pipeline:openstackapi10]
+pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10
+
+[pipeline:openstackapi11]
+pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:auth]
+paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
+
+[filter:noauth]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:ratelimit]
+paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
+
+[filter:extensions]
+paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory
+
+[app:osapiapp10]
+paste.app_factory = nova.api.openstack:APIRouterV10.factory
+
+[app:osapiapp11]
+paste.app_factory = nova.api.openstack:APIRouterV11.factory
+
+[pipeline:osversions]
+pipeline = faultwrap osversionapp
+
+[app:osversionapp]
+paste.app_factory = nova.api.openstack.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystone.middleware.auth_token:filter_factory
+service_protocol = http
+service_host = 127.0.0.1
+service_port = 5000
+auth_host = 127.0.0.1
+auth_port = 35357
+auth_protocol = http
+auth_uri = http://127.0.0.1:5000/
+admin_token = %SERVICE_TOKEN%
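
The only templated value in this new file is admin_token = %SERVICE_TOKEN%. A minimal sketch of how the placeholder gets filled in, assuming a throwaway destination path and token value (the sed call mirrors the one stack.sh runs in the hunk further down):

    #!/usr/bin/env bash
    # Render the paste template by filling in the %SERVICE_TOKEN% placeholder.
    # SERVICE_TOKEN and the /tmp destination are assumptions for illustration.
    SERVICE_TOKEN=${SERVICE_TOKEN:-secrete}
    cp files/nova-api-paste.ini /tmp/nova-api-paste.ini
    sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i /tmp/nova-api-paste.ini
    # The admin_token line should now carry the real token, not the placeholder.
    grep "^admin_token" /tmp/nova-api-paste.ini
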
stack.sh (41 lines changed)
@@ -232,7 +232,7 @@ VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE}
 # Multi-host is a mode where each compute node runs its own network node. This
 # allows network operations and routing for a VM to occur on the server that is
 # running the VM - removing a SPOF and bandwidth bottleneck.
-MULTI_HOST=${MULTI_HOST:-0}
+MULTI_HOST=${MULTI_HOST:-False}
 
 # If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE``
 # variable but make sure that the interface doesn't already have an
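
The default flips from 0 to the string False so later checks can compare against "False" directly (see the last hunk of this diff). A minimal sketch of that convention, with the echo standing in for the real flag handling:

    #!/usr/bin/env bash
    # True/False toggle convention enabled by the new default: the value is
    # compared as a string rather than treated as a number.
    MULTI_HOST=${MULTI_HOST:-False}
    if [ "$MULTI_HOST" != "False" ]; then
        echo "multi-host networking enabled"
    fi
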
@@ -325,7 +325,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
 # can never change.
 read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
 fi
-
+
 # Keystone
 # --------
 
@@ -564,13 +564,12 @@ fi
 # ----
 
 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-# We are going to use the sample http middleware configuration from the
-# keystone project to launch nova. This paste config adds the configuration
-# required for nova to validate keystone tokens - except we need to switch
-# the config to use our service token instead (instead of the invalid token
-# 999888777666).
-cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin
-sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
+# We are going to use a sample http middleware configuration based on the
+# one from the keystone project to launch nova. This paste config adds
+# the configuration required for nova to validate keystone tokens. We add
+# our own service token to the configuration.
+cp $FILES/nova-api-paste.ini $NOVA_DIR/bin
+sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
 fi
 
 if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
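
Blocks like this one are guarded by a substring match on ENABLED_SERVICES; with a quoted right-hand side, bash's =~ operator performs a literal substring test rather than a regex match. A small sketch with an assumed service list:

    #!/usr/bin/env bash
    # Sketch of the ENABLED_SERVICES guard used throughout stack.sh; the
    # service list here is an assumption for illustration.
    ENABLED_SERVICES="g-api,g-reg,key,n-api,n-cpu,n-net"
    for svc in n-api n-vol swift; do
        if [[ "$ENABLED_SERVICES" =~ "$svc" ]]; then
            echo "$svc is enabled"
        else
            echo "$svc is disabled"
        fi
    done
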
@@ -652,13 +651,13 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
 USER_GROUP=$(id -g)
 sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives
 sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives
-
+
 # We then create a loopback disk and format it to XFS.
 if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]];then
 mkdir -p ${SWIFT_DATA_LOCATION}/drives/images
 sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img
 sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img
-
+
 dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \
 bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
 mkfs.xfs -f -i size=1024 ${SWIFT_DATA_LOCATION}/drives/images/swift.img
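
This hunk only strips trailing whitespace, but it sits in the loopback-disk setup: dd with count=0 and a seek offset creates a sparse file of SWIFT_LOOPBACK_DISK_SIZE 1 KiB blocks, and mkfs.xfs formats it in place. A minimal standalone sketch with an assumed path and size:

    #!/usr/bin/env bash
    # Sparse loopback image, mirroring the dd/mkfs.xfs pattern above.
    # IMG and the size are assumptions for illustration.
    IMG=/tmp/swift-test.img
    SWIFT_LOOPBACK_DISK_SIZE=1000000   # in 1 KiB blocks
    dd if=/dev/zero of=$IMG bs=1024 count=0 seek=$SWIFT_LOOPBACK_DISK_SIZE
    mkfs.xfs -f -i size=1024 $IMG
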
@@ -675,9 +674,9 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
 # We then create link to that mounted location so swift would know
 # where to go.
 for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done
-
+
 # We now have to emulate a few different servers into one we
-# create all the directories needed for swift
+# create all the directories needed for swift
 tmpd=""
 for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \
 ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \
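
Again a whitespace-only change, but the surrounding loop is what fans one mounted XFS device out into four pseudo-node directories. Isolated below, with an assumed data path:

    #!/usr/bin/env bash
    # One mounted device exposed as four per-node paths, as in the hunk above.
    # SWIFT_DATA_LOCATION is an assumed path.
    SWIFT_DATA_LOCATION=/srv/swift
    for x in {1..4}; do
        sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x
    done
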
@@ -693,7 +692,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
 # swift-init has a bug using /etc/swift until bug #885595 is fixed
 # we have to create a link
 sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift
-
+
 # Swift use rsync to syncronize between all the different
 # partitions (which make more sense when you have a multi-node
 # setup) we configure it with our version of rsync.
@@ -729,7 +728,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
 local bind_port=$2
 local log_facility=$3
 local node_number
-
+
 for node_number in {1..4};do
 node_path=${SWIFT_DATA_LOCATION}/${node_number}
 sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
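
The sed line above stamps out one server config per node by filling several %PLACEHOLDER% tokens in a single pass; its continuation line is not shown in this hunk, so the template and output names in the sketch below are hypothetical:

    #!/usr/bin/env bash
    # Per-node config templating; only the substitution list comes from the
    # hunk above, the template and output file names are hypothetical.
    SWIFT_CONFIG_LOCATION=/etc/swift
    SWIFT_DATA_LOCATION=/srv/swift
    bind_port=6010
    log_facility=2
    node_number=1
    node_path=${SWIFT_DATA_LOCATION}/${node_number}
    sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
        object-server.conf.template > ${SWIFT_CONFIG_LOCATION}/object-server/${node_number}.conf
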
@@ -756,14 +755,14 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
 
 # We then can start rsync.
 sudo /etc/init.d/rsync restart || :
-
+
 # Create our ring for the object/container/account.
 /usr/local/bin/swift-remakerings
 
 # And now we launch swift-startmain to get our cluster running
 # ready to be tested.
 /usr/local/bin/swift-startmain || :
-
+
 unset s swift_hash swift_auth_server tmpd
 fi
 
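
swift-remakerings and swift-startmain are helper scripts installed earlier in stack.sh, not part of this diff. Roughly, the ring rebuild boils down to swift-ring-builder calls like the hedged sketch below; the part power, replica count, ports, and device names are assumptions:

    #!/usr/bin/env bash
    # Hedged sketch of rebuilding one ring (object); all values are assumptions.
    cd /etc/swift
    rm -f object.builder object.ring.gz
    swift-ring-builder object.builder create 9 3 1
    for node in 1 2 3 4; do
        swift-ring-builder object.builder add z${node}-127.0.0.1:60${node}0/${node} 1
    done
    swift-ring-builder object.builder rebalance
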
@@ -830,12 +829,12 @@ add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
 if [ -n "$INSTANCES_PATH" ]; then
 add_nova_flag "--instances_path=$INSTANCES_PATH"
 fi
-if [ -n "$MULTI_HOST" ]; then
-add_nova_flag "--multi_host=$MULTI_HOST"
-add_nova_flag "--send_arp_for_ha=1"
+if [ "$MULTI_HOST" != "False" ]; then
+add_nova_flag "--multi_host"
+add_nova_flag "--send_arp_for_ha"
 fi
 if [ "$SYSLOG" != "False" ]; then
-add_nova_flag "--use_syslog=1"
+add_nova_flag "--use_syslog"
 fi
 
 # XenServer
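
add_nova_flag is a small helper defined earlier in stack.sh and not shown in this diff; it appends one line to nova's flag file. The hunk switches the multi-host and syslog flags to bare boolean form. A sketch of the assumed helper together with the new calls:

    #!/usr/bin/env bash
    # Flag-file pattern; the helper body and NOVA_DIR default are assumptions,
    # the flag names come from the hunk above.
    NOVA_DIR=${NOVA_DIR:-/opt/stack/nova}
    mkdir -p $NOVA_DIR/bin
    function add_nova_flag {
        echo "$1" >> $NOVA_DIR/bin/nova.conf
    }
    MULTI_HOST=${MULTI_HOST:-False}
    if [ "$MULTI_HOST" != "False" ]; then
        add_nova_flag "--multi_host"
        add_nova_flag "--send_arp_for_ha"
    fi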