2014-12-05 14:25:28 -05:00
|
|
|
|
#!/bin/bash
|
|
|
|
|
#
|
2014-01-30 15:37:40 -06:00
|
|
|
|
# functions - DevStack-specific functions
|
2012-03-27 14:50:45 -05:00
|
|
|
|
#
|
2012-08-28 17:43:40 -05:00
|
|
|
|
# The following variables are assumed to be defined by certain functions:
|
2013-10-24 11:27:02 +01:00
|
|
|
|
#
|
2014-02-17 11:00:42 -06:00
|
|
|
|
# - ``DATABASE_BACKENDS``
|
2013-10-24 11:27:02 +01:00
|
|
|
|
# - ``ENABLED_SERVICES``
|
|
|
|
|
# - ``FILES``
|
|
|
|
|
# - ``GLANCE_HOSTPORT``
|
2014-02-17 11:00:42 -06:00
|
|
|
|
#
|
2012-03-27 14:50:45 -05:00
|
|
|
|
|
2015-06-30 11:00:32 +10:00
|
|
|
|
# ensure we don't re-source this in the same environment
[[ -z "$_DEVSTACK_FUNCTIONS" ]] || return 0
# Mark the file as sourced; readonly (-r) so a later source cannot clear
# the guard, global (-g) so it survives being set inside a function.
declare -r -g _DEVSTACK_FUNCTIONS=1
|
2015-06-30 11:00:32 +10:00
|
|
|
|
|
2014-01-30 15:37:40 -06:00
|
|
|
|
# Include the common functions
# Resolve the directory containing this file so the companion libraries
# can be sourced with absolute paths regardless of the caller's cwd.
FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
source ${FUNC_DIR}/functions-common
source ${FUNC_DIR}/inc/ini-config
source ${FUNC_DIR}/inc/meta-config
source ${FUNC_DIR}/inc/python
source ${FUNC_DIR}/inc/rootwrap
source ${FUNC_DIR}/inc/async
|
2012-01-31 12:11:56 -06:00
|
|
|
|
|
2012-03-16 16:16:56 -05:00
|
|
|
|
# Save trace setting so the caller's xtrace state can be restored after
# this file finishes sourcing; disable it while we define functions.
_XTRACE_FUNCTIONS=$(set +o | grep xtrace)
set +o xtrace
|
|
|
|
|
|
2014-06-03 16:05:12 +10:00
|
|
|
|
# Check if a function already exists
|
|
|
|
|
function function_exists {
    # Report via exit status whether a shell function named $1 is
    # currently defined; ``declare -F`` succeeds only for known functions.
    declare -F "$1" > /dev/null
}
|
2012-01-31 12:11:56 -06:00
|
|
|
|
|
2016-03-21 17:00:51 -04:00
|
|
|
|
# short_source prints out the current location of the caller in a way
|
|
|
|
|
# that strips redundant directories. This is useful for PS4 usage.
|
|
|
|
|
function short_source {
    # Print the caller's file:line:function, trimmed of redundant
    # directories, padded to a fixed width for use in PS4 traces.
    #
    # ``caller`` output is space separated; force IFS so the array
    # assignment splits into (lineno funcname filename), then restore it.
    saveIFS=$IFS
    IFS=" "
    called=($(caller 0))
    IFS=$saveIFS
    # Strip the DevStack root prefix so trace lines stay short.
    file=${called[2]}
    file=${file#$RC_DIR/}
    # Fixed 40-column field keeps trace output aligned.
    printf "%-40s " "$file:${called[1]}:${called[0]}"
}
|
2016-05-05 12:50:52 -07:00
|
|
|
|
# PS4 is exported to child shells and uses the 'short_source' function, so
|
|
|
|
|
# export it so child shells have access to the 'short_source' function also.
|
|
|
|
|
export -f short_source
|
2016-03-21 17:00:51 -04:00
|
|
|
|
|
2017-09-03 12:13:59 -05:00
|
|
|
|
# Download a file from a URL
|
|
|
|
|
#
|
|
|
|
|
# Will check cache (in $FILES) or download given URL.
|
|
|
|
|
#
|
|
|
|
|
# Argument is the URL to the remote file
|
|
|
|
|
#
|
|
|
|
|
# Will echo the local path to the file as the output. Will die on
|
|
|
|
|
# failure to download.
|
|
|
|
|
#
|
|
|
|
|
# Files can be pre-cached for CI environments, see EXTRA_CACHE_URLS
|
|
|
|
|
# and tools/image_list.sh
|
|
|
|
|
function get_extra_file {
    # Download a file from a URL, caching it in $FILES.
    #
    # $1 - URL of the remote file, or a ``file://`` URL
    #
    # Echoes the local path to the file on stdout. Dies on failure to
    # download. Files can be pre-cached for CI environments, see
    # EXTRA_CACHE_URLS and tools/image_list.sh.
    local file_url=$1

    file_name=$(basename "$file_url")
    if [[ $file_url != file* ]]; then
        # If the file isn't cached, download it
        if [[ ! -f $FILES/$file_name ]]; then
            wget --progress=dot:giga -t 2 -c $file_url -O $FILES/$file_name
            if [[ $? -ne 0 ]]; then
                die "$file_url could not be downloaded"
            fi
        fi
        echo "$FILES/$file_name"
        return
    else
        # Just strip the file:// scheme and that's the path to the file.
        # NOTE: the previous pattern ('s/$file:\/\///g') contained a
        # literal '$file' (single quotes, '$' not at end of the regex) so
        # it never matched and the prefix was never removed; anchor and
        # strip ``file://`` properly.
        echo $file_url | sed 's/^file:\/\///g'
    fi
}
|
|
|
|
|
|
2020-07-15 14:41:38 -07:00
|
|
|
|
# Generate image property arguments for OSC
|
|
|
|
|
#
|
|
|
|
|
# Arguments: properties, one per, like propname=value
|
|
|
|
|
#
|
|
|
|
|
# Result is --property propname1=value1 --property propname2=value2
|
|
|
|
|
function _image_properties_to_arg {
    # Generate image property arguments for OSC.
    #
    # Arguments: properties, one per, like propname=value
    #
    # Result is --property propname1=value1 --property propname2=value2
    local arg_string=""
    local prop
    for prop in $*; do
        arg_string+=" --property $prop"
    done
    # Unquoted echo collapses the leading space built up above.
    echo $arg_string
}
|
|
|
|
|
|
2020-06-16 15:20:48 +00:00
|
|
|
|
# Upload an image to glance using the configured mechanism
|
|
|
|
|
#
|
|
|
|
|
# Arguments:
|
|
|
|
|
# image name
|
|
|
|
|
# container format
|
|
|
|
|
# disk format
|
|
|
|
|
# path to image file
|
|
|
|
|
# optional properties (format of propname=value)
|
|
|
|
|
#
|
|
|
|
|
function _upload_image {
    # Upload an image to glance using the configured mechanism.
    #
    # Arguments:
    #   $1   - image name
    #   $2   - container format
    #   $3   - disk format
    #   $4   - path to image file
    #   $5.. - optional properties (format of propname=value)
    local image_name="$1"
    local container="$2"
    local disk="$3"
    local image="$4"
    shift 4
    local properties
    local useimport

    # Remaining positional args become --property flags.
    properties=$(_image_properties_to_arg $*)

    # Optionally drive the interoperable image import workflow.
    if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then
        useimport="--import"
    fi

    openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties < "${image}"
}
|
2016-03-21 17:00:51 -04:00
|
|
|
|
|
2013-10-05 12:11:07 +01:00
|
|
|
|
# Retrieve an image from a URL and upload into Glance.
|
2012-04-13 15:58:37 -05:00
|
|
|
|
# Uses the following variables:
|
2013-10-05 12:11:07 +01:00
|
|
|
|
#
|
|
|
|
|
# - ``FILES`` must be set to the cache dir
|
|
|
|
|
# - ``GLANCE_HOSTPORT``
|
|
|
|
|
#
|
2015-09-22 19:38:02 +00:00
|
|
|
|
# upload_image image-url
|
2014-02-21 15:35:08 +11:00
|
|
|
|
function upload_image {
    # Retrieve an image from a URL (or file:// path) and upload it into
    # Glance, dispatching on the file extension to pick disk/container
    # formats and any format-specific handling (UEC tarballs, OpenVZ,
    # VMDK, Virtuozzo .hds, compressed images).
    #
    # $1 - URL of the image
    #
    # Uses globals ``FILES`` (cache dir), ``VIRT_DRIVER``,
    # ``LIBVIRT_TYPE``, and architecture helpers.
    local image_url=$1

    local image image_fname image_name

    # Create a directory for the downloaded image tarballs.
    mkdir -p $FILES/images
    image_fname=`basename "$image_url"`
    if [[ $image_url != file* ]]; then
        # Downloads the image (uec ami+akistyle), then extracts it.
        # A zero-length cached file is treated as absent (failed download).
        if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then
            wget --progress=dot:giga -c $image_url -O $FILES/$image_fname
            if [[ $? -ne 0 ]]; then
                echo "Not found: $image_url"
                return
            fi
        fi
        image="$FILES/${image_fname}"
    else
        # File based URL (RFC 1738): ``file://host/path``
        # Remote files are not considered here.
        # unix: ``file:///home/user/path/file``
        # windows: ``file:///C:/Documents%20and%20Settings/user/path/file``
        image=$(echo $image_url | sed "s/^file:\/\///g")
        if [[ ! -f $image || "$(stat -c "%s" $image)" == "0" ]]; then
            echo "Not found: $image_url"
            return
        fi
    fi

    # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading
    if [[ "$image_url" =~ 'openvz' ]]; then
        image_name="${image_fname%.tar.gz}"
        _upload_image "$image_name" ami ami "$image"
        return
    fi

    # vmdk format images
    if [[ "$image_url" =~ '.vmdk' ]]; then
        image_name="${image_fname%.vmdk}"

        # Before we can upload vmdk type images to glance, we need to know it's
        # disk type, storage adapter, and networking adapter. These values are
        # passed to glance as custom properties.
        # We take these values from the vmdk file if populated. Otherwise, we use
        # vmdk filename, which is expected in the following format:
        #
        # <name>-<disk type>;<storage adapter>;<network adapter>
        #
        # If the filename does not follow the above format then the vsphere
        # driver will supply default values.

        local vmdk_disktype=""
        local vmdk_net_adapter="e1000"
        local path_len

        # vmdk adapter type: parsed from the ``ddb.adapterType = "..."``
        # line in the descriptor header, then stripped of the quotes.
        local vmdk_adapter_type
        vmdk_adapter_type="$(head -25 $image | { grep -a -F -m 1 'ddb.adapterType =' $image || true; })"
        vmdk_adapter_type="${vmdk_adapter_type#*\"}"
        vmdk_adapter_type="${vmdk_adapter_type%?}"

        # vmdk disk type: parsed from the ``createType="..."`` line.
        local vmdk_create_type
        vmdk_create_type="$(head -25 $image | { grep -a -F -m 1 'createType=' $image || true; })"
        vmdk_create_type="${vmdk_create_type#*\"}"
        vmdk_create_type="${vmdk_create_type%\"*}"

        descriptor_data_pair_msg="Monolithic flat and VMFS disks "`
            `"should use a descriptor-data pair."
        if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then
            vmdk_disktype="sparse"
        elif [[ "$vmdk_create_type" = "monolithicFlat" || "$vmdk_create_type" = "vmfs" ]]; then
            # Attempt to retrieve the ``*-flat.vmdk``
            local flat_fname
            flat_fname="$(head -25 $image | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $image || true; })"
            flat_fname="${flat_fname#*\"}"
            flat_fname="${flat_fname%?}"
            if [[ -z "$flat_fname" ]]; then
                flat_fname="$image_name-flat.vmdk"
            fi
            # Rebuild the sibling URL next to the descriptor.
            path_len=`expr ${#image_url} - ${#image_fname}`
            local flat_url="${image_url:0:$path_len}$flat_fname"
            warn $LINENO "$descriptor_data_pair_msg"`
                `" Attempt to retrieve the *-flat.vmdk: $flat_url"
            if [[ $flat_url != file* ]]; then
                if [[ ! -f $FILES/$flat_fname || \
                    "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then
                    wget --progress=dot:giga -c $flat_url -O $FILES/$flat_fname
                fi
                image="$FILES/${flat_fname}"
            else
                image=$(echo $flat_url | sed "s/^file:\/\///g")
                if [[ ! -f $image || "$(stat -c "%s" $image)" == "0" ]]; then
                    echo "Flat disk not found: $flat_url"
                    return 1
                fi
            fi
            image_name="${flat_fname}"
            vmdk_disktype="preallocated"
        elif [[ "$vmdk_create_type" = "streamOptimized" ]]; then
            vmdk_disktype="streamOptimized"
        elif [[ -z "$vmdk_create_type" ]]; then
            # *-flat.vmdk provided: attempt to retrieve the descriptor (*.vmdk)
            # to retrieve appropriate metadata
            if [[ ${image_name: -5} != "-flat" ]]; then
                warn $LINENO "Expected filename suffix: '-flat'."`
                    `" Filename provided: ${image_name}"
            else
                descriptor_fname="${image_name:0:${#image_name} - 5}.vmdk"
                path_len=`expr ${#image_url} - ${#image_fname}`
                local flat_path="${image_url:0:$path_len}"
                local descriptor_url=$flat_path$descriptor_fname
                warn $LINENO "$descriptor_data_pair_msg"`
                    `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
                if [[ $flat_path != file* ]]; then
                    if [[ ! -f $FILES/$descriptor_fname || \
                        "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
                        wget -c $descriptor_url -O $FILES/$descriptor_fname
                    fi
                    descriptor_url="$FILES/$descriptor_fname"
                else
                    descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
                    if [[ ! -f $descriptor_url || \
                        "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
                        echo "Descriptor not found: $descriptor_url"
                        return 1
                    fi
                fi
                # Re-read the adapter type from the descriptor we just fetched.
                vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })"
                vmdk_adapter_type="${vmdk_adapter_type#*\"}"
                vmdk_adapter_type="${vmdk_adapter_type%?}"
            fi
            vmdk_disktype="preallocated"
        else
            vmdk_disktype="preallocated"
        fi

        # NOTE: For backwards compatibility reasons, colons may be used in place
        # of semi-colons for property delimiters but they are not permitted
        # characters in NTFS filesystems.
        property_string=`echo "$image_name" | { grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$' || true; }`
        # Filename-encoded properties override anything parsed from the file.
        IFS=':;' read -a props <<< "$property_string"
        vmdk_disktype="${props[0]:-$vmdk_disktype}"
        vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}"
        vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}"

        _upload_image "$image_name" bare vmdk "$image" vmware_disktype="$vmdk_disktype" vmware_adaptertype="$vmdk_adapter_type" hw_vif_model="$vmdk_net_adapter"

        return
    fi

    # Virtuozzo ploop images: vm_mode (exe/hvm) is encoded in the filename.
    if [[ "$image_url" =~ '.hds' ]]; then
        image_name="${image_fname%.hds}"
        vm_mode=${image_name##*-}
        if [[ $vm_mode != 'exe' && $vm_mode != 'hvm' ]]; then
            die $LINENO "Unknown vm_mode=${vm_mode} for Virtuozzo image"
        fi

        _upload_image "$image_name" bare ploop "$image" vm_mode=$vm_mode
        return
    fi

    local kernel=""
    local ramdisk=""
    local disk_format=""
    local container_format=""
    local unpack=""
    local img_property=""

    # NOTE(danms): If we're on libvirt/qemu or libvirt/kvm, set the hw_rng_model
    # to libvirt in the image properties.
    if [[ "$VIRT_DRIVER" == "libvirt" ]]; then
        if [[ "$LIBVIRT_TYPE" == "qemu" || "$LIBVIRT_TYPE" == "kvm" ]]; then
            img_property="hw_rng_model=virtio"
        fi
    fi

    # Dispatch on the filename extension to decide formats and unpacking.
    case "$image_fname" in
        *.tar.gz|*.tgz)
            # Extract ami and aki files
            [ "${image_fname%.tar.gz}" != "$image_fname" ] &&
                image_name="${image_fname%.tar.gz}" ||
                image_name="${image_fname%.tgz}"
            local xdir="$FILES/images/$image_name"
            rm -Rf "$xdir";
            mkdir "$xdir"
            tar -zxf $image -C "$xdir"
            # Pick the first matching file for each component; the trailing
            # ``true`` keeps the substitution from failing when none match.
            kernel=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do
                [ -f "$f" ] && echo "$f" && break; done; true)
            ramdisk=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do
                [ -f "$f" ] && echo "$f" && break; done; true)
            image=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do
                [ -f "$f" ] && echo "$f" && break; done; true)
            if [[ -z "$image_name" ]]; then
                image_name=$(basename "$image" ".img")
            fi
            ;;
        *.img)
            image_name=$(basename "$image" ".img")
            # Trust qemu-img's detection only for formats glance accepts;
            # anything else is uploaded as raw.
            local format
            format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }')
            if [[ ",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then
                disk_format=$format
            else
                disk_format=raw
            fi
            container_format=bare
            ;;
        *.img.gz)
            image_name=$(basename "$image" ".img.gz")
            disk_format=raw
            container_format=bare
            unpack=zcat
            ;;
        *.img.bz2)
            image_name=$(basename "$image" ".img.bz2")
            disk_format=qcow2
            container_format=bare
            unpack=bunzip2
            ;;
        *.qcow2)
            image_name=$(basename "$image" ".qcow2")
            disk_format=qcow2
            container_format=bare
            ;;
        *.qcow2.xz)
            image_name=$(basename "$image" ".qcow2.xz")
            disk_format=qcow2
            container_format=bare
            unpack=unxz
            ;;
        *.raw)
            image_name=$(basename "$image" ".raw")
            disk_format=raw
            container_format=bare
            ;;
        *.iso)
            image_name=$(basename "$image" ".iso")
            disk_format=iso
            container_format=bare
            ;;
        *.vhd|*.vhdx|*.vhd.gz|*.vhdx.gz)
            local extension="${image_fname#*.}"
            image_name=$(basename "$image" ".$extension")
            # Pull "vhd" or "vhdx" out of the filename for the disk format.
            disk_format=$(echo $image_fname | grep -oP '(?<=\.)vhdx?(?=\.|$)')
            container_format=bare
            if [ "${image_fname##*.}" == "gz" ]; then
                unpack=zcat
            fi
            ;;
        *) echo "Do not know what to do with $image_fname"; false;;
    esac

    # Architecture-specific image properties.
    if is_arch "ppc64le" || is_arch "ppc64" || is_arch "ppc"; then
        img_property="$img_property hw_cdrom_bus=scsi os_command_line=console=hvc0"
    fi

    if is_arch "aarch64"; then
        img_property="$img_property hw_machine_type=virt hw_cdrom_bus=scsi hw_scsi_model=virtio-scsi os_command_line='console=ttyAMA0'"
    fi

    if [ "$container_format" = "bare" ]; then
        if [ "$unpack" = "zcat" ]; then
            # Stream the decompressed image straight into the upload.
            _upload_image "$image_name" $container_format $disk_format <(zcat --force "$image") $img_property
        elif [ "$unpack" = "bunzip2" ]; then
            _upload_image "$image_name" $container_format $disk_format <(bunzip2 -cdk "$image") $img_property
        elif [ "$unpack" = "unxz" ]; then
            # NOTE(brtknr): unxz the file first and cleanup afterwards to
            # prevent timeout while Glance tries to upload image (e.g. to Swift).
            local tmp_dir
            local image_path
            tmp_dir=$(mktemp -d)
            image_path="$tmp_dir/$image_name"
            unxz -cv "${image}" > "$image_path"
            _upload_image "$image_name" $container_format $disk_format "$image_path" $img_property
            rm -rf $tmp_dir
        else
            _upload_image "$image_name" $container_format $disk_format "$image" $img_property
        fi
    else
        # Use glance client to add the kernel the root filesystem.
        # We parse the results of the first upload to get the glance ID of the
        # kernel for use when uploading the root filesystem.
        local kernel_id="" ramdisk_id="";
        if [ -n "$kernel" ]; then
            kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
        fi
        if [ -n "$ramdisk" ]; then
            ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
        fi
        _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property
    fi
}
|
|
|
|
|
|
2013-06-03 16:47:36 -05:00
|
|
|
|
|
2012-11-05 14:26:09 -06:00
|
|
|
|
# Set the database backend to use
|
|
|
|
|
# When called from stackrc/localrc DATABASE_BACKENDS has not been
|
|
|
|
|
# initialized yet, just save the configuration selection and call back later
|
|
|
|
|
# to validate it.
|
2013-10-05 12:11:07 +01:00
|
|
|
|
#
|
2019-10-17 19:34:05 +00:00
|
|
|
|
# ``$1`` - the name of the database backend to use (mysql, postgresql, ...)
|
2012-11-05 14:26:09 -06:00
|
|
|
|
function use_database {
    # Select the database backend to use.
    #
    # When called from stackrc/localrc, DATABASE_BACKENDS has not been
    # initialized yet: just record the selection and validate it later.
    #
    # $1 - the name of the database backend to use (mysql, postgresql, ...)
    if [[ -n "$DATABASE_BACKENDS" ]]; then
        # This should no longer get called...here for posterity
        use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1
    else
        # No backends registered means this is likely called from ``localrc``
        # This is now deprecated usage
        DATABASE_TYPE=$1
        deprecated "The database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated localrc"
    fi
}
|
|
|
|
|
|
2015-02-24 07:23:24 +00:00
|
|
|
|
# Macro for curl statements. curl requires the -g option for literal
# IPv6 addresses; honor a caller-provided override.
CURL_GET="${CURL_GET:-curl -g}"
|
2013-06-03 16:47:36 -05:00
|
|
|
|
|
2012-12-11 15:26:24 -06:00
|
|
|
|
# Wait for an HTTP server to start answering requests
|
|
|
|
|
# wait_for_service timeout url
|
2016-05-06 12:35:22 -04:00
|
|
|
|
#
|
|
|
|
|
# If the service we want is behind a proxy, the proxy may be available
|
|
|
|
|
# before the service. Compliant proxies will return a 503 in this case
|
|
|
|
|
# Loop until we get something else.
|
|
|
|
|
# Also check for the case where there is no proxy and the service just
|
|
|
|
|
# hasn't started yet. curl returns 7 for Failed to connect to host.
|
2014-02-21 15:35:08 +11:00
|
|
|
|
function wait_for_service {
    # Wait for an HTTP server to start answering requests.
    #
    # $1 - timeout in seconds
    # $2 - URL to poll
    #
    # If the service is behind a proxy, the proxy may answer before the
    # service does; compliant proxies return 503 in that case. curl exit
    # code 7 covers "failed to connect" when there is no proxy and the
    # service simply hasn't started. Loop until we see anything else.
    local timeout=$1
    local url=$2
    local rval=0
    time_start "wait_for_service"
    # The heredoc runs under ``timeout`` in a child bash; escaped ``\$``
    # expansions are evaluated in the child, unescaped ones here.
    timeout $timeout bash -x <<EOF || rval=$?
while [[ \$( ${CURL_GET} -k --noproxy '*' -s -o /dev/null -w '%{http_code}' ${url} ) == 503 || \$? -eq 7 ]]; do
    sleep 1
done
EOF
    time_stop "wait_for_service"
    return $rval
}
|
|
|
|
|
|
2017-07-28 11:29:18 +00:00
|
|
|
|
function wait_for_compute {
    # Wait for a nova-compute service to register itself, as seen via
    # ``openstack compute service list``.
    #
    # $1 - timeout in seconds
    #
    # Returns non-zero (and dumps the service list) if the service did
    # not appear before the timeout.
    local timeout=$1
    local rval=0
    local compute_hostname
    time_start "wait_for_service"
    # Prefer the host configured in nova.conf; fall back to the system
    # hostname when unset.
    compute_hostname=$(iniget $NOVA_CONF DEFAULT host)
    if [[ -z $compute_hostname ]]; then
        compute_hostname=$(hostname)
    fi
    # The heredoc runs under ``timeout`` in a child bash; escaped ``\$``
    # expansions are evaluated in the child, unescaped ones here.
    timeout $timeout bash -x <<EOF || rval=$?
ID=""
while [[ "\$ID" == "" ]]; do
    sleep 1
    if [[ "$VIRT_DRIVER" = 'fake' ]]; then
        # When using the fake driver the compute hostnames have a suffix of 1 to NUMBER_FAKE_NOVA_COMPUTE
        ID=\$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list --host `hostname`1 --service nova-compute -c ID -f value)
    else
        ID=\$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list --host "$compute_hostname" --service nova-compute -c ID -f value)
    fi
done
EOF
    time_stop "wait_for_service"
    # Figure out what's happening on platforms where this doesn't work
    if [[ "$rval" != 0 ]]; then
        echo "Didn't find service registered by hostname after $timeout seconds"
        openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list
    fi
    return $rval
}
|
|
|
|
|
|
2013-06-03 16:47:36 -05:00
|
|
|
|
|
2012-10-24 17:26:02 -07:00
|
|
|
|
# ping check
|
2019-05-21 14:17:11 +01:00
|
|
|
|
# Uses globals ``ENABLED_SERVICES``, ``TOP_DIR``, ``PRIVATE_NETWORK``
|
2015-04-16 08:58:32 -04:00
|
|
|
|
# ping_check <ip> [boot-timeout] [from_net] [expected]
|
2014-02-21 15:35:08 +11:00
|
|
|
|
function ping_check {
    # Verify (or verify the absence of) ICMP connectivity to an address.
    #
    # $1 - IP address to ping
    # $2 - timeout in seconds (default 30)
    # $3 - network to ping from; empty means ping from the local host
    # $4 - expected result: "True" (default) asserts the address IS
    #      reachable, anything else asserts it is NOT
    #
    # Dies on failure.
    local ip=$1
    local timeout=${2:-30}
    local from_net=${3:-""}
    local expected=${4:-True}
    local op="!"
    local failmsg="[Fail] Couldn't ping server"
    local ping_cmd="ping"

    # if we don't specify a from_net we're expecting things to work
    # fine from our local box.
    if [[ -n "$from_net" ]]; then
        # TODO(stephenfin): Is there any way neutron could be disabled now?
        if is_service_enabled neutron; then
            ping_cmd="$TOP_DIR/tools/ping_neutron.sh $from_net"
        fi
    fi

    # inverse the logic if we're testing no connectivity
    if [[ "$expected" != "True" ]]; then
        op=""
        failmsg="[Fail] Could ping server"
    fi

    # Because we've transformed this command so many times, print it
    # out at the end.
    local check_command="while $op $ping_cmd -c1 -w1 $ip; do sleep 1; done"
    echo "Checking connectivity with $check_command"

    if ! timeout $timeout sh -c "$check_command"; then
        die $LINENO $failmsg
    fi
}
|
|
|
|
|
|
2013-08-12 18:18:56 -07:00
|
|
|
|
# Get ip of instance
|
2014-02-21 15:35:08 +11:00
|
|
|
|
function get_instance_ip {
    # Look up the IP address of an instance on the given network.
    #
    # $1 - server name or UUID
    # $2 - network name whose address should be reported
    #
    # Echoes the IP; dies (after dumping the raw addresses field) if no
    # address could be parsed out.
    local vm_id=$1
    local network_name=$2
    local addresses
    local ip

    addresses=$(openstack server show -c addresses -f value "$vm_id")
    # Extract the IPv4 address following "<network_name>=" from the
    # addresses field.
    ip=$(echo $addresses | sed -n "s/^.*$network_name=\([0-9\.]*\).*$/\1/p")
    if [[ $ip = "" ]];then
        echo "addresses of server $vm_id : $addresses"
        die $LINENO "[Fail] Couldn't get ipaddress of VM"
    fi
    echo $ip
}
|
2013-06-03 16:47:36 -05:00
|
|
|
|
|
2012-10-24 17:26:02 -07:00
|
|
|
|
# ssh check
|
2012-10-29 11:25:29 -07:00
|
|
|
|
|
2013-06-03 16:47:36 -05:00
|
|
|
|
# ssh_check net-name key-file floating-ip default-user active-timeout
|
2014-02-21 15:35:08 +11:00
|
|
|
|
function ssh_check {
    # ssh_check net-name key-file floating-ip default-user active-timeout
    #
    # Dispatch the ssh reachability check to the neutron or nova-network
    # implementation depending on which networking service is enabled.
    if is_service_enabled neutron; then
        _ssh_check_neutron "$1" $2 $3 $4 $5
    else
        _ssh_check_novanet "$1" $2 $3 $4 $5
    fi
}
|
|
|
|
|
|
2014-02-21 15:35:08 +11:00
|
|
|
|
function _ssh_check_novanet {
    # Wait until an instance becomes reachable over ssh (nova-network path).
    #
    # $1 - network name (unused here; kept for interface parity with
    #      _ssh_check_neutron)
    # $2 - private key file
    # $3 - floating IP of the instance
    # $4 - login user on the instance
    # $5 - timeout in seconds
    #
    # Dies if the instance is not ssh-able within the timeout.
    local NET_NAME=$1
    local KEY_FILE=$2
    local FLOATING_IP=$3
    local DEFAULT_INSTANCE_USER=$4
    local ACTIVE_TIMEOUT=$5
    # (removed the unused ``probe_cmd`` local that previously lived here)
    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success; do sleep 1; done"; then
        die $LINENO "server didn't become ssh-able!"
    fi
}
|
2012-03-27 14:50:45 -05:00
|
|
|
|
|
2012-11-21 16:04:12 +01:00
|
|
|
|
|
|
|
|
|
# Get the location of the $module-rootwrap executables, where module is cinder
|
|
|
|
|
# or nova.
|
|
|
|
|
# get_rootwrap_location module
|
2014-02-21 15:35:08 +11:00
|
|
|
|
function get_rootwrap_location {
    # Emit the full path of the $module-rootwrap executable, where module
    # is cinder or nova.
    #
    # $1 - module name
    local svc=$1
    local prefix

    prefix=$(get_python_exec_prefix)
    echo "${prefix}/${svc}-rootwrap"
}
|
|
|
|
|
|
2013-06-03 16:47:36 -05:00
|
|
|
|
|
2013-04-11 12:04:36 +10:00
|
|
|
|
# Path permissions sanity check
# check_path_perm_sanity path
function check_path_perm_sanity {
    # Ensure no element of the path has 0700 permissions, which is very
    # likely to cause issues for daemons. Inspired by default 0700
    # homedir permissions on RHEL and common practice of making DEST in
    # the stack user's homedir.

    local real_path
    # Quote $1 so paths containing spaces resolve correctly
    real_path=$(readlink -f "$1")
    local rebuilt_path=""
    # Walk the path one component at a time, checking each prefix
    for i in $(echo ${real_path} | tr "/" " "); do
        rebuilt_path=$rebuilt_path"/"$i

        if [[ $(stat -c '%a' ${rebuilt_path}) = 700 ]]; then
            echo "*** DEST path element"
            echo "*** ${rebuilt_path}"
            echo "*** appears to have 0700 permissions."
            echo "*** This is very likely to cause fatal issues for DevStack daemons."

            # SKIP_PATH_SANITY lets operators accept the risk explicitly
            if [[ -n "$SKIP_PATH_SANITY" ]]; then
                return
            else
                echo "*** Set SKIP_PATH_SANITY to skip this check"
                die $LINENO "Invalid path permissions"
            fi
        fi
    done
}
|
|
|
|
|
|
2013-06-03 16:47:36 -05:00
|
|
|
|
|
2015-11-12 13:52:36 +11:00
|
|
|
|
# vercmp ver1 op ver2
# Compare VER1 to VER2
# - op is one of < <= == >= >
# - returns true if satisfied
# e.g.
# if vercmp 1.0 "<" 2.0; then
# ...
# fi
function vercmp {
    local lhs=$1
    local op=$2
    local rhs=$3
    local lowest

    # Let "sort -V" do the version-aware ordering; whichever of the two
    # versions comes out first is the lower one.
    lowest=$(echo -e "$lhs\n$rhs" | sort -V | head -1)

    case $op in
        "==")
            [ "$lhs" = "$rhs" ]
            return
            ;;
        "<")
            # strictly less: different, and lhs sorted first
            [ "$lhs" != "$rhs" ] && [ "$lowest" = "$lhs" ]
            return
            ;;
        ">")
            # strictly greater: different, and rhs sorted first
            [ "$lhs" != "$rhs" ] && [ "$lowest" = "$rhs" ]
            return
            ;;
        "<=")
            [ "$lowest" = "$lhs" ]
            return
            ;;
        ">=")
            [ "$lowest" = "$rhs" ]
            return
            ;;
        *)
            die $LINENO "unrecognised op: $op"
            ;;
    esac
}
|
2013-06-13 11:47:56 +00:00
|
|
|
|
|
2016-04-05 12:08:57 -04:00
|
|
|
|
# This sets up defaults we like in devstack for logging for tracking
# down issues, and makes sure everything is done the same between
# projects.
# NOTE(jh): Historically this function switched between three different
# functions: setup_systemd_logging, setup_colorized_logging and
# setup_standard_logging_identity. Since we always run with systemd now,
# this could be cleaned up, but the other functions may still be in use
# by plugins. Since deprecations haven't worked in the past, we'll just
# leave them in place.
# setup_logging conf-file
function setup_logging {
    # Quote the conf-file path so spaces do not split it
    setup_systemd_logging "$1"
}
|
|
|
|
|
|
2013-08-20 14:51:08 -07:00
|
|
|
|
# This function sets log formatting options for colorizing log
# output to stdout. It is meant to be called by lib modules.
# setup_colorized_logging conf-file
function setup_colorized_logging {
    local conf_file=$1
    # Add color to logging output
    # NOTE(review): the bracketed sequences below contain literal ANSI
    # escape (ESC) bytes that oslo.log emits for colored terminal output;
    # do not re-type these lines by hand.
    iniset $conf_file DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [[01;36m%(request_id)s [00;36m%(project_name)s %(user_name)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
    iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
    iniset $conf_file DEFAULT logging_debug_format_suffix "[00;33mfrom (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d[00m"
    iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s [01;35m%(instance)s[00m"
}
|
|
|
|
|
|
2017-03-21 20:50:24 -04:00
|
|
|
|
# Configure oslo.log options in conf-file for running under systemd.
# setup_systemd_logging conf-file
function setup_systemd_logging {
    local conf_file=$1
    # NOTE(sdague): this is a nice to have, and means we're using the
    # native systemd path, which provides for things like search on
    # request-id. However, there may be an eventlet interaction here,
    # so going off for now.
    USE_JOURNAL=$(trueorfalse False USE_JOURNAL)
    local pidstr=""
    if [[ "$USE_JOURNAL" == "True" ]]; then
        iniset $conf_file DEFAULT use_journal "True"
        # if we are using the journal directly, our process id is already correct
    else
        # not journald: embed the pid in the debug suffix so log lines
        # can be tied back to the emitting process
        pidstr="(pid=%(process)d) "
    fi
    # NOTE(review): the bracketed sequences below contain literal ANSI
    # escape (ESC) bytes for colored output; do not re-type them by hand.
    iniset $conf_file DEFAULT logging_debug_format_suffix "[00;33m{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}[00m"

    iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [[01;36m%(global_request_id)s %(request_id)s [00;36m%(project_name)s %(user_name)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
    iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
    iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s [01;35m%(instance)s[00m"
}
|
|
|
|
|
|
2016-04-05 12:08:57 -04:00
|
|
|
|
# Set the plain (non-colorized) oslo.log user identity format.
# setup_standard_logging_identity conf-file
function setup_standard_logging_identity {
    local conf_file=$1
    # Quote the path so conf files in space-containing dirs work
    iniset "$conf_file" DEFAULT logging_user_identity_format "%(project_name)s %(user_name)s"
}
|
|
|
|
|
|
2014-06-03 16:05:12 +10:00
|
|
|
|
# These functions are provided for basic fall-back functionality for
# projects that include parts of DevStack (Grenade). stack.sh will
# override these with more specific versions for DevStack (with fancy
# spinners, etc). We never override an existing version
if ! function_exists echo_summary; then
    # Fallback summary printer: plain echo of all arguments
    function echo_summary {
        echo $@
    }
fi
if ! function_exists echo_nolog; then
    # Fallback "print without logging": plain echo of all arguments
    function echo_nolog {
        echo $@
    }
fi
|
2014-01-30 15:37:40 -06:00
|
|
|
|
|
2014-01-06 18:09:26 +01:00
|
|
|
|
|
2020-07-24 15:44:34 -07:00
|
|
|
|
# create_disk - Create, configure, and mount a backing disk
# create_disk disk-image storage-data-dir loopback-disk-size
function create_disk {
    # (removed unused local node_number)
    local disk_image=${1}
    local storage_data_dir=${2}
    local loopback_disk_size=${3}
    local key

    # Build a unique-ish fstab comment key from the image path.
    # NOTE(review): 's#/.##' deletes the first "/" plus the following
    # character — verify this is the intended key derivation (it must
    # only match the same transform in destroy_disk).
    key=$(echo $disk_image | sed 's#/.##')
    key="devstack-$key"

    # Tear down any previous incarnation of this disk first
    destroy_disk $disk_image $storage_data_dir

    # Create an empty file of the correct size (and ensure the
    # directory structure up to that path exists)
    sudo mkdir -p $(dirname ${disk_image})
    sudo truncate -s ${loopback_disk_size} ${disk_image}

    # Make a fresh XFS filesystem. Use bigger inodes so xattr can fit in
    # a single inode. Keeping the default inode size (256) will result in multiple
    # inodes being used to store xattr. Retrieving the xattr will be slower
    # since we have to read multiple inodes. This statement is true for both
    # Swift and Ceph.
    sudo mkfs.xfs -f -i size=1024 ${disk_image}

    # Install a new loopback fstab entry for this disk image, and mount it
    echo "$disk_image $storage_data_dir xfs loop,noatime,nodiratime,logbufs=8,comment=$key 0 0" | sudo tee -a /etc/fstab
    sudo mkdir -p $storage_data_dir
    sudo mount -v $storage_data_dir
}
|
|
|
|
|
|
|
|
|
|
# Unmount, de-configure, and destroy a backing disk
# destroy_disk disk-image storage-data-dir
function destroy_disk {
    local disk_image=$1
    local storage_data_dir=$2
    local key

    # Must match the key derivation used in create_disk
    key=$(echo $disk_image | sed 's#/.##')
    key="devstack-$key"

    # Unmount the target, if mounted
    if egrep -q $storage_data_dir /proc/mounts; then
        sudo umount $storage_data_dir
    fi

    # Clear any fstab rules. FIX: use double quotes so $key actually
    # expands — with single quotes the pattern matched the literal
    # string "comment=$key", so stale fstab entries were never removed.
    sudo sed -i "/.*comment=$key.*/ d" /etc/fstab

    # Delete the file
    sudo rm -f $disk_image
}
|
|
|
|
|
|
2016-07-15 20:17:13 +02:00
|
|
|
|
|
|
|
|
|
# set_mtu - Set MTU on a device
# set_mtu dev mtu
function set_mtu {
    local dev=$1
    local mtu=$2
    # Quote expansions so an empty/whitespace argument fails loudly in
    # "ip" rather than silently dropping an argument
    sudo ip link set mtu "$mtu" dev "$dev"
}
|
|
|
|
|
|
|
|
|
|
|
2017-02-06 16:56:46 +02:00
|
|
|
|
# running_in_container - Returns true otherwise false
function running_in_container {
    # systemd-detect-virt prints "none" when no container runtime is
    # detected; any other value (lxc, docker, ...) means we are in one
    [[ $(systemd-detect-virt --container) != 'none' ]]
}
|
|
|
|
|
|
|
|
|
|
|
2016-09-29 13:26:30 +00:00
|
|
|
|
# enable_kernel_bridge_firewall - Enable kernel support for bridge firewalling
function enable_kernel_bridge_firewall {
    # Load bridge module. This module provides access to firewall for bridged
    # frames; and also on older kernels (pre-3.18) it provides sysctl knobs to
    # enable/disable bridge firewalling
    sudo modprobe bridge
    # For newer kernels (3.18+), those sysctl settings are split into a separate
    # kernel module (br_netfilter). Load it too, if present.
    # ("|| :" keeps a missing module from failing the script)
    sudo modprobe br_netfilter 2>> /dev/null || :
    # Enable bridge firewalling in case it's disabled in kernel (upstream
    # default is enabled, but some distributions may decide to change it).
    # This is at least needed for RHEL 7.2 and earlier releases.
    for proto in ip ip6; do
        sudo sysctl -w net.bridge.bridge-nf-call-${proto}tables=1
    done
}
|
|
|
|
|
|
|
|
|
|
|
2017-05-16 08:50:53 -07:00
|
|
|
|
# Set a systemd system override
#
# This sets a system-side override in system.conf. A per-service
# override would be /etc/systemd/system/${service}.service/override.conf
# set_systemd_override key value
function set_systemd_override {
    local key="$1"
    local value="$2"

    local sysconf="/etc/systemd/system.conf"
    iniset -sudo "${sysconf}" "Manager" "$key" "$value"
    echo "Set systemd system override for ${key}=${value}"

    # Make systemd pick up the edited system.conf
    sudo systemctl daemon-reload
}
|
|
|
|
|
|
2017-05-23 15:18:31 -04:00
|
|
|
|
# Get a random port from the local port range
#
# This function returns an available port in the local port range. The search
# order is not truly random, but should be considered a random value by the
# user because it depends on the state of your local system.
function get_random_port {
    # Declare locals so the scan does not leak globals into the caller
    local lower_port upper_port port
    # -r: do not mangle backslashes (none expected, but be safe)
    read -r lower_port upper_port < /proc/sys/net/ipv4/ip_local_port_range
    # NOTE(review): loops forever if every port in the range is in use
    while true; do
        for (( port = upper_port ; port >= lower_port ; port-- )); do
            # lsof exits non-zero when nothing is listening on the port;
            # test the command directly instead of string-comparing $?
            if ! sudo lsof -i ":$port" &> /dev/null; then
                break 2
            fi
        done
    done
    echo $port
}
|
|
|
|
|
|
2017-06-30 12:29:19 +10:00
|
|
|
|
# Save some state information
#
# Write out various useful state information to /etc/devstack-version
function write_devstack_version {
    # The heredoc expands DEVSTACK_SERIES / os_* vars and the git HEAD
    # info locally, then "sudo tee" writes the root-owned file; tee's
    # stdout copy is discarded.
    cat - <<EOF | sudo tee /etc/devstack-version >/dev/null
DevStack Version: ${DEVSTACK_SERIES}
Change: $(git log --format="%H %s %ci" -1)
OS Version: ${os_VENDOR} ${os_RELEASE} ${os_CODENAME}
EOF
}
|
|
|
|
|
|
2012-03-16 16:16:56 -05:00
|
|
|
|
# Restore the xtrace setting saved at the top of this file
$_XTRACE_FUNCTIONS

# Local variables:
# mode: shell-script
# End:
|