Deprecate murano-conductor
Change-Id: Idd817e4c8f2e0fb3b74734911569e640951eb571
This commit is contained in:
parent
8b6a39e319
commit
17cb2eb789
@ -1,2 +0,0 @@
|
||||
include requirements.txt
|
||||
recursive-include etc *
|
39
README.rst
39
README.rst
@ -1,37 +1,8 @@
|
||||
Murano
|
||||
======
|
||||
Murano Project introduces an application catalog, which allows application
|
||||
developers and cloud administrators to publish various cloud-ready
|
||||
applications in a browsable categorised catalog, which may be used by the
|
||||
cloud users (including the inexperienced ones) to pick-up the needed
|
||||
applications and services and compose reliable environments out of them
|
||||
in a “push-the-button” manner.
|
||||
DEPRECATED: murano-conductor
|
||||
============================
|
||||
|
||||
murano-conductor
|
||||
----------------
|
||||
murano-conductor is responsible for object model transformation into series of
|
||||
Heat and murano-agent commands.
|
||||
**Warning** - this repository is deprecated. murano-conductor is a part of
|
||||
murano-api now. Also, it was renamed to engine.
|
||||
|
||||
Project Resources
|
||||
-----------------
|
||||
* `Murano at Launchpad <http://launchpad.net/murano>`__
|
||||
* `Wiki <https://wiki.openstack.org/wiki/Murano>`__
|
||||
* `Code Review <https://review.openstack.org/>`__
|
||||
* `Sources <https://wiki.openstack.org/wiki/Murano/SourceCode>`__
|
||||
* `Developers Guide <http://murano-docs.github.io/latest/developers-guide/content/ch02.html>`__
|
||||
|
||||
How To Participate
|
||||
------------------
|
||||
If you would like to ask some questions or make proposals, feel free to reach
|
||||
us on #murano IRC channel at FreeNode. Typically somebody from our team will
|
||||
be online at IRC from 6:00 to 20:00 UTC. You can also contact Murano community
|
||||
directly by openstack-dev@lists.openstack.org adding [Murano] to a subject.
|
||||
|
||||
We’re holding public weekly meetings on Tuesdays at 17:00 UTC
|
||||
on #openstack-meeting-alt IRC channel at FreeNode.
|
||||
|
||||
If you want to contribute either to docs or to code, simply send us change
|
||||
request via `gerrit <https://review.openstack.org/>`__.
|
||||
You can `file bugs <https://bugs.launchpad.net/murano/+filebug>`__ and
|
||||
`register blueprints <https://blueprints.launchpad.net/murano/+addspec>`__ on
|
||||
Launchpad.
|
||||
See: `murano-api <https://git.openstack.org/cgit/stackforge/murano-api>`__
|
||||
|
379
common.inc
379
common.inc
@ -1,379 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Common functions file
|
||||
#
|
||||
DEBUGLVL=2
|
||||
RUN_DIR=${RUN_DIR:-$(cd $(dirname "$0") && pwd)}
|
||||
LOGFILE="$RUN_DIR/install.log"
|
||||
PIPAPPS="pip python-pip pip-python"
|
||||
PIPCMD=""
|
||||
PIPARGS=""
|
||||
TRBL_FILE=""
|
||||
|
||||
if [ "$DEBUGLVL" -eq 4 ]; then
|
||||
set -x
|
||||
fi
|
||||
function log {
|
||||
if [ "$DEBUGLVL" -gt 0 ]; then
|
||||
chars=$(echo "@$" | wc -c)
|
||||
case $DEBUGLVL in
|
||||
1)
|
||||
echo -e "LOG:>$@"
|
||||
;;
|
||||
2)
|
||||
echo -e "$(date +"%m-%d-%Y %H:%M") LOG:>$@" | tee --append $LOGFILE
|
||||
;;
|
||||
3)
|
||||
echo -e "$(date +"%m-%d-%Y %H:%M") LOG:>$@" >> $LOGFILE
|
||||
;;
|
||||
4)
|
||||
echo -e "$(date +"%m-%d-%Y %H:%M") LOG:>$@" | tee --append $LOGFILE
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
}
|
||||
function lowercase(){
|
||||
echo "$1" | sed "y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/"
|
||||
}
|
||||
function get_os(){
|
||||
KERNEL=$(uname -r)
|
||||
MACH=$(uname -m)
|
||||
OS=$(uname)
|
||||
if [ "${OS}" = "Linux" ] ; then
|
||||
if [ -f /etc/redhat-release ] ; then
|
||||
DISTRO_BASED_ON='RedHat'
|
||||
PACKAGER='yum'
|
||||
PKG_MGR='rpm'
|
||||
DIST=$(cat /etc/redhat-release |sed s/\ release.*//)
|
||||
PSUEDONAME=$(cat /etc/redhat-release | sed s/.*\(// | sed s/\)//)
|
||||
REV=$(cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//)
|
||||
elif [ -f /etc/SuSE-release ] ; then
|
||||
DISTRO_BASED_ON='SuSe'
|
||||
PACKAGER='zypper'
|
||||
PKG_MGR='rpm'
|
||||
PSUEDONAME=$(cat /etc/SuSE-release | tr "\n" ' '| sed s/VERSION.*//)
|
||||
REV=$(cat /etc/SuSE-release | tr "\n" ' ' | sed s/.*=\ //)
|
||||
elif [ -f /etc/debian_version ] ; then
|
||||
DISTRO_BASED_ON='Debian'
|
||||
PACKAGER='apt-get'
|
||||
PKG_MGR='dpkg'
|
||||
DIST=$(cat /etc/lsb-release | grep '^DISTRIB_ID' | awk -F= '{ print $2 }')
|
||||
PSUEDONAME=$(cat /etc/lsb-release | grep '^DISTRIB_CODENAME' | awk -F= '{ print $2 }')
|
||||
REV=$(cat /etc/lsb-release | grep '^DISTRIB_RELEASE' | awk -F= '{ print $2 }')
|
||||
fi
|
||||
if [ -f /etc/UnitedLinux-release ] ; then
|
||||
DIST="${DIST}[$(cat /etc/UnitedLinux-release | tr "\n" ' ' | sed s/VERSION.*//)]"
|
||||
fi
|
||||
OS=$(lowercase $OS)
|
||||
DISTRO_BASED_ON=$(lowercase $DISTRO_BASED_ON)
|
||||
readonly OS
|
||||
readonly DIST
|
||||
readonly DISTRO_BASED_ON
|
||||
readonly PSUEDONAME
|
||||
readonly REV
|
||||
readonly KERNEL
|
||||
readonly MACH
|
||||
#readonly PACKAGER
|
||||
else
|
||||
OS=unknown
|
||||
readonly OS
|
||||
log "Unsupported OS:\"$OS\", sorry, exiting!"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
function find_or_install()
|
||||
{
|
||||
_searching_for=$1
|
||||
_pkg_mrg_cmd=''
|
||||
_pkgr_cmd=''
|
||||
retval=0
|
||||
case $(lowercase $DISTRO_BASED_ON) in
|
||||
"debian")
|
||||
_pkg_mrg_cmd="$PKG_MGR -s $_searching_for"
|
||||
_pkgr_cmd="$PACKAGER install $_searching_for --yes"
|
||||
;;
|
||||
*)
|
||||
_pkg_mrg_cmd="$PKG_MGR -q $_searching_for"
|
||||
_pkgr_cmd="$PACKAGER install $_searching_for -y"
|
||||
;;
|
||||
esac
|
||||
$_pkg_mrg_cmd > /dev/null 2>&1
|
||||
if [ $? -eq 0 ]; then
|
||||
log "Package \"$_searching_for\" already installed"
|
||||
retval=2
|
||||
else
|
||||
log "Installing \"$_searching_for\"..."
|
||||
$_pkgr_cmd > /dev/null 2>&1
|
||||
if [ $? -ne 0 ];then
|
||||
log "...installation fails, exiting!"
|
||||
retval=1
|
||||
else
|
||||
log "...success"
|
||||
retval=0
|
||||
fi
|
||||
fi
|
||||
return $retval
|
||||
}
|
||||
function is_py_package_installed()
|
||||
{
|
||||
retval=0
|
||||
py_pkg=$1
|
||||
found_pkg=$($PIPCMD freeze | grep -E "^$py_pkg")
|
||||
if [ $? -ne 0 ]; then
|
||||
retval=1
|
||||
fi
|
||||
echo $found_pkg
|
||||
return $retval
|
||||
}
|
||||
function genpass()
|
||||
{
|
||||
echo $(date | md5sum |head -c${5:-13})
|
||||
}
|
||||
function shslash()
|
||||
{
|
||||
echo $1 | sed 's/\//\\\//g'
|
||||
}
|
||||
function split()
|
||||
{
|
||||
# Prefix local names with the function name to try to avoid conflicts
|
||||
# local split_wordlist
|
||||
split_wordlist="$1"
|
||||
shift
|
||||
read "$@" <<EOF-split-end-of-arguments
|
||||
${split_wordlist}
|
||||
EOF-split-end-of-arguments
|
||||
}
|
||||
# Returns true if v1 >= v2, false if v1 < v2
|
||||
function version_ge()
|
||||
{
|
||||
# Prefix local names with the function name to try to avoid conflicts
|
||||
# local version_ge_1 version_ge_2 version_ge_a version_ge_b
|
||||
# local version_ge_save_ifs
|
||||
version_ge_v1="$1"
|
||||
version_ge_v2="$2"
|
||||
version_ge_save_ifs="$IFS"
|
||||
while test -n "${version_ge_v1}${version_ge_v2}"; do
|
||||
IFS="."
|
||||
split "$version_ge_v1" version_ge_a version_ge_v1
|
||||
split "$version_ge_v2" version_ge_b version_ge_v2
|
||||
IFS="$version_ge_save_ifs"
|
||||
#echo " compare $version_ge_a $version_ge_b"
|
||||
test "0$version_ge_a" -gt "0$version_ge_b" && return 0 # v1>v2: true
|
||||
test "0$version_ge_a" -lt "0$version_ge_b" && return 1 # v1<v2:false
|
||||
done
|
||||
# version strings are both empty & no differences found - must be equal.
|
||||
return 0 # v1==v2: true
|
||||
}
|
||||
function find_pip()
|
||||
{
|
||||
_pipargs=""
|
||||
pip_min_ver="1.4"
|
||||
for cmd in $PIPAPPS
|
||||
do
|
||||
_cmd=$(which $cmd 2>/dev/null)
|
||||
if [ $? -eq 0 ];then
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ -z "$_cmd" ];then
|
||||
echo "Can't find \"pip\" in system, please install it first, exiting!"
|
||||
exit 1
|
||||
else
|
||||
_pip_ver=$($_cmd --version | grep -oE "[0-9]\.[0-9]" | head -n1)
|
||||
if [ -n "$_pip_ver" ]; then
|
||||
version_ge $_pip_ver $pip_min_ver
|
||||
if [ $? -ne 0 ]; then
|
||||
log "Upgrading pip ..."
|
||||
$_cmd install --upgrade pip==$pip_min_ver
|
||||
if [ $? -ne 0 ]; then
|
||||
log "...pip upgrade fails, exiting!"
|
||||
exit 1
|
||||
else
|
||||
log "...success"
|
||||
sleep 2
|
||||
for cmd in $PIPAPPS
|
||||
do
|
||||
_cmd=$(which $cmd 2>/dev/null)
|
||||
if [ $? -eq 0 ];then
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
||||
_pip_ver=$($_cmd --version | grep -oE "[0-9]\.[0-9]" | head -n1)
|
||||
version_ge $_pip_ver "1.5"
|
||||
if [ $? -eq 0 ]; then
|
||||
log "For future use, sorry, use pip v$pip_min_ver, exiting!"
|
||||
exit 1
|
||||
##_pipargs="--allow-unverified --allow-external"
|
||||
#_pipargs="--allow-all-external"
|
||||
#mk_dir "/root/.pip"
|
||||
#_pipcfg="/root/.pip/pip.conf"
|
||||
#if [ ! -f "$_pipcfg" ]; then
|
||||
# touch $_pipcfg
|
||||
#fi
|
||||
#iniset 'install' 'allow-all-external' 'true' "$_pipcfg"
|
||||
#iniset 'install' 'allow-all-unverified' 'true' "$_pipcfg"
|
||||
#log "Setuptools upgrade required..."
|
||||
#$cmd install setuptools --no-use-wheel --upgrade >> $LOGFILE 2>&1
|
||||
#if [ $? -ne 0 ]; then
|
||||
# log "...upgrade fails, exiting"
|
||||
# exit 1
|
||||
#else
|
||||
# log "...success"
|
||||
#fi
|
||||
fi
|
||||
log "Found pip version - $_pip_ver"
|
||||
fi
|
||||
PIPARGS=$_pipargs
|
||||
PIPCMD=$_cmd
|
||||
fi
|
||||
}
|
||||
function add_daemon_credentials()
|
||||
{
|
||||
retval=0
|
||||
daemonuser=${1:-murano}
|
||||
daemongroup=${2:-murano}
|
||||
daemonhomedir=${3:-/home/$daemonuser}
|
||||
getent group $daemongroup > /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
log "Creating group \"$daemongroup\"..."
|
||||
groupadd -r $daemongroup
|
||||
if [ $? -eq 0 ]; then
|
||||
log "...success"
|
||||
else
|
||||
log "Can't create \"$daemongroup\", exiting!"
|
||||
retval=1
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
log "Group \"$daemongroup\" exists"
|
||||
fi
|
||||
getent passwd $daemonuser > /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
log "Creating user \"$daemonuser\"..."
|
||||
useradd -r -g $daemongroup -G $daemongroup -d $daemonhomedir -s $(which nologin) -c "Murano Daemons" $daemonuser
|
||||
if [ $? -eq 0 ]; then
|
||||
log "...success"
|
||||
else
|
||||
log "Can't create \"$daemonuser\", exiting!"
|
||||
retval=1
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
log "User \"$daemonuser\" exists"
|
||||
fi
|
||||
return $retval
|
||||
}
|
||||
function remove_daemon_credentials()
|
||||
{
|
||||
retval=0
|
||||
daemonuser=${1:-murano}
|
||||
daemongroup=${2:-murano}
|
||||
daemonhomedir=${3:-/home/$daemonuser}
|
||||
getent passwd $daemonuser > /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
log "Deleting user \"$daemonuser\"..."
|
||||
userdel -f $daemonuser
|
||||
if [ $? -eq 0 ]; then
|
||||
if [ -d "$daemonhomedir" ]; then
|
||||
rm -rf $daemonhomedir
|
||||
fi
|
||||
log "...success"
|
||||
else
|
||||
log "Can't delete \"$daemonuser\", exiting!"
|
||||
retval=1
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
getent group $daemongroup > /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
log "Deleting group \"$daemongroup\"..."
|
||||
groupdel $daemongroup
|
||||
if [ $? -eq 0 ]; then
|
||||
log "...success"
|
||||
else
|
||||
log "Can't delete \"$daemongroup\", exiting!"
|
||||
retval=1
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
return $retval
|
||||
}
|
||||
function iniset()
|
||||
{
|
||||
local section=$1
|
||||
local option=$2
|
||||
local value=$3
|
||||
local file=$4
|
||||
local line
|
||||
|
||||
if [ -z "$section" ] ; then
|
||||
# No section name specified
|
||||
sed -i -e "s/^\($option[ \t]*=[ \t]*\).*$/\1$value/" "$file"
|
||||
else
|
||||
# Check if section already exists
|
||||
if ! grep -q "^\[$section\]" "$file" ; then
|
||||
# Add section at the end
|
||||
echo -e "\n[$section]" >>"$file"
|
||||
fi
|
||||
|
||||
# Check if parameter in the section exists
|
||||
line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
|
||||
if [ -z "$line" ] ; then
|
||||
# Add parameter if it is not exists
|
||||
sed -i -e "/^\[$section\]/ a\\
|
||||
$option = $value
|
||||
" "$file"
|
||||
else
|
||||
# Replace existing parameter
|
||||
sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
function mk_dir()
|
||||
{
|
||||
retval=0
|
||||
path_to_check=$1
|
||||
if [ -d "$path_to_check" ]; then
|
||||
log "Path \"$path_to_check\" already exists."
|
||||
elif [ -f "$path_to_check" ]; then
|
||||
log "Path \"path_to_check\" is an existing file, exiting!"
|
||||
exit 1
|
||||
else
|
||||
mkdir -p "$path_to_check"
|
||||
if [ $? -ne 0 ]; then
|
||||
log "Can't create \"$path_to_check\", exiting!"
|
||||
retval=1
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
if [ $# -eq 3 ]; then
|
||||
owner_user=$2
|
||||
owner_group=$3
|
||||
chown -R $owner_user:$owner_group $path_to_check
|
||||
if [ $? -ne 0 ]; then
|
||||
log "Can't set ownership to \"$owner_user:$owner_group\" for \"$path_to_check\", exiting!"
|
||||
retval=1
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
return $retval
|
||||
}
|
||||
function get_service_exec_path()
|
||||
{
|
||||
retval=0
|
||||
if [ -z "$SERVICE_EXEC_PATH" ]; then
|
||||
SERVICE_EXEC_PATH=$(which $DAEMON_NAME)
|
||||
if [ $? -ne 0 ]; then
|
||||
log "Can't find \"$DAEMON_NAME\", please install the \"$SERVICE_SRV_NAME\" by running \"$(basename "$0") install\" or set variable SERVICE_EXEC_PATH=/path/to/daemon before running setup script, exiting!"
|
||||
retval=1
|
||||
fi
|
||||
else
|
||||
if [ ! -x "$SERVICE_EXEC_PATH" ]; then
|
||||
log "\"$SERVICE_EXEC_PATH\" in not executable, please install the \"$DAEMON_NAME\" or set variable SERVICE_EXEC_PATH=/path/to/daemon before running setup script, exiting!"
|
||||
retval=1
|
||||
fi
|
||||
fi
|
||||
return $retval
|
||||
}
|
@ -1,2 +0,0 @@
|
||||
[theme]
|
||||
inherit = default
|
@ -1,242 +0,0 @@
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2010 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
#
|
||||
# Conductor documentation build configuration file, created by
|
||||
# sphinx-quickstart on Tue February 28 13:50:15 2013.
|
||||
#
|
||||
# This file is execfile()'d with the current directory set to its containing
|
||||
# dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
sys.path = [os.path.abspath('../../conductor'),
|
||||
os.path.abspath('../..'),
|
||||
os.path.abspath('../../bin')
|
||||
] + sys.path
|
||||
|
||||
# -- General configuration ---------------------------------------------------
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = ['sphinx.ext.coverage',
|
||||
'sphinx.ext.ifconfig',
|
||||
'sphinx.ext.intersphinx',
|
||||
'sphinx.ext.pngmath',
|
||||
'sphinx.ext.graphviz']
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = []
|
||||
if os.getenv('HUDSON_PUBLISH_DOCS'):
|
||||
templates_path = ['_ga', '_templates']
|
||||
else:
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'Conductor'
|
||||
copyright = u'2013, Mirantis, Inc.'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
from muranoconductor.version import version_info as conductor_version
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = conductor_version.version_string_with_vcs()
|
||||
# The short X.Y version.
|
||||
version = conductor_version.canonical_version_string()
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of documents that shouldn't be included in the build.
|
||||
#unused_docs = []
|
||||
|
||||
# List of directories, relative to source directory, that shouldn't be searched
|
||||
# for source files.
|
||||
exclude_trees = ['api']
|
||||
|
||||
# The reST default role (for this markup: `text`) to use for all documents.
|
||||
#default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
show_authors = True
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
modindex_common_prefix = ['portas.']
|
||||
|
||||
# -- Options for man page output --------------------------------------------
|
||||
|
||||
# Grouping the document tree for man pages.
|
||||
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
|
||||
|
||||
man_pages = [
|
||||
('man/conductor', 'conductor', u'Conductor Orchestrator',
|
||||
[u'Mirantis, Inc.'], 1)
|
||||
]
|
||||
|
||||
|
||||
# -- Options for HTML output -------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. Major themes that come with
|
||||
# Sphinx are currently 'default' and 'sphinxdoc'.
|
||||
html_theme_path = ["."]
|
||||
html_theme = '_theme'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = ['_theme']
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
#html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
#html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
#html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
#html_last_updated_fmt = '%b %d, %Y'
|
||||
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
|
||||
html_last_updated_fmt = os.popen(git_cmd).read()
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
html_use_modindex = False
|
||||
|
||||
# If false, no index is generated.
|
||||
html_use_index = False
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#html_show_sourcelink = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
|
||||
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = ''
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'conductordoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output ------------------------------------------------
|
||||
|
||||
# The paper size ('letter' or 'a4').
|
||||
#latex_paper_size = 'letter'
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#latex_font_size = '10pt'
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# documentclass [howto/manual]).
|
||||
latex_documents = [
|
||||
('index', 'Conductor.tex', u'Conductor Documentation',
|
||||
u'Murano Team', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
#latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
#latex_use_parts = False
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#latex_preamble = ''
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#latex_use_modindex = True
|
||||
|
||||
# Example configuration for intersphinx: refer to the Python standard library.
|
||||
intersphinx_mapping = {'python': ('http://docs.python.org/', None)}
|
@ -1,147 +0,0 @@
|
||||
..
|
||||
Copyright 2010 OpenStack Foundation
|
||||
All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
not use this file except in compliance with the License. You may obtain
|
||||
a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
=======================================================
|
||||
Welcome to Conductor, the Murano orchestration engine!
|
||||
=======================================================
|
||||
|
||||
Conductor is a Murano orchestration engine that transforms the object model sent by
|
||||
REST API service into a series of Heat and Murano-Agent commands
|
||||
|
||||
This document describes Conductor for contributors of the project.
|
||||
|
||||
This documentation is generated by the Sphinx toolkit and lives in the source
|
||||
tree.
|
||||
|
||||
Installation Guide
|
||||
==================
|
||||
Install
|
||||
-------
|
||||
|
||||
1. Check out sources to some directory (<home>/murano)::
|
||||
|
||||
user@work:~/git clone https://github.com/Mirantis/murano-conductor.git
|
||||
|
||||
2. Install Conductor::
|
||||
|
||||
user@work:~/cd murano/conductor && sudo python setup.py install
|
||||
|
||||
Configure
|
||||
---------
|
||||
|
||||
1. Open configuration file for editing::
|
||||
|
||||
user@work:~/cd murano/muranoconductor/etc && nano conductor.conf
|
||||
|
||||
2. Configure according to you environment::
|
||||
|
||||
[DEFAULT]
|
||||
|
||||
# Path where log will be written
|
||||
log_file = /tmp/conductor.log
|
||||
|
||||
# Log verbosity
|
||||
debug=True
|
||||
verbose=True
|
||||
|
||||
# Directory where conductor's data directory located.
|
||||
# "data" must be subdirectory to this.
|
||||
data_dir = /etc/murano-conductor/metadata-cache
|
||||
|
||||
# Provide url to Murano Metadata repository
|
||||
murano_metadata_url = http://localhost:8084
|
||||
|
||||
# Maximum number of environments that can be processed simultaneously
|
||||
max_environments = 20
|
||||
|
||||
# Maximum number of VMs per environment
|
||||
max_hosts = 250
|
||||
|
||||
# Template IP address for generating environment subnet cidrs
|
||||
env_ip_template = 10.0.0.0
|
||||
|
||||
# Enforces default network topology.
|
||||
# Allowed values: nova, flat, routed
|
||||
# default is routed
|
||||
network_topology = routed
|
||||
|
||||
[keystone]
|
||||
# URL of OpenStack KeyStone service REST API.
|
||||
# Typically only hostname (or IP) needs to be changed
|
||||
auth_url = http://localhost:5000/v2.0
|
||||
|
||||
# Keystone SSL parameters
|
||||
# Optional CA cert file to use in SSL connections
|
||||
ca_file =
|
||||
# Optional PEM-formatted certificate chain file
|
||||
cert_file =
|
||||
# Optional PEM-formatted file that contains the private key
|
||||
key_file =
|
||||
# If set then the server's certificate will not be verified
|
||||
insecure = False
|
||||
|
||||
[heat]
|
||||
# Heat SSL parameters
|
||||
# Optional CA cert file to use in SSL connections
|
||||
ca_file =
|
||||
# Optional PEM-formatted certificate chain file
|
||||
cert_file =
|
||||
# Optional PEM-formatted file that contains the private key
|
||||
key_file =
|
||||
# If set then the server's certificate will not be verified
|
||||
insecure = False
|
||||
# Valid endpoint types: publicURL (default), internalURL, adminURL
|
||||
endpoint_type = publicURL
|
||||
|
||||
[neutron]
|
||||
# Optional CA cert file to use in SSL connections
|
||||
#ca_cert =
|
||||
# Allow self signed server certificate
|
||||
insecure = False
|
||||
# Valid endpoint types: publicURL (default), internalURL, adminURL
|
||||
endpoint_type = publicURL
|
||||
|
||||
[rabbitmq]
|
||||
# Connection parameters to RabbitMQ service
|
||||
|
||||
# Hostname or IP address where RabbitMQ is located.
|
||||
# !!! Change localhost to your real IP or hostname as this
|
||||
# address must be reachable from VMs !!!
|
||||
host = localhost
|
||||
|
||||
# RabbitMQ port (5672 is a default)
|
||||
port = 5672
|
||||
|
||||
# RabbitMQ credentials. Fresh RabbitMQ installation has "guest"
|
||||
# account with "guest" password.
|
||||
# It is recommended to create dedicated user account for Murano using
|
||||
# RabbitMQ web console or command line utility
|
||||
login = guest
|
||||
password = guest
|
||||
|
||||
# RabbitMQ virtual host (vhost). Fresh RabbitMQ installation
|
||||
# has "/" vhost preconfigured.
|
||||
# It is recommended to create dedicated vhost for Murano using
|
||||
# RabbitMQ web console or command line utility
|
||||
virtual_host = /
|
||||
|
||||
Run
|
||||
----
|
||||
|
||||
Run Conductor and supply valid configuration file::
|
||||
|
||||
user@work:~/cd murano/conductor && conductor --config-file=./murano/conductor/etc/conductor.conf
|
||||
|
@ -1,4 +0,0 @@
|
||||
SysV init scripts
|
||||
=====================
|
||||
murano-conductor-redhat - for RedHat based Linux distribution
|
||||
murano-conductor-debian - for Debian based Linux distribution
|
@ -1,104 +0,0 @@
|
||||
#!/bin/sh
|
||||
# Copyright (c) 2014 Mirantis, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# Author: Igor Yozhikov <iyozhikov@mirantis.com>
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: murano-conductor
|
||||
# Required-Start: $network $local_fs $remote_fs $syslog
|
||||
# Required-Stop: $remote_fs
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: OpenStack Murano Conductor Service
|
||||
# Description: This startup script launches murano-conductor service daemon.
|
||||
### END INIT INFO
|
||||
|
||||
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
|
||||
DESC="murano-conductor"
|
||||
NAME=murano-conductor
|
||||
DAEMON=$(which muranoconductor)
|
||||
PIDFILE=/var/run/murano/$NAME.pid
|
||||
SCRIPTNAME=/etc/init.d/openstack-$NAME
|
||||
SYSTEM_USER=murano
|
||||
CONFIG_FILE=/etc/murano/murano-conductor.conf
|
||||
# Exit if the package is not installed
|
||||
[ -x $DAEMON ] || exit 5
|
||||
|
||||
# source function library
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
|
||||
|
||||
do_start()
|
||||
{
|
||||
if [ ! -d "/var/run/murano" ]; then
|
||||
mkdir -p /var/run/murano
|
||||
chown -R $SYSTEM_USER /var/run/murano
|
||||
fi
|
||||
start-stop-daemon --start --background --quiet --chuid $SYSTEM_USER:$SYSTEM_USER --make-pidfile --pidfile $PIDFILE --startas $DAEMON --test -- --config-file=$CONFIG_FILE > /dev/null || return 1
|
||||
start-stop-daemon --start --background --quiet --chuid $SYSTEM_USER:$SYSTEM_USER --make-pidfile --pidfile $PIDFILE --startas $DAEMON -- --config-file=$CONFIG_FILE || return 2
|
||||
}
|
||||
|
||||
do_stop()
|
||||
{
|
||||
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
|
||||
RETVAL="$?"
|
||||
rm -f $PIDFILE
|
||||
return "$RETVAL"
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
log_daemon_msg "Starting $DESC" "$NAME"
|
||||
do_start
|
||||
case "$?" in
|
||||
0|1) log_end_msg 0 ;;
|
||||
2) log_end_msg 1 ;;
|
||||
esac
|
||||
;;
|
||||
stop)
|
||||
log_daemon_msg "Stopping $DESC" "$NAME"
|
||||
do_stop
|
||||
case "$?" in
|
||||
0|1) log_end_msg 0 ;;
|
||||
2) log_end_msg 1 ;;
|
||||
esac
|
||||
;;
|
||||
status)
|
||||
status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
|
||||
;;
|
||||
restart|force-reload)
|
||||
log_daemon_msg "Restarting $DESC" "$NAME"
|
||||
do_stop
|
||||
case "$?" in
|
||||
0|1)
|
||||
do_start
|
||||
case "$?" in
|
||||
0) log_end_msg 0 ;;
|
||||
1) log_end_msg 1 ;; # Old process is still running
|
||||
*) log_end_msg 1 ;; # Failed to start
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
# Failed to stop
|
||||
log_end_msg 1
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
|
||||
exit 3
|
||||
;;
|
||||
esac
|
@ -1,102 +0,0 @@
|
||||
#!/bin/sh
|
||||
# Copyright (c) 2014 Mirantis, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# Author: Igor Yozhikov <iyozhikov@mirantis.com>
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: murano-conductor
|
||||
# Required-Start: $network $local_fs $remote_fs $syslog
|
||||
# Required-Stop: $remote_fs
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: OpenStack Murano Conductor Service
|
||||
# Description: This startup script launches murano-conductor service daemon.
|
||||
### END INIT INFO
|
||||
# chkconfig: 3 90 10
|
||||
# description: This startup script launches murano-conductor service daemon.
|
||||
# config: /etc/murano/murano-conductor.conf, /etc/murano/murano-conductor-paste.ini
|
||||
#
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
|
||||
DESC="murano-conductor"
|
||||
NAME=murano-conductor
|
||||
DAEMON=$(which muranoconductor)
|
||||
PIDFILE=/var/run/murano/$NAME.pid
|
||||
SCRIPTNAME=/etc/init.d/openstack-$NAME
|
||||
SYSTEM_USER=murano
|
||||
CONFIG_FILE=/etc/murano/murano-conductor.conf
|
||||
LOCKFILE=/var/lock/subsys/$NAME
|
||||
# Exit if the package is not installed
|
||||
[ -x $DAEMON ] || exit 5
|
||||
|
||||
# source function library
|
||||
. /etc/init.d/functions
|
||||
|
||||
RETVAL=0
|
||||
|
||||
|
||||
start() {
|
||||
if [ ! -d "/var/run/murano" ]; then
|
||||
mkdir -p /var/run/murano
|
||||
chown -R $SYSTEM_USER /var/run/murano
|
||||
fi
|
||||
echo -n "Starting $NAME: "
|
||||
daemon --user $SYSTEM_USER "$DAEMON --config-file=$CONFIG_FILE &>/dev/null & echo \$! > $PIDFILE"
|
||||
RETVAL=$?
|
||||
echo
|
||||
[ $RETVAL -eq 0 ] && touch $LOCKFILE
|
||||
return $RETVAL
|
||||
}
|
||||
|
||||
stop() {
|
||||
echo -n "Stopping $NAME: "
|
||||
#killproc $DAEMON -TERM
|
||||
killproc -p $PIDFILE $DAEMON
|
||||
RETVAL=$?
|
||||
echo
|
||||
[ $RETVAL -eq 0 ] && rm -f $LOCKFILE
|
||||
return $RETVAL
|
||||
}
|
||||
|
||||
restart() {
|
||||
stop
|
||||
start
|
||||
}
|
||||
|
||||
rh_status() {
|
||||
# run checks to determine if the service is running or use generic status
|
||||
status $DAEMON
|
||||
}
|
||||
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
start
|
||||
;;
|
||||
|
||||
stop)
|
||||
stop
|
||||
;;
|
||||
|
||||
restart)
|
||||
restart
|
||||
;;
|
||||
|
||||
status)
|
||||
rh_status
|
||||
;;
|
||||
*)
|
||||
echo $"Usage: $0 {start|stop|status|restart}"
|
||||
exit 2
|
||||
esac
|
||||
exit $?
|
@ -1,36 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<configuration>
|
||||
<configSections>
|
||||
<section name="nlog" type="NLog.Config.ConfigSectionHandler, NLog"/>
|
||||
</configSections>
|
||||
<startup>
|
||||
<supportedRuntime version="v4.0" sku=".NETFramework,Version=v4.5" />
|
||||
</startup>
|
||||
<nlog xmlns="http://www.nlog-project.org/schemas/NLog.xsd"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
|
||||
<targets>
|
||||
<target name="file" xsi:type="File" fileName="${basedir}/log.txt"
|
||||
layout="${date} ${level}: <${logger:shortName=true}> ${message} ${exception:format=tostring}"/>
|
||||
</targets>
|
||||
|
||||
<rules>
|
||||
<logger name="*" minlevel="Debug" writeTo="file" />
|
||||
</rules>
|
||||
</nlog>
|
||||
<appSettings>
|
||||
<add key="rabbitmq.host" value="%RABBITMQ_HOST%"/>
|
||||
<add key="rabbitmq.port" value="%RABBITMQ_PORT%"/>
|
||||
<add key="rabbitmq.user" value="%RABBITMQ_USER%"/>
|
||||
<add key="rabbitmq.password" value="%RABBITMQ_PASSWORD%"/>
|
||||
<add key="rabbitmq.vhost" value="%RABBITMQ_VHOST%"/>
|
||||
<add key="rabbitmq.inputQueue" value="%RABBITMQ_INPUT_QUEUE%"/>
|
||||
<add key="rabbitmq.resultExchange" value=""/>
|
||||
<add key="rabbitmq.resultRoutingKey" value="%RESULT_QUEUE%"/>
|
||||
<add key="rabbitmq.durableMessages" value="true"/>
|
||||
|
||||
<add key="rabbitmq.ssl" value="%RABBITMQ_SSL%"/>
|
||||
<add key="rabbitmq.allowInvalidCA" value="true"/>
|
||||
<add key="rabbitmq.sslServerName" value=""/>
|
||||
|
||||
</appSettings>
|
||||
</configuration>
|
@ -1,8 +0,0 @@
|
||||
RABBITMQ_HOST = "%RABBITMQ_HOST%"
|
||||
RABBITMQ_PORT = "%RABBITMQ_PORT%"
|
||||
RABBITMQ_USERNAME = "%RABBITMQ_USER%"
|
||||
RABBITMQ_PASSWORD = "%RABBITMQ_PASSWORD%"
|
||||
RABBITMQ_VHOST = "%RABBITMQ_VHOST%"
|
||||
RABBITMQ_INPUT_QUEUE = "%RABBITMQ_INPUT_QUEUE%"
|
||||
RESULT_QUEUE = "%RESULT_QUEUE%"
|
||||
RABBITMQ_RESULT_ROUTING_KEY = "%RESULT_QUEUE%"
|
@ -1,35 +0,0 @@
|
||||
[DEFAULT]
|
||||
debug=True
|
||||
verbose=True
|
||||
log_file = /var/log/murano-agnet.log
|
||||
|
||||
storage=/var/murano/plans
|
||||
|
||||
[rabbitmq]
|
||||
|
||||
# Input queue name
|
||||
input_queue = %RABBITMQ_INPUT_QUEUE%
|
||||
|
||||
# Output routing key (usually queue name)
|
||||
result_routing_key = %RESULT_QUEUE%
|
||||
|
||||
# Connection parameters to RabbitMQ service
|
||||
|
||||
# Hostname or IP address where RabbitMQ is located.
|
||||
host = %RABBITMQ_HOST%
|
||||
|
||||
# RabbitMQ port (5672 is a default)
|
||||
port = %RABBITMQ_PORT%
|
||||
|
||||
# Use SSL for RabbitMQ connections (True or False)
|
||||
ssl = %RABBITMQ_SSL%
|
||||
|
||||
# Path to SSL CA certificate or empty to allow self signed server certificate
|
||||
ca_certs =
|
||||
|
||||
# RabbitMQ credentials. Fresh RabbitMQ installation has "guest" account with "guest" password.
|
||||
login = %RABBITMQ_USER%
|
||||
password = %RABBITMQ_PASSWORD%
|
||||
|
||||
# RabbitMQ virtual host (vhost). Fresh RabbitMQ installation has "/" vhost preconfigured.
|
||||
virtual_host = %RABBITMQ_VHOST%
|
@ -1,110 +0,0 @@
|
||||
[DEFAULT]
|
||||
|
||||
# Set up logging. To use syslog just set use_syslog parameter value to 'True'
|
||||
log_file = /tmp/murano-conductor.log
|
||||
|
||||
# Use syslog for logging. Existing syslog format is DEPRECATED
|
||||
# during I, and then will be changed in J to honor RFC5424
|
||||
|
||||
use_syslog = False
|
||||
|
||||
# (Optional) Use syslog rfc5424 format for logging. If
|
||||
# enabled, will add APP-NAME (RFC5424) before the MSG part of
|
||||
# the syslog message. The old format without APP-NAME is
|
||||
# deprecated in I, and will be removed in J.
|
||||
#use_syslog_rfc_format=false
|
||||
|
||||
#Syslog facility to receive log lines
|
||||
syslog_log_facility = LOG_LOCAL0
|
||||
|
||||
# Log verbosity
|
||||
debug = True
|
||||
verbose = True
|
||||
|
||||
# Provide directory with initialization scripts
|
||||
init_scripts_dir = etc/murano/init-scripts
|
||||
|
||||
# Provide directory with agent configs
|
||||
agent_config_dir = etc/murano/agent-config
|
||||
|
||||
# Directory for data cache, OS temp directory is used by default
|
||||
data_dir = /tmp/muranoconductor-cache
|
||||
|
||||
# Provide url to Murano Metadata repository
|
||||
# Comment this line if you registered murano-metadata in keystone catalog
|
||||
murano_metadata_url = http://localhost:8084/v1
|
||||
|
||||
# Maximum number of environments that can be processed simultaneously
|
||||
max_environments = 20
|
||||
|
||||
# Maximum number of VMs per environment
|
||||
max_hosts = 250
|
||||
|
||||
# Template IP address for generating environment subnet cidrs
|
||||
env_ip_template = 10.0.0.0
|
||||
|
||||
# Enforces default network topology.
|
||||
# Allowed values: nova, flat, routed
|
||||
# default is routed
|
||||
network_topology = routed
|
||||
|
||||
[keystone]
|
||||
# URL of OpenStack KeyStone service REST API.
|
||||
# Typically only hostname (or IP) needs to be changed
|
||||
auth_url = http://localhost:5000/v2.0
|
||||
|
||||
# Keystone SSL parameters
|
||||
# Optional CA cert file to use in SSL connections
|
||||
#ca_file =
|
||||
# Optional PEM-formatted certificate chain file
|
||||
#cert_file =
|
||||
# Optional PEM-formatted file that contains the private key
|
||||
#key_file =
|
||||
# If set then the server's certificate will not be verified
|
||||
insecure = False
|
||||
|
||||
[heat]
|
||||
# Heat SSL parameters
|
||||
# Optional CA cert file to use in SSL connections
|
||||
#ca_file =
|
||||
# Optional PEM-formatted certificate chain file
|
||||
#cert_file =
|
||||
# Optional PEM-formatted file that contains the private key
|
||||
#key_file =
|
||||
# If set then the server's certificate will not be verified
|
||||
insecure = False
|
||||
# Valid endpoint types: publicURL (default), internalURL, adminURL
|
||||
endpoint_type = publicURL
|
||||
|
||||
[neutron]
|
||||
# Optional CA cert file to use in SSL connections
|
||||
#ca_cert =
|
||||
# Allow self signed server certificate
|
||||
insecure = False
|
||||
# Valid endpoint types: publicURL (default), internalURL, adminURL
|
||||
endpoint_type = publicURL
|
||||
|
||||
[rabbitmq]
|
||||
# Connection parameters to RabbitMQ service
|
||||
|
||||
# Hostname or IP address where RabbitMQ is located.
|
||||
# !!! Change localhost to your real IP or hostname as this address must be reachable from VMs !!!
|
||||
host = localhost
|
||||
|
||||
# RabbitMQ port (5672 is a default)
|
||||
port = 5672
|
||||
|
||||
# Use SSL for RabbitMQ connections (True or False)
|
||||
ssl = False
|
||||
|
||||
# Path to SSL CA certificate or empty to allow self signed server certificate
|
||||
#ca_certs =
|
||||
|
||||
# RabbitMQ credentials. Fresh RabbitMQ installation has "guest" account with "guest" password.
|
||||
# It is recommended to create dedicated user account for Murano using RabbitMQ web console or command line utility
|
||||
login = guest
|
||||
password = guest
|
||||
|
||||
# RabbitMQ virtual host (vhost). Fresh RabbitMQ installation has "/" vhost preconfigured.
|
||||
# It is recommended to create dedicated vhost for Murano using RabbitMQ web console or command line utility
|
||||
virtual_host = /
|
@ -1,11 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
AgentConfigBase64='%AGENT_CONFIG_BASE64%'
|
||||
|
||||
mkdir /etc/murano
|
||||
|
||||
echo $AgentConfigBase64 | base64 -d > /etc/murano/agent.config
|
||||
|
||||
chmod 664 /etc/murano/agent.config
|
||||
sleep 10
|
||||
reboot
|
@ -1,68 +0,0 @@
|
||||
#ps1
|
||||
|
||||
$WindowsAgentConfigBase64 = '%AGENT_CONFIG_BASE64%'
|
||||
$WindowsAgentConfigFile = "C:\Murano\Agent\WindowsAgent.exe.config"
|
||||
$WindowsAgentLogFile = "C:\Murano\Agent\log.txt"
|
||||
|
||||
$NewComputerName = '%INTERNAL_HOSTNAME%'
|
||||
$MuranoFileShare = '\\%MURANO_SERVER_ADDRESS%\share'
|
||||
|
||||
$CaRootCertBase64 = "%CA_ROOT_CERT_BASE64%"
|
||||
$CaRootCertFile = "C:\Murano\ca.cert"
|
||||
|
||||
$RestartRequired = $false
|
||||
|
||||
Import-Module CoreFunctions
|
||||
Initialize-Logger 'CloudBase-Init' 'C:\Murano\PowerShell.log'
|
||||
|
||||
$ErrorActionPreference = 'Stop'
|
||||
|
||||
trap {
|
||||
Write-LogError '<exception>'
|
||||
Write-LogError $_ -EntireObject
|
||||
Write-LogError '</exception>'
|
||||
exit 1
|
||||
}
|
||||
|
||||
Write-Log "Importing CA certificate ..."
|
||||
if ($CaRootCertBase64 -eq '') {
|
||||
Write-Log "Importing CA certificate ... skipped"
|
||||
}
|
||||
else {
|
||||
ConvertFrom-Base64String -Base64String $CaRootCertBase64 -Path $CaRootCertFile
|
||||
$cert = New-Object System.Security.Cryptography.X509Certificates.X509Certificate2 $CaRootCertFile
|
||||
$store = New-Object System.Security.Cryptography.X509Certificates.X509Store("AuthRoot","LocalMachine")
|
||||
$store.Open("MaxAllowed")
|
||||
$store.Add($cert)
|
||||
$store.Close()
|
||||
Write-Log "Importing CA certificate ... done"
|
||||
}
|
||||
|
||||
Write-Log "Updating Murano Windows Agent."
|
||||
Stop-Service "Murano Agent"
|
||||
Backup-File $WindowsAgentConfigFile
|
||||
Remove-Item $WindowsAgentConfigFile -Force -ErrorAction 'SilentlyContinue'
|
||||
Remove-Item $WindowsAgentLogFile -Force -ErrorAction 'SilentlyContinue'
|
||||
ConvertFrom-Base64String -Base64String $WindowsAgentConfigBase64 -Path $WindowsAgentConfigFile
|
||||
Exec sc.exe 'config','"Murano Agent"','start=','delayed-auto'
|
||||
Write-Log "Service has been updated."
|
||||
|
||||
Write-Log "Adding environment variable 'MuranoFileShare' = '$MuranoFileShare' ..."
|
||||
[Environment]::SetEnvironmentVariable('MuranoFileShare', $MuranoFileShare, [EnvironmentVariableTarget]::Machine)
|
||||
Write-Log "Environment variable added."
|
||||
|
||||
Write-Log "Renaming computer to '$NewComputerName' ..."
|
||||
$null = Rename-Computer -NewName $NewComputerName -Force
|
||||
|
||||
Write-Log "New name assigned, restart required."
|
||||
$RestartRequired = $true
|
||||
|
||||
|
||||
Write-Log 'All done!'
|
||||
if ( $RestartRequired ) {
|
||||
Write-Log "Restarting computer ..."
|
||||
Restart-Computer -Force
|
||||
}
|
||||
else {
|
||||
Start-Service 'Murano Agent'
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
service murano-agent stop
|
||||
|
||||
AgentConfigBase64='%AGENT_CONFIG_BASE64%'
|
||||
|
||||
mkdir /etc/murano
|
||||
echo $AgentConfigBase64 | base64 -d > /etc/murano/agent.conf
|
||||
chmod 664 /etc/murano/agent.conf
|
||||
|
||||
service murano-agent start
|
@ -1,9 +0,0 @@
|
||||
OpenStack Heat plugin for Murano
|
||||
|
||||
Copy murano directory to resources directory of your Heat installation
|
||||
(/opt/stack/heat/heat/engine/resources for DevStack)
|
||||
|
||||
Plugin exposes new Heat resource of type Murano::Environment with two properties:
|
||||
1. Body - JSON-encoded Murano environment definition
|
||||
2. MuranoApiEndpoint - optional Murano API endpoint URL.
|
||||
If not specified then it aumatically discovered via Keystone
|
@ -1,104 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
import eventlet
|
||||
from heat.openstack.common import log as logging
|
||||
from heat.engine import resource
|
||||
from muranoclient.v1.client import Client
|
||||
from muranoclient.common.exceptions import HTTPNotFound
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MuranoEnvironment(resource.Resource):
|
||||
properties_schema = {
|
||||
'Body': {'Type': 'Map', 'Required': True},
|
||||
'MuranoApiEndpoint': {'Type': 'String'}
|
||||
}
|
||||
update_allowed_keys = ('Metadata', 'Properties')
|
||||
update_allowed_properties = ('Definition', 'MuranoApiEndpoint')
|
||||
attributes_schema = {}
|
||||
|
||||
def __init__(self, name, json_snippet, stack):
|
||||
super(MuranoEnvironment, self).__init__(name, json_snippet, stack)
|
||||
|
||||
def handle_create(self):
|
||||
self._update_environment()
|
||||
|
||||
def handle_delete(self):
|
||||
client = self._muranoclient()
|
||||
environment_id = self._find_environment(client)
|
||||
if environment_id:
|
||||
client.environments.delete(environment_id)
|
||||
try:
|
||||
self._wait_deployed(client, environment_id)
|
||||
except HTTPNotFound:
|
||||
pass
|
||||
|
||||
def _find_environment(self, client):
|
||||
environments = client.environments.list()
|
||||
for environment in environments:
|
||||
if environment.name == self.name:
|
||||
return environment.id
|
||||
return None
|
||||
|
||||
def _update_environment(self):
|
||||
client = self._muranoclient()
|
||||
environment_id = self._find_environment(client)
|
||||
if not environment_id:
|
||||
environment_id = client.environments.create(self.name).id
|
||||
|
||||
session_id = client.sessions.configure(environment_id).id
|
||||
environment = self.properties.get('Body')
|
||||
client.services.post(environment_id,
|
||||
path='/',
|
||||
data=environment.get('services', []),
|
||||
session_id=session_id)
|
||||
client.sessions.deploy(environment_id, session_id)
|
||||
self._wait_deployed(client, environment_id)
|
||||
|
||||
def _wait_deployed(self, client, environment_id):
|
||||
i = 0
|
||||
delay = 2
|
||||
while True:
|
||||
environment = client.environments.get(environment_id)
|
||||
if environment.status == 'pending' and i > 5 * 60:
|
||||
raise EnvironmentError(
|
||||
"Environment deployment hasn't started")
|
||||
elif environment.status == 'deploying' and i > 65 * 60:
|
||||
raise EnvironmentError(
|
||||
"Environment deployment takes too long")
|
||||
elif environment.status == 'ready':
|
||||
break
|
||||
eventlet.sleep(delay)
|
||||
i += delay
|
||||
|
||||
def _muranoclient(self):
|
||||
endpoint = self._get_endpoint()
|
||||
token = self.stack.clients.auth_token
|
||||
return Client(endpoint=endpoint, token=token)
|
||||
|
||||
def _get_endpoint(self):
|
||||
#prefer location specified in settings for dev purposes
|
||||
endpoint = self.properties.get('MuranoApiEndpoint')
|
||||
if not endpoint:
|
||||
endpoint = self.stack.clients.url_for(service_type='murano')
|
||||
return endpoint
|
||||
|
||||
|
||||
def resource_mapping():
|
||||
return {
|
||||
'Murano::Environment': MuranoEnvironment
|
||||
}
|
4
logs/.gitignore
vendored
4
logs/.gitignore
vendored
@ -1,4 +0,0 @@
|
||||
# Ignore everything in this directory
|
||||
*
|
||||
# Except this file
|
||||
!.gitignore
|
@ -1,20 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import gettext
|
||||
gettext.install('muranoconductor', './muranoconductor/locale', unicode=1)
|
||||
|
||||
from pbr import version
|
||||
__version_info = version.VersionInfo('murano-conductor')
|
||||
__version__ = __version_info.cached_version_string()
|
@ -1,192 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import glob
|
||||
import sys
|
||||
|
||||
import anyjson
|
||||
import eventlet
|
||||
from muranoconductor.openstack.common import service
|
||||
from workflow import Workflow
|
||||
from commands.dispatcher import CommandDispatcher
|
||||
from openstack.common import log as logging
|
||||
from config import Config
|
||||
import reporting
|
||||
from muranocommon.messaging import MqClient, Message
|
||||
from muranoconductor import config as cfg
|
||||
from muranocommon.helpers.token_sanitizer import TokenSanitizer
|
||||
from muranoconductor import metadata
|
||||
import vm_agent
|
||||
import cloud_formation
|
||||
import network
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ConductorWorkflowService(service.Service):
|
||||
def __init__(self):
|
||||
super(ConductorWorkflowService, self).__init__()
|
||||
|
||||
def start(self):
|
||||
super(ConductorWorkflowService, self).start()
|
||||
self.tg.add_thread(self._start_rabbitmq)
|
||||
|
||||
def stop(self):
|
||||
super(ConductorWorkflowService, self).stop()
|
||||
|
||||
def create_rmq_client(self):
|
||||
rabbitmq = cfg.CONF.rabbitmq
|
||||
connection_params = {
|
||||
'login': rabbitmq.login,
|
||||
'password': rabbitmq.password,
|
||||
'host': rabbitmq.host,
|
||||
'port': rabbitmq.port,
|
||||
'virtual_host': rabbitmq.virtual_host,
|
||||
'ssl': rabbitmq.ssl,
|
||||
'ca_certs': rabbitmq.ca_certs.strip() or None
|
||||
}
|
||||
return MqClient(**connection_params)
|
||||
|
||||
def _start_rabbitmq(self):
|
||||
reconnect_delay = 1
|
||||
while True:
|
||||
try:
|
||||
with self.create_rmq_client() as mq:
|
||||
mq.declare('tasks', 'tasks', enable_ha=True)
|
||||
mq.declare('task-results', enable_ha=True)
|
||||
with mq.open('tasks',
|
||||
prefetch_count=
|
||||
cfg.CONF.max_environments) as subscription:
|
||||
reconnect_delay = 1
|
||||
while True:
|
||||
msg = subscription.get_message(timeout=2)
|
||||
if msg is not None:
|
||||
eventlet.spawn(self._task_received, msg)
|
||||
except Exception as ex:
|
||||
log.exception(ex)
|
||||
|
||||
eventlet.sleep(reconnect_delay)
|
||||
reconnect_delay = min(reconnect_delay * 2, 60)
|
||||
|
||||
def _task_received(self, message):
|
||||
task = message.body or {}
|
||||
message_id = message.id
|
||||
do_ack = False
|
||||
reporter = None
|
||||
|
||||
with self.create_rmq_client() as mq:
|
||||
try:
|
||||
|
||||
secure_task = TokenSanitizer().sanitize(task)
|
||||
log.info('Starting processing task {0}: {1}'.format(
|
||||
message_id, anyjson.dumps(secure_task)))
|
||||
reporter = reporting.Reporter(mq, message_id, task['id'])
|
||||
|
||||
metadata_version = metadata.get_metadata(task['id'],
|
||||
task['token'],
|
||||
task['tenant_id'])
|
||||
command_dispatcher = CommandDispatcher('e' + task['id'], mq,
|
||||
task['token'],
|
||||
task['tenant_id'],
|
||||
reporter)
|
||||
|
||||
workflows = []
|
||||
config = Config()
|
||||
for path in glob.glob(
|
||||
'{0}/workflows/*.xml'.format(metadata_version)):
|
||||
log.debug('Loading XML {0}'.format(path))
|
||||
workflow = Workflow(path, task, command_dispatcher, config,
|
||||
reporter, metadata_version)
|
||||
workflows.append(workflow)
|
||||
|
||||
stop = False
|
||||
while not stop:
|
||||
try:
|
||||
for workflow in workflows:
|
||||
workflow.prepare()
|
||||
while True:
|
||||
result = False
|
||||
for workflow in workflows:
|
||||
if workflow.execute():
|
||||
result = True
|
||||
if not result:
|
||||
log.debug(
|
||||
"No rules matched, "
|
||||
"will now execute pending commands")
|
||||
break
|
||||
if not command_dispatcher.execute_pending():
|
||||
log.debug("No pending commands found, "
|
||||
"seems like we are done")
|
||||
break
|
||||
if self.check_stop_requested(task):
|
||||
log.info("Workflow stop requested")
|
||||
stop = True
|
||||
except Exception as ex:
|
||||
reporter.report_generic(
|
||||
"Unexpected error has occurred", ex.message,
|
||||
'error')
|
||||
log.exception(ex)
|
||||
break
|
||||
command_dispatcher.close()
|
||||
if stop:
|
||||
log.info("Workflow stopped by 'stop' command")
|
||||
do_ack = True
|
||||
metadata.release(task['id'])
|
||||
except Exception as ex:
|
||||
log.exception(ex)
|
||||
log.debug("Non-processable message detected, "
|
||||
"will ack message")
|
||||
do_ack = True
|
||||
finally:
|
||||
if do_ack:
|
||||
self.cleanup(task, reporter)
|
||||
result_msg = Message()
|
||||
result_msg.body = task
|
||||
result_msg.id = message_id
|
||||
|
||||
mq.send(message=result_msg, key='task-results')
|
||||
message.ack()
|
||||
|
||||
log.info('Finished processing task {0}. Result = {1}'.format(
|
||||
message_id, anyjson.dumps(TokenSanitizer().sanitize(task))))
|
||||
|
||||
def cleanup(self, model, reporter):
|
||||
try:
|
||||
if 'token' in model:
|
||||
del model['token']
|
||||
|
||||
if 'temp' in model:
|
||||
del model['temp']
|
||||
|
||||
services = model.get('services', [])
|
||||
for service in services:
|
||||
if 'temp' in service:
|
||||
del service['temp']
|
||||
|
||||
units = service.get('units', [])
|
||||
for unit in units:
|
||||
if 'temp' in unit:
|
||||
del unit['temp']
|
||||
except Exception as e:
|
||||
log.exception("Unexpected exception has occurred")
|
||||
if reporter:
|
||||
reporter.report_generic("Unexpected error has occurred",
|
||||
e.message, 'error')
|
||||
|
||||
def check_stop_requested(self, model):
|
||||
if 'temp' in model:
|
||||
if '_stop_requested' in model['temp']:
|
||||
return model['temp']['_stop_requested']
|
||||
return False
|
@ -1,190 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import base64
|
||||
from os.path import basename
|
||||
import random
|
||||
import string
|
||||
import time
|
||||
import datetime
|
||||
import xml_code_engine
|
||||
from openstack.common import log as logging
|
||||
from muranoconductor import config as cfg
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def update_cf_stack(engine, context, body, template, result=None, error=None,
|
||||
**kwargs):
|
||||
command_dispatcher = context['/commandDispatcher']
|
||||
metadata_id = context['/metadata_id']
|
||||
|
||||
def callback(result_value, error_result=None):
|
||||
if result is not None:
|
||||
context[result] = result_value
|
||||
|
||||
if error_result is not None:
|
||||
if error is not None:
|
||||
context[error] = {
|
||||
'message': getattr(error_result, 'message', None),
|
||||
'strerror': getattr(error_result, 'strerror', None),
|
||||
'timestamp': datetime.datetime.now().isoformat()
|
||||
}
|
||||
failure_handler = body.find('failure')
|
||||
if failure_handler is not None:
|
||||
log.warning("Handling exception in failure block",
|
||||
exc_info=True)
|
||||
engine.evaluate_content(failure_handler, context)
|
||||
return
|
||||
else:
|
||||
log.error("No failure block found for exception",
|
||||
exc_info=True)
|
||||
raise error_result
|
||||
|
||||
success_handler = body.find('success')
|
||||
if success_handler is not None:
|
||||
engine.evaluate_content(success_handler, context)
|
||||
|
||||
command_dispatcher.execute(
|
||||
name='cf', command='CreateOrUpdate', template=template,
|
||||
mappings=(kwargs.get('mappings') or {}),
|
||||
arguments=(kwargs.get('arguments') or {}),
|
||||
callback=callback,
|
||||
metadata_id=metadata_id)
|
||||
|
||||
|
||||
def delete_cf_stack(engine, context, body, **kwargs):
|
||||
command_dispatcher = context['/commandDispatcher']
|
||||
|
||||
def callback(result_value):
|
||||
success_handler = body.find('success')
|
||||
if success_handler is not None:
|
||||
engine.evaluate_content(success_handler, context)
|
||||
|
||||
command_dispatcher.execute(
|
||||
name='cf', command='Delete', callback=callback)
|
||||
|
||||
|
||||
def prepare_user_data(context, hostname, service, unit,
|
||||
template='Default', initFile='init.ps1', **kwargs):
|
||||
settings = cfg.CONF.rabbitmq
|
||||
path_to_init_file = '{0}/{1}'.format(basename(cfg.CONF.init_scripts_dir),
|
||||
initFile)
|
||||
with open(path_to_init_file) as init_script_file:
|
||||
with open('{0}/{1}.template'.format(
|
||||
basename(cfg.CONF.agent_config_dir), template)
|
||||
) as template_file:
|
||||
init_script = init_script_file.read()
|
||||
template_data = template_file.read()
|
||||
|
||||
replacements = {
|
||||
'%RABBITMQ_HOST%': settings.host,
|
||||
'%RABBITMQ_PORT%': settings.port,
|
||||
'%RABBITMQ_INPUT_QUEUE%': '-'.join(
|
||||
['e' + str(context['/dataSource']['id']),
|
||||
str(service), str(unit)]).lower(),
|
||||
'%RESULT_QUEUE%': '-execution-results-e{0}'.format(
|
||||
str(context['/dataSource']['id'])).lower(),
|
||||
'%RABBITMQ_USER%': settings.login,
|
||||
'%RABBITMQ_PASSWORD%': settings.password,
|
||||
'%RABBITMQ_VHOST%': settings.virtual_host,
|
||||
'%RABBITMQ_SSL%': 'true' if settings.ssl else 'false'
|
||||
}
|
||||
|
||||
template_data = set_config_params(template_data, replacements)
|
||||
|
||||
init_script = init_script.replace(
|
||||
'%AGENT_CONFIG_BASE64%',
|
||||
base64.b64encode(template_data))
|
||||
|
||||
init_script = init_script.replace('%INTERNAL_HOSTNAME%', hostname)
|
||||
init_script = init_script.replace(
|
||||
'%MURANO_SERVER_ADDRESS%',
|
||||
cfg.CONF.file_server or settings.host)
|
||||
|
||||
init_script = init_script.replace(
|
||||
'%CA_ROOT_CERT_BASE64%',
|
||||
get_ca_certificate())
|
||||
|
||||
return init_script
|
||||
|
||||
|
||||
def set_config_params(template_data, replacements):
|
||||
for key in replacements:
|
||||
template_data = template_data.replace(key, str(replacements[key]))
|
||||
return template_data
|
||||
|
||||
|
||||
def get_ca_certificate():
|
||||
ca_file = (cfg.CONF.rabbitmq.ca_certs or '').strip()
|
||||
if not ca_file:
|
||||
return ''
|
||||
with open(ca_file) as stream:
|
||||
return stream.read().encode('base64')
|
||||
|
||||
|
||||
counters = {}
|
||||
|
||||
|
||||
def int2base(x, base):
|
||||
digs = string.digits + string.lowercase
|
||||
if x < 0:
|
||||
sign = -1
|
||||
elif x == 0:
|
||||
return '0'
|
||||
else:
|
||||
sign = 1
|
||||
x *= sign
|
||||
digits = []
|
||||
while x:
|
||||
digits.append(digs[x % base])
|
||||
x /= base
|
||||
if sign < 0:
|
||||
digits.append('-')
|
||||
digits.reverse()
|
||||
return ''.join(digits)
|
||||
|
||||
|
||||
def generate_hostname(pattern, service_id, **kwargs):
|
||||
if not pattern:
|
||||
return _generate_random_hostname()
|
||||
elif '#' in pattern:
|
||||
counter = counters.get(service_id) or 1
|
||||
counters[service_id] = counter + 1
|
||||
return pattern.replace('#', str(counter), 1)
|
||||
else:
|
||||
return pattern
|
||||
|
||||
|
||||
def _generate_random_hostname():
|
||||
counter = counters.get('') or 1
|
||||
prefix = ''.join(random.choice(string.lowercase) for _ in range(5))
|
||||
timestamp = int2base(int(time.time() * 1000), 36)[:8]
|
||||
suffix = int2base(counter, 36)
|
||||
counters[''] = (counter + 1) % 1296
|
||||
return prefix + timestamp + suffix
|
||||
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
update_cf_stack, "update-cf-stack")
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
delete_cf_stack, "delete-cf-stack")
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
prepare_user_data, "prepare-user-data")
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
generate_hostname, "generate-hostname")
|
@ -1,51 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
# If ../muranoconductor/__init__.py exists, add ../ to Python search path, so
|
||||
# it will override what happens to be installed in /usr/(local/)lib/python...
|
||||
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__),
|
||||
os.pardir,
|
||||
os.pardir,
|
||||
os.pardir))
|
||||
if os.path.exists(os.path.join(possible_topdir,
|
||||
'muranoconductor',
|
||||
'__init__.py')):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
from muranoconductor import config
|
||||
from muranoconductor.openstack.common import log
|
||||
from muranoconductor.openstack.common import service
|
||||
from muranoconductor.app import ConductorWorkflowService
|
||||
from muranoconductor import metadata
|
||||
|
||||
|
||||
def main():
    """Entry point for the conductor service.

    Parses configuration, prepares the metadata cache, sets up logging
    and runs the workflow service until interrupted. Exits with status 1
    on a RuntimeError during startup/run.
    """
    try:
        config.parse_args()
        metadata.prepare(config.CONF.data_dir)
        log.setup('conductor')
        launcher = service.ServiceLauncher()
        launcher.launch_service(ConductorWorkflowService())
        launcher.wait()
    # 'except RuntimeError, e' is Python-2-only syntax; 'as' works on
    # Python 2.6+ and is required on Python 3.
    except RuntimeError as e:
        sys.stderr.write("ERROR: %s\n" % e)
        sys.exit(1)


if __name__ == '__main__':
    main()
|
@ -1,16 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import command
|
@ -1,243 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import anyjson
|
||||
import eventlet
|
||||
import types
|
||||
|
||||
from muranoconductor.openstack.common import log as logging
|
||||
import muranoconductor.helpers
|
||||
from command import CommandBase
|
||||
import muranoconductor.config
|
||||
from heatclient.client import Client
|
||||
import heatclient.exc
|
||||
from keystoneclient.v2_0 import client as ksclient
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class HeatExecutor(CommandBase):
    """Executes Heat (CloudFormation-style) commands against one stack.

    Commands are queued by execute() and flushed in batches by
    execute_pending(): all queued updates are merged into a single
    stack create/update call; queued deletes remove the stack.
    """

    def __init__(self, stack, token, tenant_id, reporter):
        """Authenticate against keystone and build a Heat client.

        :param stack: name of the Heat stack this executor manages
        :param token: keystone auth token of the deploying user
        :param tenant_id: tenant the stack belongs to
        :param reporter: status reporter (stored but unused here)
        :raises heatclient.exc.HTTPUnauthorized: if the token is rejected
        """
        self._update_pending_list = []
        self._delete_pending_list = []
        self._stack = stack
        self._reporter = reporter

        keystone_settings = muranoconductor.config.CONF.keystone
        heat_settings = muranoconductor.config.CONF.heat

        client = ksclient.Client(
            endpoint=keystone_settings.auth_url,
            cacert=keystone_settings.ca_file or None,
            cert=keystone_settings.cert_file or None,
            key=keystone_settings.key_file or None,
            insecure=keystone_settings.insecure)

        if not client.authenticate(
                auth_url=keystone_settings.auth_url,
                tenant_id=tenant_id,
                token=token):
            raise heatclient.exc.HTTPUnauthorized()

        heat_url = client.service_catalog.url_for(
            service_type='orchestration',
            endpoint_type=heat_settings.endpoint_type)

        # username/password are placeholders: token_only=True means the
        # Heat client authenticates solely with the keystone token.
        self._heat_client = Client(
            '1',
            heat_url,
            username='badusername',
            password='badpassword',
            token_only=True,
            token=client.auth_token,
            ca_file=heat_settings.ca_file or None,
            cert_file=heat_settings.cert_file or None,
            key_file=heat_settings.key_file or None,
            insecure=heat_settings.insecure)

    def execute(self, command, callback, **kwargs):
        """Queue a 'CreateOrUpdate' or 'Delete' command for this stack.

        The callback is invoked later, from execute_pending(), with the
        stack outputs (or with (None, exception) on failure).
        """
        log.debug('Got command {0} on stack {1}'.format(command, self._stack))

        if command == 'CreateOrUpdate':
            return self._execute_create_update(
                kwargs['template'],
                muranoconductor.helpers.str2unicode(
                    kwargs.get('mappings') or {}),
                muranoconductor.helpers.str2unicode(
                    kwargs.get('arguments') or {}),
                callback,
                kwargs['metadata_id'])
        elif command == 'Delete':
            return self._execute_delete(callback)

    def _execute_create_update(self, template, mappings,
                               arguments, callback, metadata_id):
        """Load and transform a CF template, then queue it for update."""
        template_path = '{0}/templates/cf/{1}.template'.format(metadata_id,
                                                               template)
        with open(template_path) as template_file:
            template_data = template_file.read()

        # Substitute workflow-provided values into the JSON template.
        template_data = muranoconductor.helpers.transform_json(
            anyjson.loads(template_data), mappings)

        self._update_pending_list.append({
            'template': template_data,
            'arguments': arguments,
            'callback': callback
        })

    def _execute_delete(self, callback):
        """Queue a stack deletion; callback fires after the flush."""
        self._delete_pending_list.append({
            'callback': callback
        })

    def has_pending_commands(self):
        """True if any update or delete is queued but not flushed."""
        return len(self._update_pending_list) + len(
            self._delete_pending_list) > 0

    def execute_pending(self):
        """Flush queued updates then deletes; True if anything ran."""
        # wait for the stack not to be IN_PROGRESS
        self._wait_state(lambda status: True)
        r1 = self._execute_pending_updates()
        r2 = self._execute_pending_deletes()
        return r1 or r2

    def _execute_pending_updates(self):
        """Merge all queued templates and create/update the stack.

        On success each queued callback receives the stack outputs dict;
        on failure each receives (None, exception). Always returns True
        when there was anything queued.
        """
        if not len(self._update_pending_list):
            return False

        try:
            template, arguments = self._get_current_template()
            stack_exists = (template != {})
            # do not need to merge with current stack cause we rebuilding it
            # from scratch on every deployment
            template, arguments = ({}, {})

            for t in self._update_pending_list:
                template = muranoconductor.helpers.merge_dicts(template,
                                                               t['template'])
                arguments = muranoconductor.helpers.merge_dicts(arguments,
                                                                t['arguments'],
                                                                max_levels=1)
            log.info(
                'Executing heat template {0} with arguments {1} on stack {2}'
                .format(anyjson.dumps(template), arguments, self._stack))

            if stack_exists:
                self._heat_client.stacks.update(
                    stack_id=self._stack,
                    parameters=arguments,
                    template=template)
                log.debug(
                    'Waiting for the stack {0} to be update'.format(
                        self._stack))
                outs = self._wait_state(
                    lambda status: status == 'UPDATE_COMPLETE')
                log.info('Stack {0} updated'.format(self._stack))
            else:
                self._heat_client.stacks.create(
                    stack_name=self._stack,
                    parameters=arguments,
                    template=template,
                    disable_rollback=False)

                log.debug('Waiting for the stack {0} to be create'.format(
                    self._stack))
                outs = self._wait_state(
                    lambda status: status == 'CREATE_COMPLETE')
                log.info('Stack {0} created'.format(self._stack))

            pending_list = self._update_pending_list
            self._update_pending_list = []

            for item in pending_list:
                item['callback'](outs)
            return True
        except Exception as ex:
            # Fail every queued request with the exception; the queue is
            # drained either way so one bad batch cannot wedge the executor.
            pending_list = self._update_pending_list
            self._update_pending_list = []
            for item in pending_list:
                item['callback'](None, ex)
            return True

    def _execute_pending_deletes(self):
        """Delete the stack and invoke queued delete callbacks.

        Deletion errors are logged and swallowed; callbacks always get
        True (best-effort delete).
        """
        if not len(self._delete_pending_list):
            return False

        log.debug('Deleting stack {0}'.format(self._stack))
        try:
            self._heat_client.stacks.delete(
                stack_id=self._stack)
            log.debug(
                'Waiting for the stack {0} to be deleted'.format(self._stack))
            self._wait_state(
                lambda status: status in ('DELETE_COMPLETE', 'NOT_FOUND'))
            log.info('Stack {0} deleted'.format(self._stack))
        except Exception as ex:
            log.exception(ex)

        pending_list = self._delete_pending_list
        self._delete_pending_list = []

        for item in pending_list:
            item['callback'](True)
        return True

    def _get_current_template(self):
        """Return (template, parameters) of the live stack, or ({}, {})
        when the stack does not exist."""
        try:
            stack_info = self._heat_client.stacks.get(stack_id=self._stack)
            template = self._heat_client.stacks.template(
                stack_id='{0}/{1}'.format(
                    stack_info.stack_name,
                    stack_info.id))
            return template, stack_info.parameters
        except heatclient.exc.HTTPNotFound:
            return {}, {}

    def _wait_state(self, status_func):
        """Poll the stack until it leaves *_IN_PROGRESS, then validate
        the final status with status_func and return the stack outputs
        as a dict ({} when unavailable).

        Transient API errors are retried up to 4 times with exponential
        backoff (eventlet.sleep); the last error is re-raised.
        NOTE(review): on the generic-Exception retry path 'status' may be
        unbound when the very first poll fails — looks like a latent
        NameError; confirm before relying on this retry behaviour.
        """
        tries = 4
        delay = 1
        while tries > 0:
            while True:
                try:
                    stack_info = self._heat_client.stacks.get(
                        stack_id=self._stack)
                    status = stack_info.stack_status
                    tries = 4
                    delay = 1
                except heatclient.exc.HTTPNotFound:
                    stack_info = None
                    status = 'NOT_FOUND'
                except Exception:
                    tries -= 1
                    delay *= 2
                    if not tries:
                        raise
                    eventlet.sleep(delay)
                break

            if 'IN_PROGRESS' in status:
                eventlet.sleep(2)
                continue
            if not status_func(status):
                raise EnvironmentError(
                    "Unexpected stack state {0}".format(status))

            try:
                return dict([(t['output_key'], t['output_value'])
                             for t in stack_info.outputs])
            except Exception:
                return {}
        return {}
|
@ -1,28 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
class CommandBase(object):
    """Abstract base for deployment command executors.

    Concrete executors queue commands via execute() and flush them in
    batches via execute_pending(). The base implementations are inert
    no-ops so subclasses may override only what they need.
    """

    def execute(self, **kwargs):
        """Accept a command; the base implementation ignores it."""
        pass

    def has_pending_commands(self):
        """Report whether accepted commands still await a flush."""
        return False

    def execute_pending(self):
        """Flush accepted commands; report whether any work was done."""
        return False

    def close(self):
        """Dispose of executor resources; the base holds none."""
        pass
|
@ -1,54 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import command
|
||||
import cloud_formation
|
||||
import network
|
||||
import vm_agent
|
||||
from muranoconductor import config as cfg
|
||||
|
||||
|
||||
class CommandDispatcher(command.CommandBase):
    """Routes commands to per-subsystem executors.

    Registered executors: 'cf' (Heat), 'agent' (VM agent), and — unless
    the 'nova' network topology is configured — 'net' (Neutron).
    """

    def __init__(self, environment, rmqclient, token, tenant_id, reporter):
        self._command_map = {
            'cf': cloud_formation.HeatExecutor(environment, token, tenant_id,
                                               reporter),
            'agent': vm_agent.VmAgentExecutor(
                environment, rmqclient, reporter),
        }
        if cfg.CONF.network_topology != "nova":
            self._command_map['net'] = \
                network.NeutronExecutor(tenant_id, token)

    def execute(self, name, **kwargs):
        """Forward a command to the executor registered under *name*."""
        self._command_map[name].execute(**kwargs)

    def execute_pending(self):
        """Flush every executor; True if any of them did work.

        All executors are flushed eagerly (no short-circuiting),
        because execute_pending() has side effects.
        """
        flushed = [executor.execute_pending()
                   for executor in self._command_map.values()]
        return any(flushed)

    def has_pending_commands(self):
        """True if at least one executor still has queued commands."""
        pending = [executor.has_pending_commands()
                   for executor in self._command_map.values()]
        return any(pending)

    def close(self):
        """Close every registered executor."""
        for executor in self._command_map.values():
            executor.close()
|
@ -1,231 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import math
|
||||
import muranoconductor.config
|
||||
from keystoneclient.v2_0 import client as ksclient
|
||||
import netaddr
|
||||
from netaddr.strategy import ipv4
|
||||
from neutronclient.v2_0 import client as client
|
||||
from muranoconductor.commands.command import CommandBase
|
||||
|
||||
|
||||
class NeutronExecutor(CommandBase):
    """Batches Neutron lookups (subnets, routers, networks) for a tenant.

    Requests are scheduled via execute() and resolved in bulk by
    execute_pending(), which answers every queued callback with the
    looked-up (or freshly allocated) value.
    """

    def __init__(self, tenant_id, token):
        """Authenticate against keystone and build a Neutron client.

        :param tenant_id: tenant whose networking is queried
        :param token: keystone auth token
        :raises client.exceptions.Unauthorized: if the token is rejected
        """
        keystone_settings = muranoconductor.config.CONF.keystone
        neutron_settings = muranoconductor.config.CONF.neutron

        # CIDR allocation sizing: max environments and max hosts per env.
        self.env_count = muranoconductor.config.CONF.max_environments
        self.host_count = muranoconductor.config.CONF.max_hosts
        self.address = muranoconductor.config.CONF.env_ip_template

        # Pending request queues, drained by execute_pending().
        self.cidr_waiting_per_router = {}
        self.cidr_waiting_per_network = {}
        self.router_requests = []
        self.network_requests = []
        self.tenant_id = tenant_id

        keystone_client = ksclient.Client(
            endpoint=keystone_settings.auth_url,
            cacert=keystone_settings.ca_file or None,
            cert=keystone_settings.cert_file or None,
            key=keystone_settings.key_file or None,
            insecure=keystone_settings.insecure)

        if not keystone_client.authenticate(
                auth_url=keystone_settings.auth_url,
                tenant_id=tenant_id,
                token=token):
            raise client.exceptions.Unauthorized()

        neutron_url = keystone_client.service_catalog.url_for(
            service_type='network',
            endpoint_type=neutron_settings.endpoint_type)
        self.neutron = client.Client(endpoint_url=neutron_url,
                                     token=token,
                                     ca_cert=neutron_settings.ca_cert or None,
                                     insecure=neutron_settings.insecure)

        # Maps workflow command names to their scheduling methods.
        self.command_map = {
            "get_new_subnet": self._schedule_get_new_subnet,
            "get_existing_subnet": self._schedule_get_existing_subnet,
            "get_router": self._schedule_get_router,
            "get_network": self._schedule_get_network
        }

    def execute(self, command, callback, **kwargs):
        """Schedule a known command; unknown commands are ignored."""
        if command in self.command_map:
            self.command_map[command](callback, **kwargs)

    def has_pending_commands(self):
        """True if any scheduled request awaits execute_pending()."""
        return len(self.cidr_waiting_per_router) + len(
            self.cidr_waiting_per_network) + len(self.router_requests) + len(
            self.network_requests) > 0

    def execute_pending(self):
        """Resolve all queued requests; True if any queue was drained."""
        r1 = self._execute_pending_new_cidr_requests()
        r2 = self._execute_pending_net_requests()
        r3 = self._execute_pending_router_requests()
        r4 = self._execute_pending_existing_cidr_requests()
        return r1 or r2 or r3 or r4

    def _execute_pending_new_cidr_requests(self):
        """Allocate one fresh CIDR per queued callback, per router."""
        if not len(self.cidr_waiting_per_router):
            return False
        for router, callbacks in self.cidr_waiting_per_router.items():
            results = self._get_subnet(router, len(callbacks))
            for callback, result in zip(callbacks, results):
                callback(result)
        self.cidr_waiting_per_router = {}
        return True

    def _execute_pending_existing_cidr_requests(self):
        """Answer queued lookups of an existing network's CIDR."""
        if not len(self.cidr_waiting_per_network):
            return False
        for network, callbacks in self.cidr_waiting_per_network.items():
            result = self._get_existing_subnet(network)
            for callback in callbacks:
                callback(result)
        self.cidr_waiting_per_network = {}
        return True

    def _execute_pending_router_requests(self):
        """Answer queued router lookups with (routerId, externalNetId).

        Prefers a router whose name contains 'murano'; falls back to the
        first router; reports "NOT_FOUND" for both values when the tenant
        has no routers.
        """
        if not len(self.router_requests):
            return False

        routers = self.neutron.list_routers(tenant_id=self.tenant_id). \
            get("routers")
        if not len(routers):
            routerId = externalNetId = "NOT_FOUND"
        else:
            routerId = routers[0]["id"]
            externalNetId = routers[0]['external_gateway_info']['network_id']

            if len(routers) > 1:
                for router in routers:
                    if "murano" in router["name"].lower():
                        routerId = router["id"]
                        externalNetId = \
                            router['external_gateway_info']['network_id']
                        break

        for callback in self.router_requests:
            callback(routerId, externalNetId)
        self.router_requests = []
        return True

    def _execute_pending_net_requests(self):
        """Answer queued network lookups with a single network id.

        Preference order when multiple networks exist: one named
        '*murano*', then an external network, then a shared network,
        else the first listed. None when no networks exist.
        """
        if not len(self.network_requests):
            return False

        nets = self.neutron.list_networks()["networks"]
        if not len(nets):
            netId = None
        else:
            netId = nets[0]["id"]
            if len(nets) > 1:
                murano_id = None
                ext_id = None
                shared_id = None
                for net in nets:
                    if "murano" in net.get("name").lower():
                        murano_id = net["id"]
                        break
                    if net.get("router:external") and not ext_id:
                        ext_id = net["id"]
                    if net.get("shared") and not shared_id:
                        shared_id = net["id"]
                if murano_id:
                    netId = murano_id
                elif ext_id:
                    netId = ext_id
                elif shared_id:
                    netId = shared_id
        for callback in self.network_requests:
            callback(netId)
        self.network_requests = []
        return True

    def _get_subnet(self, routerId, count):
        """Allocate *count* unused CIDRs ("*" routerId means any router)."""
        if routerId == "*":
            routerId = None
        if routerId:
            taken_cidrs = self._get_taken_cidrs_by_router(routerId)
        else:
            taken_cidrs = self._get_all_taken_cidrs()
        results = []
        for i in range(0, count):
            res = self._generate_cidr(taken_cidrs)
            results.append(res)
            # Reserve it locally so the next allocation in this batch
            # cannot pick the same CIDR.
            taken_cidrs.append(res)
        return results

    def _get_existing_subnet(self, network_id):
        """CIDR of the network's first subnet, or None if it has none."""
        subnets = self.neutron.list_subnets(network_id=network_id)['subnets']
        if not subnets:
            return None
        else:
            return subnets[0]['cidr']

    def _get_taken_cidrs_by_router(self, routerId):
        """CIDRs of every subnet attached to the given router's ports."""
        ports = self.neutron.list_ports(device_id=routerId)["ports"]
        subnet_ids = []
        for port in ports:
            for fixed_ip in port["fixed_ips"]:
                subnet_ids.append(fixed_ip["subnet_id"])

        all_subnets = self.neutron.list_subnets()["subnets"]
        filtered_cidrs = [subnet["cidr"] for subnet in all_subnets if
                          subnet["id"] in subnet_ids]

        return filtered_cidrs

    def _get_all_taken_cidrs(self):
        """CIDRs of every subnet visible to this client."""
        return [subnet["cidr"] for subnet in
                self.neutron.list_subnets()["subnets"]]

    def _generate_cidr(self, taken_cidrs):
        """First free CIDR inside the env_ip_template pool, else None.

        Pool sizing: env/host counts are rounded up to powers of two and
        carve the address space into per-environment subnets.
        """
        bits_for_envs = int(math.ceil(math.log(self.env_count, 2)))
        bits_for_hosts = int(math.ceil(math.log(self.host_count, 2)))
        width = ipv4.width
        mask_width = width - bits_for_hosts - bits_for_envs
        net = netaddr.IPNetwork(self.address + "/" + str(mask_width))
        for subnet in net.subnet(width - bits_for_hosts):
            if str(subnet) in taken_cidrs:
                continue
            return str(subnet)
        return None

    def _schedule_get_new_subnet(self, callback, **kwargs):
        # "*" groups requests that are not tied to a specific router.
        routerId = kwargs.get("routerId")
        if not routerId:
            routerId = "*"
        if routerId in self.cidr_waiting_per_router:
            self.cidr_waiting_per_router[routerId].append(callback)
        else:
            self.cidr_waiting_per_router[routerId] = [callback]

    def _schedule_get_existing_subnet(self, callback, **kwargs):
        existing_network = kwargs.get("existingNetwork")

        if existing_network in self.cidr_waiting_per_network:
            self.cidr_waiting_per_network[existing_network].append(callback)
        else:
            self.cidr_waiting_per_network[existing_network] = [callback]

    def _schedule_get_router(self, callback, **kwargs):
        self.router_requests.append(callback)

    def _schedule_get_network(self, callback, **kwargs):
        self.network_requests.append(callback)
|
@ -1,221 +0,0 @@
|
||||
import uuid
|
||||
import yaml
|
||||
import os
|
||||
import types
|
||||
|
||||
from muranoconductor.openstack.common import log as logging
|
||||
from muranocommon.messaging import Message
|
||||
import muranoconductor.helpers
|
||||
from command import CommandBase
|
||||
from muranocommon.helpers.token_sanitizer import TokenSanitizer
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VmAgentExecutor(CommandBase):
    """Sends execution plans to in-VM murano agents over RabbitMQ and
    collects their replies from a shared results queue."""

    def __init__(self, stack, rmqclient, reporter):
        """
        :param stack: environment/stack identifier, used to derive queues
        :param rmqclient: RabbitMQ client used for declare/send/subscribe
        :param reporter: status reporter (stored but unused here)
        """
        self._stack = stack
        self._rmqclient = rmqclient
        self._pending_list = []
        self._results_queue = '-execution-results-%s' % str(stack).lower()
        self._reporter = reporter
        # HA queue with 24h TTL so replies survive broker restarts.
        rmqclient.declare(self._results_queue, enable_ha=True, ttl=86400000)

    def execute(self, template, mappings, unit, service, callback, metadata_id,
                timeout=None):
        """Build an execution plan from a template and send it to the
        agent queue of (stack, service, unit); the callback fires later
        from execute_pending() with the agent's reply."""
        template_path = '{0}/templates/agent/{1}.template'.format(metadata_id,
                                                                  template)

        #with open(template_path) as t_file:
        #    template_data = t_file.read()
        #
        #json_template = json.loads(template_data)
        #json_template = self.encode_scripts(json_template, template_path)
        template, msg_id = self.build_execution_plan(template_path)

        # Substitute workflow-provided values into the plan.
        template = muranoconductor.helpers.transform_json(
            template, mappings)

        queue = ('%s-%s-%s' % (self._stack, service, unit)).lower()
        self._pending_list.append({
            'id': msg_id,
            'callback': callback,
            'timeout': timeout
        })

        msg = Message()
        msg.body = template
        msg.id = msg_id
        self._rmqclient.declare(queue, enable_ha=True, ttl=86400000)
        self._rmqclient.send(message=msg, key=queue)
        log.info('Sending RMQ message {0} to {1} with id {2}'.format(
            TokenSanitizer().sanitize(template), queue, msg_id))

    def build_execution_plan(self, path):
        """Load a YAML plan and return (plan_dict, message_id), using
        the v1 builder unless FormatVersion says otherwise.

        NOTE(review): yaml.load without SafeLoader executes arbitrary
        YAML tags — metadata templates are trusted input here, but this
        would be unsafe for untrusted plans.
        """
        with open(path) as stream:
            template = yaml.load(stream)
        # types.DictionaryType is Python-2-only (== dict).
        if not isinstance(template, types.DictionaryType):
            raise ValueError('Incorrect execution plan ' + path)
        format_version = template.get('FormatVersion')
        if not format_version or format_version.startswith('1.'):
            return self._build_v1_execution_plan(template, path)
        else:
            return self._build_v2_execution_plan(template, path)

    def _split_path(self, _path, parts=None):
        """Recursively split a path into its components, root first."""
        if parts is None:
            parts = []
        head, tail = os.path.split(_path)
        if tail:
            parts.append(tail)
        elif os.path.isabs(head):  # head is '/' and tail is '' - stop
            parts.append(head)
            head = None
        if head:
            return self._split_path(head, parts)
        else:
            parts.reverse()
            return parts

    @staticmethod
    def _join(*args):
        # os.path.join() with no args raises; map that case to ''.
        return os.path.join(*args) if args else ''

    def _split_agent_path(self, path, agent_root_dir_depth=3):
        """Split a template path into (agent root dir, remaining subdirs)."""
        agent_subdir = os.path.dirname(os.path.normpath(path))
        dir_parts = self._split_path(agent_subdir)
        return (self._join(*dir_parts[:agent_root_dir_depth]),
                self._join(*dir_parts[agent_root_dir_depth:]))

    def _ensure_relpath(self, path):
        """Return (relative path, was_absolute) for a script reference."""
        parts = self._split_path(os.path.normpath(path))
        if parts and os.path.isabs(parts[0]):
            return self._join(*parts[1:]), True
        else:
            return path, False

    def _build_v1_execution_plan(self, template, path):
        """v1 plans: inline each referenced script file, base64-encoded.

        NOTE: str.encode('base64') is Python-2-only.
        """
        agent_dir_root, rest_dirs = self._split_agent_path(path)
        scripts_folder = os.path.join(agent_dir_root, 'scripts')
        script_files = template.get('Scripts', [])
        scripts = []
        for script in script_files:
            script, was_abspath = self._ensure_relpath(script)
            if was_abspath:
                script_path = os.path.join(scripts_folder, script)
            else:
                script_path = os.path.join(scripts_folder, rest_dirs, script)
            log.debug('Loading script "{0}"'.format(script_path))
            with open(script_path) as script_file:
                script_data = script_file.read()
            scripts.append(script_data.encode('base64'))
        template['Scripts'] = scripts
        return template, uuid.uuid4().hex

    def _build_v2_execution_plan(self, template, path):
        """v2 plans: register every script/file in template['Files'],
        replacing names with generated file ids."""
        scripts_folder = os.path.join(
            os.path.dirname(path), 'scripts')
        plan_id = uuid.uuid4().hex
        template['ID'] = plan_id
        if 'Action' not in template:
            template['Action'] = 'Execute'
        if 'Files' not in template:
            template['Files'] = {}

        # Index pre-declared files by name so they are not re-added.
        files = {}
        for file_id, file_descr in template['Files'].items():
            files[file_descr['Name']] = file_id
        for name, script in template.get('Scripts', {}).items():
            if 'EntryPoint' not in script:
                raise ValueError('No entry point in script ' + name)
            script['EntryPoint'] = self._place_file(
                scripts_folder, script['EntryPoint'], template, files)
            if 'Files' in script:
                for i in range(0, len(script['Files'])):
                    script['Files'][i] = self._place_file(
                        scripts_folder, script['Files'][i], template, files)

        return template, plan_id

    def _place_file(self, folder, name, template, files):
        """Embed a file into template['Files'] (deduplicated by name)
        and return its id. '<name>' syntax forces base64 body encoding."""
        use_base64 = False
        if name.startswith('<') and name.endswith('>'):
            use_base64 = True
            name = name[1:len(name) - 1]
        if name in files:
            return files[name]

        file_id = uuid.uuid4().hex
        body_type = 'Base64' if use_base64 else 'Text'
        with open(os.path.join(folder, name)) as stream:
            body = stream.read()
            if use_base64:
                body = body.encode('base64')

        template['Files'][file_id] = {
            'Name': name,
            'BodyType': body_type,
            'Body': body
        }
        files[name] = file_id
        return file_id

    def has_pending_commands(self):
        """True while sent plans still await agent replies."""
        return len(self._pending_list) > 0

    def execute_pending(self):
        """Block on the results queue until every sent plan is answered.

        Replies are matched to pending entries by message id; on a
        receive timeout every remaining callback gets an
        AgentTimeoutException. Returns True if anything was pending.
        """
        if not self.has_pending_commands():
            return False

        with self._rmqclient.open(self._results_queue) as subscription:
            while self.has_pending_commands():
                # TODO: Add extended initialization timeout
                # By now, all the timeouts are defined by the command input
                # however, the first reply which we wait for being returned
                # from the unit may be delayed due to long unit initialization
                # and startup. So, for the nonitialized units we need to extend
                # the command's timeout with the initialization timeout
                timeout = self.get_max_timeout()
                if timeout:
                    span_message = "for {0} seconds".format(timeout)
                else:
                    span_message = 'infinitely'
                log.debug("Waiting %s for responses to be returned"
                          " by the agent. %i total responses remain",
                          span_message, len(self._pending_list))
                msg = subscription.get_message(timeout=timeout)
                if msg:
                    msg.ack()
                    msg_id = msg.body.get('SourceID', msg.id)
                    item, index = muranoconductor.helpers.find(
                        lambda t: t['id'] == msg_id, self._pending_list)
                    if item:
                        self._pending_list.pop(index)
                        item['callback'](msg.body)
                else:
                    # Timed out: fail all remaining commands.
                    while self.has_pending_commands():
                        item = self._pending_list.pop()
                        item['callback'](AgentTimeoutException(timeout))
        return True

    def get_max_timeout(self):
        """Largest per-command timeout, or None if any is unbounded."""
        res = 0
        for item in self._pending_list:
            if item['timeout'] is None:  # if at least 1 item has no timeout
                return None  # then return None (i.e. infinite)
            res = max(res, item['timeout'])
        return res
|
||||
|
||||
|
||||
class AgentTimeoutException(Exception):
    """Raised when the agent sends no response within the allotted time."""

    def __init__(self, timeout):
        self.message = "Unable to receive any response from the agent" \
                       " in {0} sec".format(timeout)
        self.timeout = timeout
        # Pass the text to Exception so str(e) and logging show it
        # (the original left the base uninitialized -> str(e) was '').
        super(AgentTimeoutException, self).__init__(self.message)
|
||||
|
||||
|
||||
class UnhandledAgentException(Exception):
    """Raised when the agent reports an unrecognized error payload."""

    def __init__(self, errors):
        self.message = "An unhandled exception has " \
                       "occurred in the Agent: {0}".format(errors)
        self.errors = errors
        # Pass the text to Exception so str(e) and logging show it
        # (the original left the base uninitialized -> str(e) was '').
        super(UnhandledAgentException, self).__init__(self.message)
|
@ -1,240 +0,0 @@
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Routines for configuring Glance
|
||||
"""
|
||||
|
||||
import logging
|
||||
import logging.config
|
||||
import logging.handlers
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
from oslo.config import cfg
|
||||
from paste import deploy
|
||||
|
||||
from muranoconductor import __version__ as version
|
||||
from muranoconductor.openstack.common import log
|
||||
|
||||
paste_deploy_opts = [
|
||||
cfg.StrOpt('flavor'),
|
||||
cfg.StrOpt('config_file'),
|
||||
]
|
||||
|
||||
directories = [
|
||||
cfg.StrOpt('data_dir', default=os.path.join(tempfile.gettempdir(),
|
||||
'muranoconductor-cache')),
|
||||
cfg.StrOpt('init_scripts_dir', default='etc/init-scripts'),
|
||||
cfg.StrOpt('agent_config_dir', default='etc/agent-config'),
|
||||
]
|
||||
|
||||
rabbit_opts = [
|
||||
cfg.StrOpt('host', default='localhost'),
|
||||
cfg.IntOpt('port', default=5672),
|
||||
cfg.StrOpt('login', default='guest'),
|
||||
cfg.StrOpt('password', default='guest'),
|
||||
cfg.StrOpt('virtual_host', default='/'),
|
||||
cfg.BoolOpt('ssl', default=False),
|
||||
cfg.StrOpt('ca_certs', default='')
|
||||
]
|
||||
|
||||
heat_opts = [
|
||||
cfg.BoolOpt('insecure', default=False),
|
||||
cfg.StrOpt('ca_file'),
|
||||
cfg.StrOpt('cert_file'),
|
||||
cfg.StrOpt('key_file'),
|
||||
cfg.StrOpt('endpoint_type', default='publicURL')
|
||||
]
|
||||
|
||||
neutron_opts = [
|
||||
cfg.BoolOpt('insecure', default=False),
|
||||
cfg.StrOpt('ca_cert'),
|
||||
cfg.StrOpt('endpoint_type', default='publicURL')
|
||||
]
|
||||
|
||||
keystone_opts = [
|
||||
cfg.StrOpt('auth_url'),
|
||||
cfg.BoolOpt('insecure', default=False),
|
||||
cfg.StrOpt('ca_file'),
|
||||
cfg.StrOpt('cert_file'),
|
||||
cfg.StrOpt('key_file')
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(paste_deploy_opts, group='paste_deploy')
|
||||
CONF.register_opts(rabbit_opts, group='rabbitmq')
|
||||
CONF.register_opts(heat_opts, group='heat')
|
||||
CONF.register_opts(neutron_opts, group='neutron')
|
||||
CONF.register_opts(keystone_opts, group='keystone')
|
||||
CONF.register_opts(directories)
|
||||
CONF.register_opt(cfg.StrOpt('file_server'))
|
||||
CONF.register_cli_opt(cfg.StrOpt('murano_metadata_url'))
|
||||
|
||||
|
||||
CONF.register_opt(cfg.IntOpt('max_environments', default=20))
|
||||
CONF.register_opt(cfg.IntOpt('max_hosts', default=250))
|
||||
CONF.register_opt(cfg.StrOpt('env_ip_template', default='10.0.0.0'))
|
||||
CONF.register_opt(cfg.StrOpt('network_topology',
|
||||
choices=['nova', 'flat', 'routed'],
|
||||
default='routed'))
|
||||
|
||||
cfg.set_defaults(log.log_opts, default_log_levels=[
|
||||
'iso8601=WARN',
|
||||
'heatclient=WARN'
|
||||
])
|
||||
|
||||
|
||||
def parse_args(args=None, usage=None, default_config_files=None):
    """Initialize CONF from command-line args and configuration files."""
    CONF(args=args, project='conductor', version=version,
         usage=usage, default_config_files=default_config_files)
|
||||
|
||||
|
||||
def setup_logging():
    """Configure the root logger from the options held in CONF."""
    # An explicit logging config file overrides every other option.
    if CONF.log_config:
        if os.path.exists(CONF.log_config):
            logging.config.fileConfig(CONF.log_config)
            return
        raise RuntimeError("Unable to locate specified logging "
                           "config file: %s" % CONF.log_config)

    root_logger = logging.root
    if CONF.debug:
        level = logging.DEBUG
    elif CONF.verbose:
        level = logging.INFO
    else:
        level = logging.WARNING
    root_logger.setLevel(level)

    formatter = logging.Formatter(CONF.log_format, CONF.log_date_format)

    # Pick the output target: syslog, a watched file, or stdout.
    if CONF.use_syslog:
        try:
            facility = getattr(logging.handlers.SysLogHandler,
                               CONF.syslog_log_facility)
        except AttributeError:
            raise ValueError(_("Invalid syslog facility"))
        handler = logging.handlers.SysLogHandler(address='/dev/log',
                                                 facility=facility)
    elif CONF.log_file:
        logfile = CONF.log_file
        if CONF.log_dir:
            logfile = os.path.join(CONF.log_dir, logfile)
        handler = logging.handlers.WatchedFileHandler(logfile)
    else:
        handler = logging.StreamHandler(sys.stdout)

    handler.setFormatter(formatter)
    root_logger.addHandler(handler)
|
||||
|
||||
|
||||
def _get_deployment_flavor():
    """Return CONF.paste_deploy.flavor as a '-<flavor>' suffix.

    An unset or empty flavor yields the empty string.
    """
    flavor = CONF.paste_deploy.flavor
    if flavor:
        return '-' + flavor
    return ''
|
||||
|
||||
|
||||
def _get_paste_config_path():
    """Locate this service's paste configuration file via CONF."""
    paste_suffix = '-paste.ini'
    conf_suffix = '.conf'
    if CONF.config_file:
        # Derive the paste config name from the last config file given.
        last_conf = CONF.config_file[-1]
        path = last_conf.replace(conf_suffix, paste_suffix)
    else:
        path = CONF.prog + paste_suffix
    return CONF.find_file(os.path.basename(path))
|
||||
|
||||
|
||||
def _get_deployment_config_file():
    """Return the absolute path of the paste deployment config file.

    :raises RuntimeError: when no paste config file can be located
    """
    path = CONF.paste_deploy.config_file
    if not path:
        path = _get_paste_config_path()
        if not path:
            msg = "Unable to locate paste config file for %s." % CONF.prog
            raise RuntimeError(msg)
    return os.path.abspath(path)
|
||||
|
||||
|
||||
def load_paste_app(app_name=None):
|
||||
"""
|
||||
Builds and returns a WSGI app from a paste config file.
|
||||
|
||||
We assume the last config file specified in the supplied ConfigOpts
|
||||
object is the paste config file.
|
||||
|
||||
:param app_name: name of the application to load
|
||||
|
||||
:raises RuntimeError when config file cannot be located or application
|
||||
cannot be loaded from config file
|
||||
"""
|
||||
if app_name is None:
|
||||
app_name = CONF.prog
|
||||
|
||||
# append the deployment flavor to the application name,
|
||||
# in order to identify the appropriate paste pipeline
|
||||
app_name += _get_deployment_flavor()
|
||||
|
||||
conf_file = _get_deployment_config_file()
|
||||
|
||||
try:
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.debug(_("Loading %(app_name)s from %(conf_file)s"),
|
||||
{'conf_file': conf_file, 'app_name': app_name})
|
||||
|
||||
app = deploy.loadapp("config:%s" % conf_file, name=app_name)
|
||||
|
||||
# Log the options used when starting if we're in debug mode...
|
||||
if CONF.debug:
|
||||
CONF.log_opt_values(logger, logging.DEBUG)
|
||||
|
||||
return app
|
||||
except (LookupError, ImportError), e:
|
||||
msg = _("Unable to load %(app_name)s from "
|
||||
"configuration file %(conf_file)s."
|
||||
"\nGot: %(e)r") % locals()
|
||||
logger.error(msg)
|
||||
raise RuntimeError(msg)
|
||||
|
||||
|
||||
class Config(object):
    """Dictionary-style read access to oslo.config option values."""

    def get_setting(self, section, name, default=None):
        """Return option 'name' from 'section' ('DEFAULT' means the root)."""
        group = CONF
        if section and section != 'DEFAULT':
            group = group.get(section, default)
        return group.get(name, default)

    def __getitem__(self, item):
        # 'section.name' addresses a group option; a bare name is looked
        # up in the DEFAULT section.
        parts = item.rsplit('.', 1)
        section = parts[0] if len(parts) == 2 else 'DEFAULT'
        return self.get_setting(section, parts[-1])
|
@ -1,67 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
class Context(object):
    """Hierarchical key/value store with copy-on-write inheritance.

    A child context lazily snapshots its parent's data on first access.
    Keys may carry navigation prefixes: each leading ':' climbs one level
    toward the root, and a leading '/' jumps straight to the root.
    """

    def __init__(self, parent=None):
        self._parent = parent
        self._data = None  # created lazily by _get_data()

    def _get_data(self):
        # Copy-on-first-use: inherit a snapshot of the parent's data.
        if self._data is None:
            if self._parent is None:
                self._data = {}
            else:
                self._data = self._parent._get_data().copy()
        return self._data

    def __getitem__(self, item):
        target, key = self._parseContext(item)
        return target._get_data().get(key)

    def __setitem__(self, key, value):
        target, stripped = self._parseContext(key)
        target._get_data()[stripped] = value

    def _parseContext(self, path):
        """Consume leading ':'/'/' characters, returning (context, key)."""
        target = self
        consumed = 0
        for ch in path:
            if ch == ':' and target._parent is not None:
                target = target._parent
            elif ch == '/':
                while target._parent is not None:
                    target = target._parent
            else:
                break
            consumed += 1
        return target, path[consumed:]

    def assign_from(self, context, copy=False):
        """Adopt another context's parent and data (optionally copied)."""
        self._parent = context._parent
        self._data = context._data
        if copy and self._data is not None:
            self._data = self._data.copy()

    @property
    def parent(self):
        return self._parent

    def __str__(self):
        if self._data is not None:
            return str(self._data)
        if self._parent:
            return str(self._parent)
        return str({})
|
@ -1,117 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import deep
|
||||
import types
|
||||
import re
|
||||
|
||||
|
||||
def transform_json(json, mappings):
    """Recursively substitute '$macro' placeholders in a JSON-like tree.

    Lists and dicts are transformed element by element (dict keys too);
    strings beginning with '$' are resolved through 'mappings' via
    convert_macro_parameter. Any other value is returned unchanged.

    NOTE: the original contained a second 'elif isinstance(json,
    types.ListType)' branch that was unreachable because lists are
    already handled by the early return below; that dead branch has been
    removed.
    """
    if isinstance(json, types.ListType):
        return [transform_json(t, mappings) for t in json]

    if isinstance(json, types.DictionaryType):
        result = {}
        for key, value in json.items():
            result[transform_json(key, mappings)] = \
                transform_json(value, mappings)
        return result

    elif isinstance(json, types.StringTypes) and json.startswith('$'):
        value = convert_macro_parameter(json[1:], mappings)
        if value is not None:
            return value

    return json
|
||||
|
||||
|
||||
def convert_macro_parameter(macro, mappings):
    """Expand '{name}' placeholders in 'macro' using 'mappings'.

    When no placeholder is present, the whole macro string is looked up
    in 'mappings' instead; returns None when it is absent.
    """
    replaced = [False]  # mutable cell so the closure can flag a hit

    def substitute(match):
        replaced[0] = True
        return unicode(mappings.get(match.group(1)))

    expanded = re.sub('{(\\w+?)}', substitute, macro)
    if replaced[0]:
        return expanded
    return mappings.get(macro)
|
||||
|
||||
|
||||
def merge_lists(list1, list2):
    """Concatenate two lists, dropping deep-equal duplicates.

    Two items count as duplicates when deep.diff() returns None for them.
    """
    merged = []
    for candidate in list1 + list2:
        duplicate = False
        for existing in merged:
            if deep.diff(candidate, existing) is None:
                duplicate = True
                break
        if not duplicate:
            merged.append(candidate)
    return merged
|
||||
|
||||
|
||||
def merge_dicts(dict1, dict2, max_levels=0):
    """Recursively merge dict2 into dict1 and return a new dict.

    Values present under the same key in both dicts must have matching
    types; nested dicts are merged recursively and lists are merged via
    merge_lists, down to 'max_levels' levels (0 means unlimited). Scalar
    conflicts are resolved in favor of dict2.

    :raises TypeError: when the two values for a shared key differ in type
    """
    result = {}
    for key, value in dict1.items():
        result[key] = value
        if key in dict2:
            other_value = dict2[key]
            if type(other_value) != type(value):
                # Fixed: the original raised a bare TypeError() with no
                # message, hiding which key caused the conflict.
                raise TypeError(
                    'Type mismatch for key {0!r}'.format(key))
            if max_levels != 1 and isinstance(
                    other_value, types.DictionaryType):
                result[key] = merge_dicts(
                    value, other_value,
                    0 if max_levels == 0 else max_levels - 1)
            elif max_levels != 1 and isinstance(
                    other_value, types.ListType):
                result[key] = merge_lists(value, other_value)
            else:
                result[key] = other_value
    for key, value in dict2.items():
        if key not in result:
            result[key] = value
    return result
|
||||
|
||||
|
||||
def find(f, seq):
    """Return (item, index) for the first item in seq where f(item) is
    truthy, or (None, -1) when nothing matches."""
    for position, element in enumerate(seq):
        if f(element):
            return element, position
    return None, -1
|
||||
|
||||
|
||||
def str2unicode(obj):
    """Recursively convert every byte string inside obj to unicode.

    Walks dicts (keys and values) and lists; any other value is returned
    as-is.
    """
    if isinstance(obj, str):
        return unicode(obj)
    if isinstance(obj, types.DictionaryType):
        return dict((str2unicode(key), str2unicode(value))
                    for key, value in obj.items())
    if isinstance(obj, types.ListType):
        return [str2unicode(t) for t in obj]
    return obj
|
@ -1,249 +0,0 @@
|
||||
# Translations template for murano-conductor.
|
||||
# Copyright (C) 2014 ORGANIZATION
|
||||
# This file is distributed under the same license as the murano-conductor
|
||||
# project.
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, 2014.
|
||||
#
|
||||
#, fuzzy
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: murano-conductor 0.4\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2014-01-20 14:55+0400\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=utf-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
|
||||
#: muranoconductor/config.py:151
|
||||
msgid "Invalid syslog facility"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/config.py:225
|
||||
#, python-format
|
||||
msgid "Loading %(app_name)s from %(conf_file)s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/config.py:236
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Unable to load %(app_name)s from configuration file %(conf_file)s.\n"
|
||||
"Got: %(e)r"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/eventlet_backdoor.py:141
|
||||
#, python-format
|
||||
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/exception.py:103
|
||||
msgid "Uncaught exception"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/excutils.py:62
|
||||
#, python-format
|
||||
msgid "Original exception being dropped: %s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/excutils.py:90
|
||||
#, python-format
|
||||
msgid "Unexpected exception occurred %d time(s)... retrying."
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/fileutils.py:64
|
||||
#, python-format
|
||||
msgid "Reloading cached file %s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/lockutils.py:100
|
||||
#, python-format
|
||||
msgid "Could not release the acquired lock `%s`"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/lockutils.py:166
|
||||
#, python-format
|
||||
msgid "Got semaphore \"%(lock)s\""
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/lockutils.py:175
|
||||
#, python-format
|
||||
msgid "Attempting to grab file lock \"%(lock)s\""
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/lockutils.py:185
|
||||
#, python-format
|
||||
msgid "Created lock path: %s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/lockutils.py:203
|
||||
#, python-format
|
||||
msgid "Got file lock \"%(lock)s\" at %(path)s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/lockutils.py:207
|
||||
#, python-format
|
||||
msgid "Released file lock \"%(lock)s\" at %(path)s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/lockutils.py:244
|
||||
#, python-format
|
||||
msgid "Got semaphore / lock \"%(function)s\""
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/lockutils.py:248
|
||||
#, python-format
|
||||
msgid "Semaphore / lock released \"%(function)s\""
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/log.py:244
|
||||
#, python-format
|
||||
msgid "Deprecated: %s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/log.py:336
|
||||
#, python-format
|
||||
msgid "Error loading logging config %(log_config)s: %(err_msg)s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/log.py:386
|
||||
#, python-format
|
||||
msgid "syslog facility must be one of: %s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/log.py:556
|
||||
#, python-format
|
||||
msgid "Fatal call to deprecated config: %(msg)s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/loopingcall.py:84
|
||||
#, python-format
|
||||
msgid "task run outlasted interval by %s sec"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/loopingcall.py:91
|
||||
msgid "in fixed duration looping call"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/loopingcall.py:131
|
||||
#, python-format
|
||||
msgid "Dynamic looping call sleeping for %.02f seconds"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/loopingcall.py:138
|
||||
msgid "in dynamic looping call"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/service.py:103
|
||||
#: muranoconductor/openstack/common/service.py:271
|
||||
msgid "Full set of CONF:"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/service.py:112
|
||||
#: muranoconductor/openstack/common/service.py:214
|
||||
#, python-format
|
||||
msgid "Caught %s, exiting"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/service.py:123
|
||||
msgid "Exception during rpc cleanup."
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/service.py:159
|
||||
msgid "Parent process has died unexpectedly, exiting"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/service.py:196
|
||||
msgid "Forking too fast, sleeping"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/service.py:219
|
||||
msgid "Unhandled exception"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/service.py:226
|
||||
#, python-format
|
||||
msgid "Started child %d"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/service.py:236
|
||||
#, python-format
|
||||
msgid "Starting %d workers"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/service.py:253
|
||||
#, python-format
|
||||
msgid "Child %(pid)d killed by signal %(sig)d"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/service.py:257
|
||||
#, python-format
|
||||
msgid "Child %(pid)s exited with status %(code)d"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/service.py:261
|
||||
#, python-format
|
||||
msgid "pid %d not in child list"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/service.py:289
|
||||
#, python-format
|
||||
msgid "Caught %s, stopping children"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/service.py:300
|
||||
#, python-format
|
||||
msgid "Waiting on %d children to exit"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/sslutils.py:52
|
||||
#, python-format
|
||||
msgid "Unable to find cert_file : %s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/sslutils.py:55
|
||||
#, python-format
|
||||
msgid "Unable to find ca_file : %s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/sslutils.py:58
|
||||
#, python-format
|
||||
msgid "Unable to find key_file : %s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/sslutils.py:61
|
||||
msgid ""
|
||||
"When running server in SSL mode, you must specify both a cert_file and "
|
||||
"key_file option value in your configuration file"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/sslutils.py:100
|
||||
#, python-format
|
||||
msgid "Invalid SSL version : %s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/notifier/api.py:129
|
||||
#, python-format
|
||||
msgid "%s not in valid priorities"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/notifier/api.py:145
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Problem '%(e)s' attempting to send to notification system. "
|
||||
"Payload=%(payload)s"
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/notifier/api.py:164
|
||||
#, python-format
|
||||
msgid "Failed to load notifier %s. These notifications will not be sent."
|
||||
msgstr ""
|
||||
|
||||
#: muranoconductor/openstack/common/notifier/rpc_notifier.py:45
|
||||
#: muranoconductor/openstack/common/notifier/rpc_notifier2.py:51
|
||||
#, python-format
|
||||
msgid "Could not send notification to %(topic)s. Payload=%(message)s"
|
||||
msgstr ""
|
||||
|
@ -1,197 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import sys
|
||||
import tarfile
|
||||
import shutil
|
||||
import tempfile
|
||||
import hashlib
|
||||
from glob import glob
|
||||
from metadataclient.common.exceptions import CommunicationError
|
||||
from muranoconductor import config
|
||||
from metadataclient.v1.client import Client
|
||||
import os
|
||||
from keystoneclient.v2_0 import client as ksclient
|
||||
from keystoneclient.exceptions import EndpointNotFound
|
||||
|
||||
from openstack.common import log as logging
|
||||
|
||||
CHUNK_SIZE = 1 << 20 # 1MB
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class MetadataException(BaseException):
    """Error while talking to the Metadata Repository.

    Deliberately derives from BaseException rather than Exception so the
    messaging layer does not ack the message when it is raised.
    """
    pass
|
||||
|
||||
|
||||
def _unpack_data_archive(task_id, hash):
    """Extract the metadata archive '<hash>.tar.gz' into a per-task dir.

    Returns the destination directory (named after task_id).

    :raises MetadataException: when the file is not a valid tarball
    """
    archive_name = hash + '.tar.gz'
    if not tarfile.is_tarfile(archive_name):
        raise MetadataException('Received invalid file {0} from Metadata '
                                'Repository'.format(hash))
    dst_dir = task_id
    if not os.path.exists(dst_dir):
        os.mkdir(dst_dir)
    tar = tarfile.open(archive_name, 'r:gz')
    try:
        # NOTE(review): extractall() trusts member paths; a malicious
        # archive could escape dst_dir via '../' traversal — confirm the
        # repository is a trusted source or sanitize member names.
        tar.extractall(path=dst_dir)
    finally:
        tar.close()
    return dst_dir
|
||||
|
||||
|
||||
def get_endpoint(token_id, tenant_id):
    """Resolve the Murano Metadata Repository endpoint URL.

    Uses CONF.murano_metadata_url when configured; otherwise
    authenticates against Keystone and looks the 'murano-metadata'
    service up in the catalog, falling back to a localhost default when
    it is not registered.
    """
    endpoint = CONF.murano_metadata_url
    if endpoint:
        return endpoint

    keystone_settings = CONF.keystone

    client = ksclient.Client(auth_url=keystone_settings.auth_url,
                             token=token_id)
    client.authenticate(
        auth_url=keystone_settings.auth_url,
        tenant_id=tenant_id,
        token=token_id)

    try:
        endpoint = client.service_catalog.url_for(
            service_type='murano-metadata')
    except EndpointNotFound:
        endpoint = 'http://localhost:8084/v1'
        log.warning(
            'Murano Metadata API location could not be found in the '
            'Keystone Service Catalog, using default: {0}'.format(
                endpoint))
    return endpoint
|
||||
|
||||
|
||||
def metadataclient(token_id, tenant_id):
    """Build a metadata repository Client bound to the resolved endpoint."""
    return Client(endpoint=get_endpoint(token_id, tenant_id),
                  token=token_id)
|
||||
|
||||
|
||||
def get_metadata(task_id, token_id, tenant_id):
    # Fetch (or reuse) the conductor metadata archive and unpack it into a
    # directory named after the task. The hash of any locally cached archive
    # is sent so the repository can answer 304 Not Modified.
    hash = _check_existing_hash()
    try:
        log.debug('Retrieving metadata from Murano Metadata Repository')
        resp, body_iter = metadataclient(token_id, tenant_id).\
            metadata_client.get_conductor_data(hash)
    except CommunicationError as e:
        if hash:
            # A cached archive exists: degrade gracefully and reuse it.
            log.warning('Metadata update failed: '
                        'Unable to connect Metadata Repository due to {0}. '
                        'Using existing version of metadata'.format(e))
        else:
            # No cache to fall back on — surface the failure.
            log.exception(e)
            exc_type, exc_value, exc_traceback = sys.exc_info()
            raise MetadataException('Unable to get data '
                                    'from Metadata Repository due to {0}: '
                                    '{1}'.format(exc_type.__name__, exc_value))

    else:
        if resp.status == 304:
            # Cache hit: repository content matches our local archive.
            log.debug('Metadata unmodified. Using existing archive.')

        elif resp.status == 200:
            # Stream the new archive to a temp file, then rename it to
            # '<sha1>.tar.gz' so _check_existing_hash() finds it next time.
            with tempfile.NamedTemporaryFile(delete=False) as archive:
                for chunk in body_iter:
                    archive.write(chunk)
            hash = _get_hash(archive.name)
            shutil.move(archive.name, hash + '.tar.gz')
        else:
            # Unexpected HTTP status: reuse the cache when possible.
            msg = 'Metadata update failed: ' \
                  'Got {0} status in response.'.format(resp.status)
            if hash:
                log.warning(msg + ' Using existing version of metadata.')
            else:
                raise MetadataException(msg)
    return _unpack_data_archive(task_id, hash)
|
||||
|
||||
|
||||
def release(folder):
    """Best-effort removal of a task's metadata folder."""
    log.debug('Deleting metadata folder {0}'.format(folder))
    try:
        shutil.rmtree(folder)
    except Exception as e:
        # Cleanup failure is logged but deliberately never propagated.
        log.exception('Unable to delete folder {0} with '
                      'task metadata due to {1}'.format(folder, e))
|
||||
|
||||
|
||||
def prepare(data_dir):
    """Populate data_dir with init scripts and agent configs, then chdir.

    Any existing copies inside data_dir are deleted and replaced.
    NOTE: the final os.chdir changes the process-wide working directory.
    """
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
        log.info("Creating directory '{0}' to store "
                 "conductor data".format(data_dir))

    def refresh(src_dir, label):
        # Replace any stale copy of src_dir inside data_dir.
        dst = os.path.join(data_dir, os.path.basename(src_dir))
        if os.path.exists(dst):
            log.info("Found existing {0} directory at"
                     " '{1}'. Deleting it.'".format(label, dst))
            shutil.rmtree(dst)
        log.info("Copying {0} directory from '{1}' "
                 "to '{2}'".format(label, src_dir, dst))
        shutil.copytree(src_dir, dst)

    refresh(CONF.init_scripts_dir, 'init scripts')
    refresh(CONF.agent_config_dir, 'agent config')
    os.chdir(data_dir)
|
||||
|
||||
|
||||
def _get_hash(archive_path):
    """Calculate the SHA-1 hash of an archive file.

    SHA-1 takes a bit more time than MD5 (see http://tinyurl.com/kpj5jy7),
    but is more secure.

    :returns: hex digest string, or None when the file does not exist
    """
    if not os.path.exists(archive_path):
        log.info("Archive '{0}' doesn't exist, no hash to calculate".format(
            archive_path))
        return None
    sha1 = hashlib.sha1()
    # Fixed: open in binary mode. The original used the default text mode,
    # which corrupts the digest on platforms that translate line endings
    # when hashing binary .tar.gz content.
    with open(archive_path, 'rb') as f:
        buf = f.read(CHUNK_SIZE)
        while buf:
            sha1.update(buf)
            buf = f.read(CHUNK_SIZE)
    hsum = sha1.hexdigest()
    log.debug("Archive '{0}' has hash-sum {1}".format(archive_path, hsum))
    return hsum
|
||||
|
||||
|
||||
def _check_existing_hash():
|
||||
hash_archives = glob('*.tar.gz')
|
||||
if not hash_archives:
|
||||
hash = None
|
||||
else:
|
||||
if len(hash_archives) > 1:
|
||||
log.warning('There are to metadata archive. Deleting them both')
|
||||
for item in hash_archives:
|
||||
os.remove(item)
|
||||
hash = None
|
||||
else:
|
||||
file_name, extension = hash_archives[0].split('.', 1)
|
||||
hash = file_name
|
||||
return hash
|
@ -1,100 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from muranoconductor import xml_code_engine
|
||||
import muranoconductor.config
|
||||
|
||||
from openstack.common import log as logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_subnet(engine, context, body, routerId=None, existingNetwork=None,
               result=None):
    """XML-DSL handler: asynchronously resolve a subnet CIDR.

    Dispatches 'get_existing_subnet' or 'get_new_subnet' to the 'net'
    command dispatcher; on completion the callback stores {'cidr': ...}
    under 'result' in the context and evaluates the <success> body, if
    one is present.
    """
    command_dispatcher = context['/commandDispatcher']

    def callback(result_value):
        if result is not None:
            context[result] = {"cidr": result_value}

        success_handler = body.find('success')
        if success_handler is not None:
            engine.evaluate_content(success_handler, context)

    command = "get_existing_subnet" if existingNetwork else "get_new_subnet"

    command_dispatcher.execute(
        name="net",
        command=command,
        existingNetwork=existingNetwork,
        routerId=routerId,
        callback=callback)
|
||||
|
||||
|
||||
def get_default_router(engine, context, body, result=None):
    """XML-DSL handler: asynchronously fetch the default router.

    On completion the callback stores {'routerId': ..., 'floatingId': ...}
    under 'result' in the context and evaluates the <success> body, if
    one is present.
    """
    command_dispatcher = context['/commandDispatcher']

    def callback(routerId, externalNetId):
        if result is not None:
            context[result] = {"routerId": routerId,
                               "floatingId": externalNetId}

        success_handler = body.find('success')
        if success_handler is not None:
            engine.evaluate_content(success_handler, context)

    command_dispatcher.execute(name="net",
                               command="get_router",
                               callback=callback)
|
||||
|
||||
|
||||
def get_default_network(engine, context, body, result=None):
    """XML-DSL handler: asynchronously fetch the default network id.

    On completion the callback stores {'networkId': ...} under 'result'
    in the context and evaluates the <success> body, if one is present.
    """
    command_dispatcher = context['/commandDispatcher']

    def callback(result_value):
        if result is not None:
            context[result] = {"networkId": result_value}

        success_handler = body.find('success')
        if success_handler is not None:
            engine.evaluate_content(success_handler, context)

    command_dispatcher.execute(name="net",
                               command="get_network",
                               callback=callback)
|
||||
|
||||
|
||||
def get_network_topology(engine, context, body, result=None):
    """XML-DSL handler: return the configured network topology name."""
    return muranoconductor.config.CONF.network_topology
|
||||
|
||||
|
||||
# Register the XML-DSL handlers with the engine.
# Fixed: the original registered get_default_router under
# "get-default-router-id" twice; the redundant second registration has
# been removed.
xml_code_engine.XmlCodeEngine.register_function(
    get_subnet, "get-cidr")

xml_code_engine.XmlCodeEngine.register_function(
    get_default_router, "get-default-router-id")

xml_code_engine.XmlCodeEngine.register_function(
    get_default_network, "get-default-network-id")

xml_code_engine.XmlCodeEngine.register_function(
    get_network_topology, "get-net-topology")
|
@ -1,111 +0,0 @@
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Simple class that stores security context information in the web request.
|
||||
|
||||
Projects should subclass this class if they wish to enhance the request
|
||||
context or provide additional information in their specific WSGI pipeline.
|
||||
"""
|
||||
|
||||
import itertools
|
||||
import uuid
|
||||
|
||||
|
||||
def generate_request_id():
    """Return a fresh request id of the form 'req-<uuid4>'."""
    return 'req-{0}'.format(uuid.uuid4())
|
||||
|
||||
|
||||
class RequestContext(object):

    """Security/request context information carried through a web request.

    Stores the security context under which the user accesses the
    system, plus additional request bookkeeping. Projects subclass this
    to add fields to their WSGI pipeline.
    """

    # Layout of the composite 'user_identity' string built by to_dict().
    user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'

    def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
                 user_domain=None, project_domain=None, is_admin=False,
                 read_only=False, show_deleted=False, request_id=None,
                 instance_uuid=None):
        self.auth_token = auth_token
        self.user = user
        self.tenant = tenant
        self.domain = domain
        self.user_domain = user_domain
        self.project_domain = project_domain
        self.is_admin = is_admin
        self.read_only = read_only
        self.show_deleted = show_deleted
        self.instance_uuid = instance_uuid
        # Generate an id when the caller did not supply one.
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id

    def to_dict(self):
        """Serialize the context to a plain dict (for logging/RPC)."""
        user_idt = self.user_idt_format.format(
            user=self.user or '-',
            tenant=self.tenant or '-',
            domain=self.domain or '-',
            user_domain=self.user_domain or '-',
            p_domain=self.project_domain or '-')

        return {'user': self.user,
                'tenant': self.tenant,
                'domain': self.domain,
                'user_domain': self.user_domain,
                'project_domain': self.project_domain,
                'is_admin': self.is_admin,
                'read_only': self.read_only,
                'show_deleted': self.show_deleted,
                'auth_token': self.auth_token,
                'request_id': self.request_id,
                'instance_uuid': self.instance_uuid,
                'user_identity': user_idt}
|
||||
|
||||
|
||||
def get_admin_context(show_deleted=False):
    """Build a RequestContext with administrative privileges."""
    # First positional argument is auth_token (None for admin contexts).
    return RequestContext(None,
                          tenant=None,
                          is_admin=True,
                          show_deleted=show_deleted)
|
||||
|
||||
|
||||
def get_context_from_function_and_args(function, args, kwargs):
|
||||
"""Find an arg of type RequestContext and return it.
|
||||
|
||||
This is useful in a couple of decorators where we don't
|
||||
know much about the function we're wrapping.
|
||||
"""
|
||||
|
||||
for arg in itertools.chain(kwargs.values(), args):
|
||||
if isinstance(arg, RequestContext):
|
||||
return arg
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def is_user_context(context):
|
||||
"""Indicates if the request context is a normal user."""
|
||||
if not context:
|
||||
return False
|
||||
if context.is_admin:
|
||||
return False
|
||||
if not context.user_id or not context.project_id:
|
||||
return False
|
||||
return True
|
@ -1,145 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (c) 2012 OpenStack Foundation.
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import errno
|
||||
import gc
|
||||
import os
|
||||
import pprint
|
||||
import socket
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
import eventlet
|
||||
import eventlet.backdoor
|
||||
import greenlet
|
||||
from oslo.config import cfg
|
||||
|
||||
from muranoconductor.openstack.common.gettextutils import _ # noqa
|
||||
from muranoconductor.openstack.common import log as logging
|
||||
|
||||
help_for_backdoor_port = (
|
||||
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
|
||||
"in listening on a random tcp port number; <port> results in listening "
|
||||
"on the specified port number (and not enabling backdoor if that port "
|
||||
"is in use); and <start>:<end> results in listening on the smallest "
|
||||
"unused port number within the specified range of port numbers. The "
|
||||
"chosen port is displayed in the service's log file.")
|
||||
eventlet_backdoor_opts = [
|
||||
cfg.StrOpt('backdoor_port',
|
||||
default=None,
|
||||
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(eventlet_backdoor_opts)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class EventletBackdoorConfigValueError(Exception):
|
||||
def __init__(self, port_range, help_msg, ex):
|
||||
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
|
||||
'%(help)s' %
|
||||
{'range': port_range, 'ex': ex, 'help': help_msg})
|
||||
super(EventletBackdoorConfigValueError, self).__init__(msg)
|
||||
self.port_range = port_range
|
||||
|
||||
|
||||
def _dont_use_this():
|
||||
print("Don't use this, just disconnect instead")
|
||||
|
||||
|
||||
def _find_objects(t):
|
||||
return filter(lambda o: isinstance(o, t), gc.get_objects())
|
||||
|
||||
|
||||
def _print_greenthreads():
|
||||
for i, gt in enumerate(_find_objects(greenlet.greenlet)):
|
||||
print(i, gt)
|
||||
traceback.print_stack(gt.gr_frame)
|
||||
print()
|
||||
|
||||
|
||||
def _print_nativethreads():
|
||||
for threadId, stack in sys._current_frames().items():
|
||||
print(threadId)
|
||||
traceback.print_stack(stack)
|
||||
print()
|
||||
|
||||
|
||||
def _parse_port_range(port_range):
|
||||
if ':' not in port_range:
|
||||
start, end = port_range, port_range
|
||||
else:
|
||||
start, end = port_range.split(':', 1)
|
||||
try:
|
||||
start, end = int(start), int(end)
|
||||
if end < start:
|
||||
raise ValueError
|
||||
return start, end
|
||||
except ValueError as ex:
|
||||
raise EventletBackdoorConfigValueError(port_range, ex,
|
||||
help_for_backdoor_port)
|
||||
|
||||
|
||||
def _listen(host, start_port, end_port, listen_func):
|
||||
try_port = start_port
|
||||
while True:
|
||||
try:
|
||||
return listen_func((host, try_port))
|
||||
except socket.error as exc:
|
||||
if (exc.errno != errno.EADDRINUSE or try_port >= end_port):
|
||||
raise
|
||||
try_port += 1
|
||||
|
||||
|
||||
def initialize_if_enabled():
|
||||
backdoor_locals = {
|
||||
'exit': _dont_use_this, # So we don't exit the entire process
|
||||
'quit': _dont_use_this, # So we don't exit the entire process
|
||||
'fo': _find_objects,
|
||||
'pgt': _print_greenthreads,
|
||||
'pnt': _print_nativethreads,
|
||||
}
|
||||
|
||||
if CONF.backdoor_port is None:
|
||||
return None
|
||||
|
||||
start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
|
||||
|
||||
# NOTE(johannes): The standard sys.displayhook will print the value of
|
||||
# the last expression and set it to __builtin__._, which overwrites
|
||||
# the __builtin__._ that gettext sets. Let's switch to using pprint
|
||||
# since it won't interact poorly with gettext, and it's easier to
|
||||
# read the output too.
|
||||
def displayhook(val):
|
||||
if val is not None:
|
||||
pprint.pprint(val)
|
||||
sys.displayhook = displayhook
|
||||
|
||||
sock = _listen('localhost', start_port, end_port, eventlet.listen)
|
||||
|
||||
# In the case of backdoor port being zero, a port number is assigned by
|
||||
# listen(). In any case, pull the port number out here.
|
||||
port = sock.getsockname()[1]
|
||||
LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
|
||||
{'port': port, 'pid': os.getpid()})
|
||||
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
|
||||
locals=backdoor_locals)
|
||||
return port
|
@ -1,139 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Exceptions common to OpenStack projects
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from muranoconductor.openstack.common.gettextutils import _ # noqa
|
||||
|
||||
_FATAL_EXCEPTION_FORMAT_ERRORS = False
|
||||
|
||||
|
||||
class Error(Exception):
|
||||
def __init__(self, message=None):
|
||||
super(Error, self).__init__(message)
|
||||
|
||||
|
||||
class ApiError(Error):
|
||||
def __init__(self, message='Unknown', code='Unknown'):
|
||||
self.api_message = message
|
||||
self.code = code
|
||||
super(ApiError, self).__init__('%s: %s' % (code, message))
|
||||
|
||||
|
||||
class NotFound(Error):
|
||||
pass
|
||||
|
||||
|
||||
class UnknownScheme(Error):
|
||||
|
||||
msg_fmt = "Unknown scheme '%s' found in URI"
|
||||
|
||||
def __init__(self, scheme):
|
||||
msg = self.msg_fmt % scheme
|
||||
super(UnknownScheme, self).__init__(msg)
|
||||
|
||||
|
||||
class BadStoreUri(Error):
|
||||
|
||||
msg_fmt = "The Store URI %s was malformed. Reason: %s"
|
||||
|
||||
def __init__(self, uri, reason):
|
||||
msg = self.msg_fmt % (uri, reason)
|
||||
super(BadStoreUri, self).__init__(msg)
|
||||
|
||||
|
||||
class Duplicate(Error):
|
||||
pass
|
||||
|
||||
|
||||
class NotAuthorized(Error):
|
||||
pass
|
||||
|
||||
|
||||
class NotEmpty(Error):
|
||||
pass
|
||||
|
||||
|
||||
class Invalid(Error):
|
||||
pass
|
||||
|
||||
|
||||
class BadInputError(Exception):
|
||||
"""Error resulting from a client sending bad input to a server"""
|
||||
pass
|
||||
|
||||
|
||||
class MissingArgumentError(Error):
|
||||
pass
|
||||
|
||||
|
||||
class DatabaseMigrationError(Error):
|
||||
pass
|
||||
|
||||
|
||||
class ClientConnectionError(Exception):
|
||||
"""Error resulting from a client connecting to a server"""
|
||||
pass
|
||||
|
||||
|
||||
def wrap_exception(f):
|
||||
def _wrap(*args, **kw):
|
||||
try:
|
||||
return f(*args, **kw)
|
||||
except Exception as e:
|
||||
if not isinstance(e, Error):
|
||||
logging.exception(_('Uncaught exception'))
|
||||
raise Error(str(e))
|
||||
raise
|
||||
_wrap.func_name = f.func_name
|
||||
return _wrap
|
||||
|
||||
|
||||
class OpenstackException(Exception):
|
||||
"""Base Exception class.
|
||||
|
||||
To correctly use this class, inherit from it and define
|
||||
a 'msg_fmt' property. That message will get printf'd
|
||||
with the keyword arguments provided to the constructor.
|
||||
"""
|
||||
msg_fmt = "An unknown exception occurred"
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
try:
|
||||
self._error_string = self.msg_fmt % kwargs
|
||||
|
||||
except Exception:
|
||||
if _FATAL_EXCEPTION_FORMAT_ERRORS:
|
||||
raise
|
||||
else:
|
||||
# at least get the core message out if something happened
|
||||
self._error_string = self.msg_fmt
|
||||
|
||||
def __str__(self):
|
||||
return self._error_string
|
||||
|
||||
|
||||
class MalformedRequestBody(OpenstackException):
|
||||
msg_fmt = "Malformed message body: %(reason)s"
|
||||
|
||||
|
||||
class InvalidContentType(OpenstackException):
|
||||
msg_fmt = "Invalid content type %(content_type)s"
|
@ -1,98 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# Copyright 2012, Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Exception related utilities.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
|
||||
from muranoconductor.openstack.common.gettextutils import _ # noqa
|
||||
|
||||
|
||||
class save_and_reraise_exception(object):
|
||||
"""Save current exception, run some code and then re-raise.
|
||||
|
||||
In some cases the exception context can be cleared, resulting in None
|
||||
being attempted to be re-raised after an exception handler is run. This
|
||||
can happen when eventlet switches greenthreads or when running an
|
||||
exception handler, code raises and catches an exception. In both
|
||||
cases the exception context will be cleared.
|
||||
|
||||
To work around this, we save the exception state, run handler code, and
|
||||
then re-raise the original exception. If another exception occurs, the
|
||||
saved exception is logged and the new exception is re-raised.
|
||||
|
||||
In some cases the caller may not want to re-raise the exception, and
|
||||
for those circumstances this context provides a reraise flag that
|
||||
can be used to suppress the exception. For example:
|
||||
|
||||
except Exception:
|
||||
with save_and_reraise_exception() as ctxt:
|
||||
decide_if_need_reraise()
|
||||
if not should_be_reraised:
|
||||
ctxt.reraise = False
|
||||
"""
|
||||
def __init__(self):
|
||||
self.reraise = True
|
||||
|
||||
def __enter__(self):
|
||||
self.type_, self.value, self.tb, = sys.exc_info()
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
if exc_type is not None:
|
||||
logging.error(_('Original exception being dropped: %s'),
|
||||
traceback.format_exception(self.type_,
|
||||
self.value,
|
||||
self.tb))
|
||||
return False
|
||||
if self.reraise:
|
||||
raise self.type_, self.value, self.tb
|
||||
|
||||
|
||||
def forever_retry_uncaught_exceptions(infunc):
|
||||
def inner_func(*args, **kwargs):
|
||||
last_log_time = 0
|
||||
last_exc_message = None
|
||||
exc_count = 0
|
||||
while True:
|
||||
try:
|
||||
return infunc(*args, **kwargs)
|
||||
except Exception as exc:
|
||||
if exc.message == last_exc_message:
|
||||
exc_count += 1
|
||||
else:
|
||||
exc_count = 1
|
||||
# Do not log any more frequently than once a minute unless
|
||||
# the exception message changes
|
||||
cur_time = int(time.time())
|
||||
if (cur_time - last_log_time > 60 or
|
||||
exc.message != last_exc_message):
|
||||
logging.exception(
|
||||
_('Unexpected exception occurred %d time(s)... '
|
||||
'retrying.') % exc_count)
|
||||
last_log_time = cur_time
|
||||
last_exc_message = exc.message
|
||||
exc_count = 0
|
||||
# This should be a very rare event. In case it isn't, do
|
||||
# a sleep.
|
||||
time.sleep(1)
|
||||
return inner_func
|
@ -1,110 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import contextlib
|
||||
import errno
|
||||
import os
|
||||
|
||||
from muranoconductor.openstack.common import excutils
|
||||
from muranoconductor.openstack.common.gettextutils import _ # noqa
|
||||
from muranoconductor.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
_FILE_CACHE = {}
|
||||
|
||||
|
||||
def ensure_tree(path):
|
||||
"""Create a directory (and any ancestor directories required)
|
||||
|
||||
:param path: Directory to create
|
||||
"""
|
||||
try:
|
||||
os.makedirs(path)
|
||||
except OSError as exc:
|
||||
if exc.errno == errno.EEXIST:
|
||||
if not os.path.isdir(path):
|
||||
raise
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
def read_cached_file(filename, force_reload=False):
|
||||
"""Read from a file if it has been modified.
|
||||
|
||||
:param force_reload: Whether to reload the file.
|
||||
:returns: A tuple with a boolean specifying if the data is fresh
|
||||
or not.
|
||||
"""
|
||||
global _FILE_CACHE
|
||||
|
||||
if force_reload and filename in _FILE_CACHE:
|
||||
del _FILE_CACHE[filename]
|
||||
|
||||
reloaded = False
|
||||
mtime = os.path.getmtime(filename)
|
||||
cache_info = _FILE_CACHE.setdefault(filename, {})
|
||||
|
||||
if not cache_info or mtime > cache_info.get('mtime', 0):
|
||||
LOG.debug(_("Reloading cached file %s") % filename)
|
||||
with open(filename) as fap:
|
||||
cache_info['data'] = fap.read()
|
||||
cache_info['mtime'] = mtime
|
||||
reloaded = True
|
||||
return (reloaded, cache_info['data'])
|
||||
|
||||
|
||||
def delete_if_exists(path):
|
||||
"""Delete a file, but ignore file not found error.
|
||||
|
||||
:param path: File to delete
|
||||
"""
|
||||
|
||||
try:
|
||||
os.unlink(path)
|
||||
except OSError as e:
|
||||
if e.errno == errno.ENOENT:
|
||||
return
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def remove_path_on_error(path):
|
||||
"""Protect code that wants to operate on PATH atomically.
|
||||
Any exception will cause PATH to be removed.
|
||||
|
||||
:param path: File to work with
|
||||
"""
|
||||
try:
|
||||
yield
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
delete_if_exists(path)
|
||||
|
||||
|
||||
def file_open(*args, **kwargs):
|
||||
"""Open file
|
||||
|
||||
see built-in file() documentation for more details
|
||||
|
||||
Note: The reason this is kept in a separate module is to easily
|
||||
be able to provide a stub module that doesn't alter system
|
||||
state at all (for unit tests)
|
||||
"""
|
||||
return file(*args, **kwargs)
|
@ -1,259 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2012 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
gettext for openstack-common modules.
|
||||
|
||||
Usual usage in an openstack.common module:
|
||||
|
||||
from muranoconductor.openstack.common.gettextutils import _
|
||||
"""
|
||||
|
||||
import copy
|
||||
import gettext
|
||||
import logging.handlers
|
||||
import os
|
||||
import re
|
||||
import UserString
|
||||
|
||||
import six
|
||||
|
||||
_localedir = os.environ.get('conductor'.upper() + '_LOCALEDIR')
|
||||
_t = gettext.translation('conductor', localedir=_localedir, fallback=True)
|
||||
|
||||
|
||||
def _(msg):
|
||||
return _t.ugettext(msg)
|
||||
|
||||
|
||||
def install(domain):
|
||||
"""Install a _() function using the given translation domain.
|
||||
|
||||
Given a translation domain, install a _() function using gettext's
|
||||
install() function.
|
||||
|
||||
The main difference from gettext.install() is that we allow
|
||||
overriding the default localedir (e.g. /usr/share/locale) using
|
||||
a translation-domain-specific environment variable (e.g.
|
||||
NOVA_LOCALEDIR).
|
||||
"""
|
||||
gettext.install(domain,
|
||||
localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
|
||||
unicode=True)
|
||||
|
||||
|
||||
"""
|
||||
Lazy gettext functionality.
|
||||
|
||||
The following is an attempt to introduce a deferred way
|
||||
to do translations on messages in OpenStack. We attempt to
|
||||
override the standard _() function and % (format string) operation
|
||||
to build Message objects that can later be translated when we have
|
||||
more information. Also included is an example LogHandler that
|
||||
translates Messages to an associated locale, effectively allowing
|
||||
many logs, each with their own locale.
|
||||
"""
|
||||
|
||||
|
||||
def get_lazy_gettext(domain):
|
||||
"""Assemble and return a lazy gettext function for a given domain.
|
||||
|
||||
Factory method for a project/module to get a lazy gettext function
|
||||
for its own translation domain (i.e. nova, glance, cinder, etc.)
|
||||
"""
|
||||
|
||||
def _lazy_gettext(msg):
|
||||
"""Create and return a Message object.
|
||||
|
||||
Message encapsulates a string so that we can translate it later when
|
||||
needed.
|
||||
"""
|
||||
return Message(msg, domain)
|
||||
|
||||
return _lazy_gettext
|
||||
|
||||
|
||||
class Message(UserString.UserString, object):
|
||||
"""Class used to encapsulate translatable messages."""
|
||||
def __init__(self, msg, domain):
|
||||
# _msg is the gettext msgid and should never change
|
||||
self._msg = msg
|
||||
self._left_extra_msg = ''
|
||||
self._right_extra_msg = ''
|
||||
self.params = None
|
||||
self.locale = None
|
||||
self.domain = domain
|
||||
|
||||
@property
|
||||
def data(self):
|
||||
# NOTE(mrodden): this should always resolve to a unicode string
|
||||
# that best represents the state of the message currently
|
||||
|
||||
localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
|
||||
if self.locale:
|
||||
lang = gettext.translation(self.domain,
|
||||
localedir=localedir,
|
||||
languages=[self.locale],
|
||||
fallback=True)
|
||||
else:
|
||||
# use system locale for translations
|
||||
lang = gettext.translation(self.domain,
|
||||
localedir=localedir,
|
||||
fallback=True)
|
||||
|
||||
full_msg = (self._left_extra_msg +
|
||||
lang.ugettext(self._msg) +
|
||||
self._right_extra_msg)
|
||||
|
||||
if self.params is not None:
|
||||
full_msg = full_msg % self.params
|
||||
|
||||
return six.text_type(full_msg)
|
||||
|
||||
def _save_dictionary_parameter(self, dict_param):
|
||||
full_msg = self.data
|
||||
# look for %(blah) fields in string;
|
||||
# ignore %% and deal with the
|
||||
# case where % is first character on the line
|
||||
keys = re.findall('(?:[^%]|^)%\((\w*)\)[a-z]', full_msg)
|
||||
|
||||
# if we don't find any %(blah) blocks but have a %s
|
||||
if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
|
||||
# apparently the full dictionary is the parameter
|
||||
params = copy.deepcopy(dict_param)
|
||||
else:
|
||||
params = {}
|
||||
for key in keys:
|
||||
try:
|
||||
params[key] = copy.deepcopy(dict_param[key])
|
||||
except TypeError:
|
||||
# cast uncopyable thing to unicode string
|
||||
params[key] = unicode(dict_param[key])
|
||||
|
||||
return params
|
||||
|
||||
def _save_parameters(self, other):
|
||||
# we check for None later to see if
|
||||
# we actually have parameters to inject,
|
||||
# so encapsulate if our parameter is actually None
|
||||
if other is None:
|
||||
self.params = (other, )
|
||||
elif isinstance(other, dict):
|
||||
self.params = self._save_dictionary_parameter(other)
|
||||
else:
|
||||
# fallback to casting to unicode,
|
||||
# this will handle the problematic python code-like
|
||||
# objects that cannot be deep-copied
|
||||
try:
|
||||
self.params = copy.deepcopy(other)
|
||||
except TypeError:
|
||||
self.params = unicode(other)
|
||||
|
||||
return self
|
||||
|
||||
# overrides to be more string-like
|
||||
def __unicode__(self):
|
||||
return self.data
|
||||
|
||||
def __str__(self):
|
||||
return self.data.encode('utf-8')
|
||||
|
||||
def __getstate__(self):
|
||||
to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
|
||||
'domain', 'params', 'locale']
|
||||
new_dict = self.__dict__.fromkeys(to_copy)
|
||||
for attr in to_copy:
|
||||
new_dict[attr] = copy.deepcopy(self.__dict__[attr])
|
||||
|
||||
return new_dict
|
||||
|
||||
def __setstate__(self, state):
|
||||
for (k, v) in state.items():
|
||||
setattr(self, k, v)
|
||||
|
||||
# operator overloads
|
||||
def __add__(self, other):
|
||||
copied = copy.deepcopy(self)
|
||||
copied._right_extra_msg += other.__str__()
|
||||
return copied
|
||||
|
||||
def __radd__(self, other):
|
||||
copied = copy.deepcopy(self)
|
||||
copied._left_extra_msg += other.__str__()
|
||||
return copied
|
||||
|
||||
def __mod__(self, other):
|
||||
# do a format string to catch and raise
|
||||
# any possible KeyErrors from missing parameters
|
||||
self.data % other
|
||||
copied = copy.deepcopy(self)
|
||||
return copied._save_parameters(other)
|
||||
|
||||
def __mul__(self, other):
|
||||
return self.data * other
|
||||
|
||||
def __rmul__(self, other):
|
||||
return other * self.data
|
||||
|
||||
def __getitem__(self, key):
|
||||
return self.data[key]
|
||||
|
||||
def __getslice__(self, start, end):
|
||||
return self.data.__getslice__(start, end)
|
||||
|
||||
def __getattribute__(self, name):
|
||||
# NOTE(mrodden): handle lossy operations that we can't deal with yet
|
||||
# These override the UserString implementation, since UserString
|
||||
# uses our __class__ attribute to try and build a new message
|
||||
# after running the inner data string through the operation.
|
||||
# At that point, we have lost the gettext message id and can just
|
||||
# safely resolve to a string instead.
|
||||
ops = ['capitalize', 'center', 'decode', 'encode',
|
||||
'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
|
||||
'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
|
||||
if name in ops:
|
||||
return getattr(self.data, name)
|
||||
else:
|
||||
return UserString.UserString.__getattribute__(self, name)
|
||||
|
||||
|
||||
class LocaleHandler(logging.Handler):
|
||||
"""Handler that can have a locale associated to translate Messages.
|
||||
|
||||
A quick example of how to utilize the Message class above.
|
||||
LocaleHandler takes a locale and a target logging.Handler object
|
||||
to forward LogRecord objects to after translating the internal Message.
|
||||
"""
|
||||
|
||||
def __init__(self, locale, target):
|
||||
"""Initialize a LocaleHandler
|
||||
|
||||
:param locale: locale to use for translating messages
|
||||
:param target: logging.Handler object to forward
|
||||
LogRecord objects to after translation
|
||||
"""
|
||||
logging.Handler.__init__(self)
|
||||
self.locale = locale
|
||||
self.target = target
|
||||
|
||||
def emit(self, record):
|
||||
if isinstance(record.msg, Message):
|
||||
# set the locale and resolve to a string
|
||||
record.msg.locale = self.locale
|
||||
|
||||
self.target.emit(record)
|
@ -1,68 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Import related utilities and helper functions.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
|
||||
def import_class(import_str):
|
||||
"""Returns a class from a string including module and class."""
|
||||
mod_str, _sep, class_str = import_str.rpartition('.')
|
||||
try:
|
||||
__import__(mod_str)
|
||||
return getattr(sys.modules[mod_str], class_str)
|
||||
except (ValueError, AttributeError):
|
||||
raise ImportError('Class %s cannot be found (%s)' %
|
||||
(class_str,
|
||||
traceback.format_exception(*sys.exc_info())))
|
||||
|
||||
|
||||
def import_object(import_str, *args, **kwargs):
|
||||
"""Import a class and return an instance of it."""
|
||||
return import_class(import_str)(*args, **kwargs)
|
||||
|
||||
|
||||
def import_object_ns(name_space, import_str, *args, **kwargs):
|
||||
"""Tries to import object from default namespace.
|
||||
|
||||
Imports a class and return an instance of it, first by trying
|
||||
to find the class in a default namespace, then failing back to
|
||||
a full path if not found in the default namespace.
|
||||
"""
|
||||
import_value = "%s.%s" % (name_space, import_str)
|
||||
try:
|
||||
return import_class(import_value)(*args, **kwargs)
|
||||
except ImportError:
|
||||
return import_class(import_str)(*args, **kwargs)
|
||||
|
||||
|
||||
def import_module(import_str):
|
||||
"""Import a module."""
|
||||
__import__(import_str)
|
||||
return sys.modules[import_str]
|
||||
|
||||
|
||||
def try_import(import_str, default=None):
|
||||
"""Try to import a module and if it fails return default."""
|
||||
try:
|
||||
return import_module(import_str)
|
||||
except ImportError:
|
||||
return default
|
@ -1,172 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2011 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
'''
|
||||
JSON related utilities.
|
||||
|
||||
This module provides a few things:
|
||||
|
||||
1) A handy function for getting an object down to something that can be
|
||||
JSON serialized. See to_primitive().
|
||||
|
||||
2) Wrappers around loads() and dumps(). The dumps() wrapper will
|
||||
automatically use to_primitive() for you if needed.
|
||||
|
||||
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
|
||||
is available.
|
||||
'''
|
||||
|
||||
|
||||
import datetime
|
||||
import functools
|
||||
import inspect
|
||||
import itertools
|
||||
import json
|
||||
import types
|
||||
import xmlrpclib
|
||||
|
||||
import netaddr
|
||||
import six
|
||||
|
||||
from muranoconductor.openstack.common import timeutils
|
||||
|
||||
|
||||
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
|
||||
inspect.isfunction, inspect.isgeneratorfunction,
|
||||
inspect.isgenerator, inspect.istraceback, inspect.isframe,
|
||||
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
|
||||
inspect.isabstract]
|
||||
|
||||
_simple_types = (types.NoneType, int, basestring, bool, float, long)
|
||||
|
||||
|
||||
def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    :param value: arbitrary object to reduce to JSON-friendly primitives.
    :param convert_instances: also descend into instance ``__dict__``s.
    :param convert_datetime: render datetimes via timeutils.strtime().
    :param level: current recursion depth (internal).
    :param max_depth: depth beyond which '?' is returned instead.
    """
    # handle obvious types first - order of basic types determined by running
    # full tests on nova project, resulting in the following counts:
    # 572754 <type 'NoneType'>
    # 460353 <type 'int'>
    # 379632 <type 'unicode'>
    # 274610 <type 'str'>
    # 199918 <type 'dict'>
    # 114200 <type 'datetime.datetime'>
    #  51817 <type 'bool'>
    #  26164 <type 'list'>
    #   6491 <type 'float'>
    #    283 <type 'tuple'>
    #     19 <type 'long'>
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        # NOTE(review): recursion into plain dicts/lists keeps the *same*
        # level, so max_depth does not bound depth for nested builtin
        # containers; a self-referencing list/dict would hit Python's
        # recursion limit. Confirm callers never pass such structures.
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in value.iteritems())
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif hasattr(value, 'iteritems'):
            # dict-like object: convert to a real dict, one level deeper.
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            # generic iterable: materialize and convert element-wise.
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        elif isinstance(value, netaddr.IPAddress):
            return six.text_type(value)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return six.text_type(value)
|
||||
|
||||
|
||||
def dumps(value, default=to_primitive, **kwargs):
    """Serialize ``value`` to a JSON string.

    Objects that the stock encoder cannot handle are reduced through
    ``default`` (``to_primitive`` unless overridden); any remaining
    keyword arguments are forwarded verbatim to ``json.dumps``.
    """
    return json.dumps(value, default=default, **kwargs)
|
||||
|
||||
|
||||
def loads(s):
    """Deserialize the JSON document in string ``s`` to a Python object."""
    result = json.loads(s)
    return result
|
||||
|
||||
|
||||
def load(s):
    """Deserialize JSON read from the file-like object ``s``."""
    parsed = json.load(s)
    return parsed
|
||||
|
||||
|
||||
# If the optional ``anyjson`` library is installed, register this module
# as its JSON implementation so anyjson users transparently get the
# dumps()/loads()/load() wrappers (and thus to_primitive() support).
try:
    import anyjson
except ImportError:
    pass
else:
    anyjson._modules.append((__name__, 'dumps', TypeError,
                             'loads', ValueError, 'load'))
    anyjson.force_implementation(__name__)
|
@ -1,48 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Greenthread local storage of variables using weak references"""
|
||||
|
||||
import weakref
|
||||
|
||||
from eventlet import corolocal
|
||||
|
||||
|
||||
class WeakLocal(corolocal.local):
    """Greenthread-local storage that holds only weak references.

    Values stored here do not keep their referents alive; once the
    original object is garbage collected, reading the attribute back
    yields None (the result of calling a dead weakref).
    """

    def __getattribute__(self, attr):
        rval = corolocal.local.__getattribute__(self, attr)
        if rval:
            # NOTE(mikal): this bit is confusing. What is stored is a weak
            # reference, not the value itself. We therefore need to lookup
            # the weak reference and return the inner value here.
            rval = rval()
        return rval

    def __setattr__(self, attr, value):
        # Wrap the value in a weak reference before storing it, so this
        # store never extends the value's lifetime.
        value = weakref.ref(value)
        return corolocal.local.__setattr__(self, attr, value)
|
||||
|
||||
|
||||
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
# NOTE(review): this must be an *instance* of the greenthread-local class.
# The previous code assigned the class itself (``corolocal.local``), which
# made every attribute write a class attribute shared by ALL greenthreads,
# silently defeating thread-locality (e.g. lockutils' strong_store.locks_held
# became one global list). Matches the upstream oslo-incubator fix.
strong_store = corolocal.local()
|
@ -1,276 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import contextlib
|
||||
import errno
|
||||
import functools
|
||||
import os
|
||||
import time
|
||||
import weakref
|
||||
|
||||
from eventlet import semaphore
|
||||
from oslo.config import cfg
|
||||
|
||||
from muranoconductor.openstack.common import fileutils
|
||||
from muranoconductor.openstack.common.gettextutils import _ # noqa
|
||||
from muranoconductor.openstack.common import local
|
||||
from muranoconductor.openstack.common import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)


# Configuration knobs controlling inter-process (file-based) locking.
util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'),
    cfg.StrOpt('lock_path',
               help=('Directory to use for lock files.'))
]


CONF = cfg.CONF
CONF.register_opts(util_opts)
|
||||
|
||||
|
||||
def set_defaults(lock_path):
    # Let consuming projects supply their own default lock directory
    # without requiring it on the command line or in config files.
    cfg.set_defaults(util_opts, lock_path=lock_path)
|
||||
|
||||
|
||||
class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        # name: filesystem path of the lock file; opened lazily in __enter__.
        self.lockfile = None
        self.fname = name

    def __enter__(self):
        self.lockfile = open(self.fname, 'w')

        # Spin (with a small sleep) until the platform-specific trylock()
        # succeeds; blocking locks would stall the whole eventlet hub.
        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Best-effort release: closing the descriptor also drops the OS lock,
        # so a failure here is logged rather than propagated.
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)

    def trylock(self):
        # Implemented by the platform-specific subclasses below.
        raise NotImplementedError()

    def unlock(self):
        # Implemented by the platform-specific subclasses below.
        raise NotImplementedError()
|
||||
|
||||
|
||||
class _WindowsLock(_InterProcessLock):
    """Windows flavour of the file lock, based on msvcrt.locking()."""

    def trylock(self):
        # LK_NBLCK: non-blocking exclusive lock on the first byte.
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
|
||||
|
||||
|
||||
class _PosixLock(_InterProcessLock):
    """POSIX flavour of the file lock, based on fcntl.lockf()."""

    def trylock(self):
        # LOCK_NB makes the exclusive lock attempt non-blocking.
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
|
||||
|
||||
|
||||
# Pick the platform-appropriate lock implementation; the locking modules
# (msvcrt / fcntl) are imported here so each platform only loads its own.
if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock

# Cache of named in-process semaphores; weak values let semaphores that are
# no longer held anywhere be garbage collected.
_semaphores = weakref.WeakValueDictionary()
|
||||
|
||||
|
||||
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `semaphore.Semaphore` instance unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set, then
    CONF.lock_path is used as a default.
    """
    # NOTE(soren): If we ever go natively threaded, this will be racy.
    #              See http://stackoverflow.com/questions/5390569/dyn
    #              amically-allocating-and-destroying-mutexes
    sem = _semaphores.get(name, semaphore.Semaphore())
    if name not in _semaphores:
        # this check is not racy - we're already holding ref locally
        # so GC won't remove the item and there was no IO switch
        # (only valid in greenthreads)
        _semaphores[name] = sem

    # The in-process semaphore is always taken; the file lock is layered
    # on top of it only when external locking is requested and enabled.
    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                def add_prefix(name, prefix):
                    # Join prefix and name with a single '-' separator.
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path, lock_file_name)

                try:
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
                                  {'lock': name, 'path': lock_file_path})
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
                              {'lock': name, 'path': lock_file_path})
            else:
                yield sem

        finally:
            # Always unregister the lock from this greenthread's bookkeeping,
            # even if the body raised.
            local.strong_store.locks_held.remove(name)
|
||||
|
||||
|
||||
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.

    :param name: name of the shared lock.
    :param lock_file_prefix: prefix for on-disk lock files (external locks).
    :param external: also synchronize across processes via a file lock.
    :param lock_path: directory for external lock files (default:
        CONF.lock_path).
    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # NOTE(review): the original placed the "released" debug line
            # after ``return`` inside the ``with`` block, making it
            # unreachable. Using try/finally guarantees it is logged on
            # every exit path (this matches the upstream oslo fix).
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug(_('Got semaphore / lock "%(function)s"'),
                              {'function': f.__name__})
                    return f(*args, **kwargs)
            finally:
                LOG.debug(_('Semaphore / lock released "%(function)s"'),
                          {'function': f.__name__})
        return inner
    return wrap
|
||||
|
||||
|
||||
def synchronized_with_prefix(lock_file_prefix):
    """Partial object generator for the synchronization decorator.

    Redefine @synchronized in each project like so::

        (in nova/utils.py)
        from nova.openstack.common import lockutils

        synchronized = lockutils.synchronized_with_prefix('nova-')


        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
           ...

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix.
    """

    # Returns a functools.partial, so the result keeps synchronized()'s
    # signature minus the pre-bound lock_file_prefix.
    return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
|
@ -1,712 +0,0 @@
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Openstack logging handler.
|
||||
|
||||
This module adds to logging functionality by adding the option to specify
|
||||
a context object when calling the various log methods. If the context object
|
||||
is not specified, default formatting is used. Additionally, an instance uuid
|
||||
may be passed as part of the log message, which is intended to make it easier
|
||||
for admins to find messages related to a specific instance.
|
||||
|
||||
It also allows setting of formatting information through conf.
|
||||
|
||||
"""
|
||||
|
||||
import inspect
|
||||
import itertools
|
||||
import logging
|
||||
import logging.config
|
||||
import logging.handlers
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from oslo.config import cfg
|
||||
import six
|
||||
from six import moves
|
||||
|
||||
from muranoconductor.openstack.common.gettextutils import _
|
||||
from muranoconductor.openstack.common import importutils
|
||||
from muranoconductor.openstack.common import jsonutils
|
||||
from muranoconductor.openstack.common import local
|
||||
|
||||
|
||||
# Default strftime format for %(asctime)s in log records.
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

# Keys whose values must be masked out of logged messages.
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']

# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
# One pattern each for key=value, XML elements, and two JSON-ish quoting
# styles; group 1 and 2 capture the text around the secret.
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                    r'(<%(key)s>).*?(</%(key)s>)',
                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']

for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS.append(reg_ex)
|
||||
|
||||
|
||||
# CLI flags shared by every OpenStack service: -d / -v verbosity switches.
common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

# CLI flags controlling log destinations (file, dir, syslog) and formats.
logging_cli_opts = [
    cfg.StrOpt('log-config-append',
               metavar='PATH',
               deprecated_name='log-config',
               help='The name of logging configuration file. It does not '
                    'disable existing loggers, but just appends specified '
                    'logging configuration to any other existing logging '
                    'options. Please see the Python logging module '
                    'documentation for details on logging configuration '
                    'files.'),
    cfg.StrOpt('log-format',
               default=None,
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging. '
                     'Existing syslog format is DEPRECATED during I, '
                     'and then will be changed in J to honor RFC5424'),
    cfg.BoolOpt('use-syslog-rfc-format',
                # TODO(bogdando) remove or use True after existing
                # syslog format deprecation in J
                default=False,
                help='(Optional) Use syslog rfc5424 format for logging. '
                     'If enabled, will add APP-NAME (RFC5424) before the '
                     'MSG part of the syslog message. The old format '
                     'without APP-NAME is deprecated in I, '
                     'and will be removed in J.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='Syslog facility to receive log lines')
]

generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error')
]

# Config-file options controlling record formats and per-logger levels.
log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user_identity)s] '
                       '%(instance)s%(message)s',
               help='Format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='Format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='Data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='Prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqp=WARN',
                    'amqplib=WARN',
                    'boto=WARN',
                    'qpid=WARN',
                    'sqlalchemy=WARN',
                    'suds=INFO',
                    'iso8601=WARN',
                    'requests.packages.urllib3.connectionpool=WARN'
                ],
                help='List of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='Publish error events'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='Make deprecations fatal'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]

CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)

# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
|
||||
|
||||
|
||||
# logging.NullHandler only exists on Python >= 2.7; provide a do-nothing
# fallback for older interpreters.
try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            # No I/O ever happens, so no lock is needed.
            self.lock = None
|
||||
|
||||
|
||||
def _dictify_context(context):
|
||||
if context is None:
|
||||
return None
|
||||
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
|
||||
context = context.to_dict()
|
||||
return context
|
||||
|
||||
|
||||
def _get_binary_name():
|
||||
return os.path.basename(inspect.stack()[-1][1])
|
||||
|
||||
|
||||
def _get_log_file_path(binary=None):
    """Resolve the log destination from CONF.log_file / CONF.log_dir.

    Returns None when neither option is set (log to stdout/stderr).
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir

    if logfile:
        # A bare log_file is used as-is; with log_dir it is joined onto it.
        return os.path.join(logdir, logfile) if logdir else logfile

    if logdir:
        # Only a directory was given: derive "<dir>/<program>.log".
        program = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, program),)

    return None
|
||||
|
||||
|
||||
def mask_password(message, secret="***"):
    """Replace password with 'secret' in message.

    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.

    For example:

    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"
    >>> mask_password("u'original_password' : u'aaaaa'")
    "u'original_password' : u'***'"
    """
    message = six.text_type(message)

    # Fast path: if no sensitive key appears anywhere in the text, there is
    # nothing to mask and the regex passes can be skipped entirely.
    if not any(sensitive_key in message for sensitive_key in _SANITIZE_KEYS):
        return message

    substitute = r'\g<1>' + secret + r'\g<2>'
    for regex in _SANITIZE_PATTERNS:
        message = regex.sub(substitute, message)
    return message
|
||||
|
||||
|
||||
class BaseLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter that exposes the synthesized AUDIT level."""

    def audit(self, msg, *args, **kwargs):
        # logging.AUDIT is registered at module import time (INFO + 1).
        self.log(logging.AUDIT, msg, *args, **kwargs)
|
||||
|
||||
|
||||
class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers building the real logger until first use."""

    def __init__(self, name='unknown', version='unknown'):
        # Intentionally does NOT call the parent __init__: the underlying
        # logger is created lazily by the ``logger`` property below.
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        # Build and cache the concrete adapter on first access.
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
        return self._logger
|
||||
|
||||
|
||||
class ContextAdapter(BaseLoggerAdapter):
    """Adapter that injects request-context data into every log record."""

    # Keep the legacy ``warn`` alias callers still use.
    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string
        # Tracks (message -> list of args) already emitted by deprecated(),
        # so each deprecation warning is logged at most once per args tuple.
        self._deprecated_messages_sent = dict()

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.

        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.

        Otherwise, the message will be logged (once) at the 'warn' level.

        :raises: :class:`DeprecatedConfig` if the system is configured for
                 fatal deprecations.

        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)

        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())

        if args in sent_args:
            # Already logged this message, so don't log it again.
            return

        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        # NOTE(mrodden): catch any Message/other object and
        #                coerce to unicode before they can get
        #                to the python logging and possibly
        #                cause string encoding trouble
        if not isinstance(msg, six.string_types):
            msg = six.text_type(msg)

        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        # Pull the request context from kwargs or from thread-local storage.
        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        # An instance object (or a bare UUID) may accompany the message;
        # render whichever is available using the configured format.
        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid', None) or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra

        extra.setdefault('user_identity', kwargs.pop('user_identity', None))

        extra['project'] = self.project
        extra['version'] = self.version
        # Expose the whole extra dict under the 'extra' key as well, so
        # formatters (e.g. JSONFormatter) can access it via record.extra.
        extra['extra'] = extra.copy()
        return msg, kwargs
|
||||
|
||||
|
||||
class JSONFormatter(logging.Formatter):
    """Formatter that renders each log record as a JSON document."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        # Render the exception info as a list of traceback lines; with
        # strip_newlines the multi-line entries are flattened into single
        # non-empty lines.
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        # Copy the interesting LogRecord attributes into a plain dict and
        # serialize it with the project's JSON helpers.
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}

        if hasattr(record, 'extra'):
            message['extra'] = record.extra

        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)

        return jsonutils.dumps(message)
|
||||
|
||||
|
||||
def _create_logging_excepthook(product_name):
    # Build a sys.excepthook that routes uncaught exceptions into the
    # product's logger at CRITICAL level.
    def logging_excepthook(exc_type, value, tb):
        extra = {}
        if CONF.verbose or CONF.debug:
            # Attach the full traceback only in verbose/debug mode.
            extra['exc_info'] = (exc_type, value, tb)
        getLogger(product_name).critical(
            "".join(traceback.format_exception_only(exc_type, value)),
            **extra)
    return logging_excepthook
|
||||
|
||||
|
||||
class LogConfigError(Exception):
    """Raised when a --log-config-append file cannot be parsed."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        # log_config: path of the offending file; err_msg: parser error text.
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % dict(log_config=self.log_config,
                                   err_msg=self.err_msg)
|
||||
|
||||
|
||||
def _load_log_config(log_config_append):
    # Apply a user-supplied logging config file, translating parser errors
    # into the project's LogConfigError. Existing loggers are kept.
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except moves.configparser.Error as exc:
        raise LogConfigError(log_config_append, str(exc))
|
||||
|
||||
|
||||
def setup(product_name, version='unknown'):
    """Setup logging.

    Prefers an explicit logging config file (CONF.log_config_append)
    when given; otherwise builds handlers and formatters from the
    individual CONF options.  Also installs a sys.excepthook so that
    uncaught exceptions are logged under ``product_name``.
    """
    if CONF.log_config_append:
        _load_log_config(CONF.log_config_append)
    else:
        _setup_logging_from_conf(product_name, version)
    sys.excepthook = _create_logging_excepthook(product_name)
|
||||
|
||||
|
||||
def set_defaults(logging_context_format_string):
    """Override the default value of the context-aware log format option."""
    cfg.set_defaults(log_opts,
                     logging_context_format_string=
                     logging_context_format_string)
|
||||
|
||||
|
||||
def _find_facility_from_conf():
    """Resolve CONF.syslog_log_facility to a SysLogHandler facility code.

    Accepts either a SysLogHandler attribute name (e.g. ``LOG_USER``)
    or a key of ``SysLogHandler.facility_names`` (e.g. ``user``).

    :returns: integer syslog facility code
    :raises: TypeError if the configured facility is not recognized
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        # Build a readable list of every accepted value.  keys() must be
        # copied into a list: dict views have no extend() method, so the
        # original code crashed here on Python 3.
        valid_facilities = list(facility_names.keys())
        # NOTE: the original list contained 'LOG_AUTH' twice; the
        # duplicate has been dropped.
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility
|
||||
|
||||
|
||||
class RFCSysLogHandler(logging.handlers.SysLogHandler):
    # Syslog handler that prefixes each message with the binary name,
    # presumably to populate the RFC 5424 APP-NAME field that the plain
    # SysLogHandler omits -- TODO confirm against the syslog daemon config.

    def __init__(self, *args, **kwargs):
        # _get_binary_name() is defined elsewhere in this module.
        self.binary_name = _get_binary_name()
        super(RFCSysLogHandler, self).__init__(*args, **kwargs)

    def format(self, record):
        """Prepend the process binary name to the formatted record."""
        msg = super(RFCSysLogHandler, self).format(record)
        msg = self.binary_name + ' ' + msg
        return msg
|
||||
|
||||
|
||||
def _setup_logging_from_conf(project, version):
    """Build the root logger's handlers and formatters from CONF options.

    Handlers: optional syslog, optional watched log file, and a stream
    handler (color stderr, or stdout when neither stderr nor a file is
    configured).  Every handler then gets either the legacy
    CONF.log_format formatter or the context-aware ContextFormatter;
    the root level is derived from debug/verbose, and per-module levels
    are applied from CONF.default_log_levels.
    """
    # Start from a clean slate: drop any handlers installed earlier.
    log_root = getLogger(None).logger
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        # TODO(bogdando) use the format provided by RFCSysLogHandler
        # after existing syslog format deprecation in J
        if CONF.use_syslog_rfc_format:
            syslog = RFCSysLogHandler(address='/dev/log',
                                      facility=facility)
        else:
            syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                    facility=facility)
        log_root.addHandler(syslog)

    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not logpath:
        # Neither stderr nor a file configured: fall back to stdout.
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        handler = importutils.import_object(
            "openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(project=project,
                                                  version=version,
                                                  datefmt=datefmt))

    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    # Apply per-module overrides, e.g. 'amqplib=WARN'.
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
|
||||
|
||||
# Module-level cache of ContextAdapter instances, keyed by logger name.
_loggers = {}
|
||||
|
||||
|
||||
def getLogger(name='unknown', version='unknown'):
    """Return a cached ContextAdapter wrapping the stdlib logger ``name``.

    The same adapter instance is handed back for a given name;
    ``version`` is only consulted the first time that name is seen.
    """
    if name not in _loggers:
        _loggers[name] = ContextAdapter(logging.getLogger(name),
                                        name,
                                        version)
    return _loggers[name]
|
||||
|
||||
|
||||
def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.

    :param name: name of the logger to materialize on first use
    :param version: version string forwarded to the real logger
    """
    return LazyAdapter(name, version)
|
||||
|
||||
|
||||
class WritableLogger(object):
    """File-like adapter exposing ``write`` so a logger can stand in for
    a stream (every written line is emitted via ``logger.log``).
    """

    def __init__(self, logger, level=logging.INFO):
        # Target logger and the level each written line is logged at.
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Strip the trailing newline; the logging handler adds its own.
        stripped = msg.rstrip()
        self.logger.log(self.level, stripped)
|
||||
|
||||
|
||||
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter

    If available, uses the context value stored in TLS - local.store.context

    """

    def __init__(self, *args, **kwargs):
        """Initialize ContextFormatter instance

        Takes additional keyword arguments which can be used in the message
        format string.

        :keyword project: project name
        :type project: string
        :keyword version: project version
        :type version: string

        """

        self.project = kwargs.pop('project', 'unknown')
        self.version = kwargs.pop('version', 'unknown')

        logging.Formatter.__init__(self, *args, **kwargs)

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""

        # store project info
        record.project = self.project
        record.version = self.version

        # store request info: copy every attribute of the thread-local
        # request context onto the record so format strings can use them.
        context = getattr(local.store, 'context', None)
        if context:
            d = _dictify_context(context)
            for k, v in d.items():
                setattr(record, k, v)

        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color'):
            if key not in record.__dict__:
                record.__dict__[key] = ''

        # NOTE(review): self._fmt is mutated per record; this assumes
        # formatting is serialized (e.g. by the handler lock) -- confirm.
        if record.__dict__.get('request_id', None):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string

        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix

        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)

        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()

        # The prefix may reference %(asctime)s; make sure it resolves.
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)

        # Prefix every traceback line so multi-line exceptions stay
        # attributable in interleaved logs.
        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
|
||||
|
||||
|
||||
class ColorHandler(logging.StreamHandler):
    # Stream handler that exposes an ANSI color escape as record.color,
    # for use by format strings containing %(color)s.
    # NOTE(review): logging.AUDIT is not a stdlib level -- presumably
    # registered elsewhere in this package; confirm before reuse.
    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        # Raises KeyError for levels missing from LEVEL_COLORS.
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
|
||||
|
||||
|
||||
class DeprecatedConfig(Exception):
    """Raised on a fatal use of a deprecated configuration option.

    :param msg: description of the deprecated usage
    """

    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # Fix: the original passed Exception itself to super(), which
        # skips this class in the MRO; name the class being defined.
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
|
@ -1,147 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2011 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sys
|
||||
|
||||
from eventlet import event
|
||||
from eventlet import greenthread
|
||||
|
||||
from muranoconductor.openstack.common.gettextutils import _ # noqa
|
||||
from muranoconductor.openstack.common import log as logging
|
||||
from muranoconductor.openstack.common import timeutils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LoopingCallDone(Exception):
    """Raised by a looping call's wrapped function to stop the loop.

    Works like StopIteration: the loop catches this exception, shuts
    down cleanly, and delivers the optional ``retvalue`` as the result
    of ``LoopingCall.wait()``.
    """

    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCall.wait() should return."""
        self.retvalue = retvalue
|
||||
|
||||
|
||||
class LoopingCallBase(object):
    """State and plumbing shared by the looping-call variants."""

    def __init__(self, f=None, *args, **kw):
        # The callable run on each iteration, with its stored arguments.
        self.f = f
        self.args = args
        self.kw = kw
        # Loop control flag and the event fired when the loop finishes.
        self._running = False
        self.done = None

    def stop(self):
        """Request that the loop exit after the current iteration."""
        self._running = False

    def wait(self):
        """Block until the loop has finished and return its result."""
        return self.done.wait()
|
||||
|
||||
|
||||
class FixedIntervalLoopingCall(LoopingCallBase):
    """A fixed interval looping call."""

    def start(self, interval, initial_delay=None):
        # Launches the loop in an eventlet green thread and returns the
        # Event that receives the final result (or exception).
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    # Sleep for whatever remains of the interval after
                    # subtracting the task's own run time.
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                # Clean stop requested by the wrapped function.
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn_n(_inner)
        return self.done
|
||||
|
||||
|
||||
# Backward-compatible alias for the renamed class.
# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall
|
||||
|
||||
|
||||
class DynamicLoopingCall(LoopingCallBase):
    """A looping call which sleeps until the next known event.

    The function called should return how long to sleep for before being
    called again.
    """

    def start(self, initial_delay=None, periodic_interval_max=None):
        # Launches the loop in an eventlet green thread and returns the
        # Event that receives the final result (or exception).
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    # The wrapped function decides how long to idle
                    # until its next invocation.
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug(_('Dynamic looping call sleeping for %.02f '
                                'seconds'), idle)
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                # Clean stop requested by the wrapped function.
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn(_inner)
        return self.done
|
@ -1,81 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2012 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Network-related utilities and helper functions.
|
||||
"""
|
||||
|
||||
import urlparse
|
||||
|
||||
|
||||
def parse_host_port(address, default_port=None):
    """Interpret a string as a host:port pair.

    An IPv6 address MUST be bracket-escaped when a port is attached,
    otherwise 2001:db8:85a3::8a2e:370:7334 is ambiguous between
    [2001:db8:85a3::8a2e:370:7334] and [2001:db8:85a3::8a2e:370]:7334.

    >>> parse_host_port('server01:80')
    ('server01', 80)
    >>> parse_host_port('server01')
    ('server01', None)
    >>> parse_host_port('server01', default_port=1234)
    ('server01', 1234)
    >>> parse_host_port('[::1]:80')
    ('::1', 80)
    >>> parse_host_port('[::1]')
    ('::1', None)
    >>> parse_host_port('[::1]', default_port=1234)
    ('::1', 1234)
    >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
    ('2001:db8:85a3::8a2e:370:7334', 1234)

    """
    if address[0] == '[':
        # Bracketed IPv6: "[host]" optionally followed by ":port".
        host, remainder = address[1:].split(']')
        if ':' in remainder:
            port = remainder.split(':')[1]
        else:
            port = default_port
    elif address.count(':') == 1:
        # Hostname or IPv4 with a single colon separating the port.
        host, port = address.split(':')
    else:
        # Zero colons: bare host.  Two or more: unescaped IPv6, which
        # is prohibited from carrying a port, so none is parsed.
        host = address
        port = default_port

    return (host, None if port is None else int(port))
|
||||
|
||||
|
||||
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL using urlparse.urlsplit(), splitting query and fragments.
    This function papers over Python issue9374 when needed.

    With issue9374, URLs using unknown schemes keep the query/fragment
    embedded in ``path``; this wrapper splits them out manually so the
    SplitResult fields are populated consistently.

    The parameters are the same as urlparse.urlsplit.
    """
    scheme, netloc, path, query, fragment = urlparse.urlsplit(
        url, scheme, allow_fragments)
    if allow_fragments and '#' in path:
        path, fragment = path.split('#', 1)
    if '?' in path:
        path, query = path.split('?', 1)
    return urlparse.SplitResult(scheme, netloc, path, query, fragment)
|
@ -1,14 +0,0 @@
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
@ -1,173 +0,0 @@
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import socket
|
||||
import uuid
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from muranoconductor.openstack.common import context
|
||||
from muranoconductor.openstack.common.gettextutils import _ # noqa
|
||||
from muranoconductor.openstack.common import importutils
|
||||
from muranoconductor.openstack.common import jsonutils
|
||||
from muranoconductor.openstack.common import log as logging
|
||||
from muranoconductor.openstack.common import timeutils
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
notifier_opts = [
|
||||
cfg.MultiStrOpt('notification_driver',
|
||||
default=[],
|
||||
help='Driver or drivers to handle sending notifications'),
|
||||
cfg.StrOpt('default_notification_level',
|
||||
default='INFO',
|
||||
help='Default notification level for outgoing notifications'),
|
||||
cfg.StrOpt('default_publisher_id',
|
||||
default=None,
|
||||
help='Default publisher_id for outgoing notifications'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(notifier_opts)
|
||||
|
||||
# Symbolic notification priorities; names mirror Python logging levels.
WARN = 'WARN'
INFO = 'INFO'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
DEBUG = 'DEBUG'

# The set of priorities notify() accepts.
log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
|
||||
|
||||
|
||||
class BadPriorityException(Exception):
    """Raised when a notification priority is not a recognized level."""
|
||||
|
||||
|
||||
def notify_decorator(name, fn):
    """Decorator for notify which is used from utils.monkey_patch().

    Wraps ``fn`` so that every call first emits a notification named
    ``name`` whose payload records the call's positional and keyword
    arguments, then invokes ``fn`` unchanged and returns its result.

    :param name: name of the function
    :param fn: function object being wrapped
    :returns: function -- decorated function
    """
    def wrapped_func(*args, **kwarg):
        # Capture the call arguments as the notification payload.
        # (Built directly rather than with the original element-wise
        # copy loops -- same contents, idiomatic form.)
        body = {'args': list(args), 'kwarg': dict(kwarg)}

        ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
        notify(ctxt,
               CONF.default_publisher_id or socket.gethostname(),
               name,
               CONF.default_notification_level,
               body)
        return fn(*args, **kwarg)
    return wrapped_func
|
||||
|
||||
|
||||
def publisher_id(service, host=None):
    """Return the '<service>.<host>' identifier for outgoing messages.

    When no host is supplied, CONF.host is used; if that option does
    not exist, falls back to CONF.default_publisher_id or the local
    hostname.
    """
    if host:
        return "%s.%s" % (service, host)
    try:
        resolved = CONF.host
    except AttributeError:
        resolved = CONF.default_publisher_id or socket.gethostname()
    return "%s.%s" % (service, resolved)
|
||||
|
||||
|
||||
def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type: the literal type of event (ex. Instance Creation)
    :param priority: patterned after the enumeration of Python logging
                     levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: A python dictionary of attributes
    :raises: BadPriorityException if ``priority`` is not in log_levels

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id
        a UUID representing the id for this notification

    timestamp
        the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(timeutils.utcnow()))

    # A failing driver is logged and skipped so one bad driver does not
    # prevent the rest from receiving the notification.
    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s")
                          % dict(e=e, payload=payload))
|
||||
|
||||
|
||||
# Lazily-populated cache of imported notification driver modules.
_drivers = None
|
||||
|
||||
|
||||
def _get_drivers():
    """Instantiate, cache, and return drivers based on the CONF."""
    global _drivers
    if _drivers is None:
        _drivers = {}
        # CONF.notification_driver is a multi-valued option; each entry
        # names a module implementing notify(context, message).
        for notification_driver in CONF.notification_driver:
            try:
                driver = importutils.import_module(notification_driver)
                _drivers[notification_driver] = driver
            except ImportError:
                # Unloadable drivers are logged and skipped, not fatal.
                LOG.exception(_("Failed to load notifier %s. "
                                "These notifications will not be sent.") %
                              notification_driver)
    return _drivers.values()
|
||||
|
||||
|
||||
def _reset_drivers():
    """Used by unit tests to reset the drivers."""
    global _drivers
    # The next _get_drivers() call will re-import from CONF.
    _drivers = None
|
@ -1,37 +0,0 @@
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from muranoconductor.openstack.common import jsonutils
|
||||
from muranoconductor.openstack.common import log as logging
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def notify(_context, message):
    """Notifies the recipient of the desired event given the model.

    Log notifications using openstack's default logging system.
    """

    priority = message.get('priority',
                           CONF.default_notification_level)
    # The lower-cased priority doubles as the logger method name
    # ('info', 'warn', ...).
    priority = priority.lower()
    logger = logging.getLogger(
        'conductor.openstack.common.notification.%s' %
        message['event_type'])
    getattr(logger, priority)(jsonutils.dumps(message))
|
@ -1,19 +0,0 @@
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
def notify(_context, message):
    """Notifies the recipient of the desired event given the model."""
    # Intentionally a no-op: this driver discards every notification.
    return None
|
@ -1,46 +0,0 @@
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from muranoconductor.openstack.common import context as req_context
|
||||
from muranoconductor.openstack.common.gettextutils import _ # noqa
|
||||
from muranoconductor.openstack.common import log as logging
|
||||
from muranoconductor.openstack.common import rpc
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
notification_topic_opt = cfg.ListOpt(
|
||||
'notification_topics', default=['notifications', ],
|
||||
help='AMQP topic used for openstack notifications')
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opt(notification_topic_opt)
|
||||
|
||||
|
||||
def notify(context, message):
    """Sends a notification via RPC.

    Falls back to an admin context when none is given, then publishes
    ``message`` on every configured topic suffixed with the message's
    lower-cased priority (e.g. 'notifications.info').  Failures per
    topic are logged and swallowed so one bad topic does not block the
    rest.
    """
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority',
                           CONF.default_notification_level)
    priority = priority.lower()
    for topic in CONF.notification_topics:
        topic = '%s.%s' % (topic, priority)
        try:
            rpc.notify(context, topic, message)
        except Exception:
            # Pass substitutions explicitly instead of locals():
            # locals() logs unrelated names and silently breaks if a
            # variable is renamed.
            LOG.exception(_("Could not send notification to %(topic)s. "
                            "Payload=%(message)s"),
                          {'topic': topic, 'message': message})
|
@ -1,52 +0,0 @@
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
'''messaging based notification driver, with message envelopes'''
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from muranoconductor.openstack.common import context as req_context
|
||||
from muranoconductor.openstack.common.gettextutils import _ # noqa
|
||||
from muranoconductor.openstack.common import log as logging
|
||||
from muranoconductor.openstack.common import rpc
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
notification_topic_opt = cfg.ListOpt(
|
||||
'topics', default=['notifications', ],
|
||||
help='AMQP topic(s) used for openstack notifications')
|
||||
|
||||
opt_group = cfg.OptGroup(name='rpc_notifier2',
|
||||
title='Options for rpc_notifier2')
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_group(opt_group)
|
||||
CONF.register_opt(notification_topic_opt, opt_group)
|
||||
|
||||
|
||||
def notify(context, message):
    """Sends a notification via RPC, wrapped in a message envelope.

    Falls back to an admin context when none is given, then publishes
    ``message`` on every topic in CONF.rpc_notifier2.topics suffixed
    with the message's lower-cased priority.  Failures per topic are
    logged and swallowed.
    """
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority',
                           CONF.default_notification_level)
    priority = priority.lower()
    for topic in CONF.rpc_notifier2.topics:
        topic = '%s.%s' % (topic, priority)
        try:
            rpc.notify(context, topic, message, envelope=True)
        except Exception:
            # Pass substitutions explicitly instead of locals():
            # locals() logs unrelated names and silently breaks if a
            # variable is renamed.
            LOG.exception(_("Could not send notification to %(topic)s. "
                            "Payload=%(message)s"),
                          {'topic': topic, 'message': message})
|
@ -1,22 +0,0 @@
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
# In-memory sink inspected by unit tests after exercising notification
# code paths.
NOTIFICATIONS = []


def notify(_context, message):
    """Test notifier, stores notifications in memory for unittests."""
    NOTIFICATIONS.append(message)
|
@ -1,376 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2011 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Generic Node base class for all workers that run on hosts."""
|
||||
|
||||
import errno
|
||||
import os
|
||||
import random
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
|
||||
import eventlet
|
||||
from eventlet import event
|
||||
import logging as std_logging
|
||||
from oslo.config import cfg
|
||||
|
||||
from muranoconductor.openstack.common import eventlet_backdoor
|
||||
from muranoconductor.openstack.common.gettextutils import _ # noqa
|
||||
from muranoconductor.openstack.common import importutils
|
||||
from muranoconductor.openstack.common import log as logging
|
||||
from muranoconductor.openstack.common import threadgroup
|
||||
|
||||
|
||||
# NOTE(review): the package was renamed 'conductor' -> 'muranoconductor'
# (see the surrounding imports); the old path made try_import() always
# return None, silently skipping rpc.cleanup() on shutdown.
rpc = importutils.try_import('muranoconductor.openstack.common.rpc')

CONF = cfg.CONF

LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Create an empty service collection and start the eventlet
        backdoor (if enabled in configuration).

        :returns: None
        """
        self.services = Services()
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    def launch_service(self, service):
        """Register the given service and start running it.

        :param service: The service you would like to start.
        :returns: None
        """
        # Propagate the backdoor port so the service can report it.
        service.backdoor_port = self.backdoor_port
        self.services.add(service)

    def stop(self):
        """Stop every service that is currently running.

        :returns: None
        """
        self.services.stop()

    def wait(self):
        """Block until every service has been stopped.

        :returns: None
        """
        self.services.wait()
|
||||
|
||||
|
||||
class SignalExit(SystemExit):
    """SystemExit variant that remembers which signal caused the exit."""

    def __init__(self, signo, exccode=1):
        super(SignalExit, self).__init__(exccode)
        # Signal number that triggered shutdown (e.g. signal.SIGTERM).
        self.signo = signo
|
||||
|
||||
|
||||
class ServiceLauncher(Launcher):
    """Launcher that runs services in the current process and converts
    SIGTERM/SIGINT into an orderly shutdown with rpc cleanup.
    """

    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

        # Unwinds into wait(), which maps it back to an exit status.
        raise SignalExit(signo)

    def wait(self):
        # Route TERM/INT through _handle_signal so the finally block below
        # always runs service cleanup.
        signal.signal(signal.SIGTERM, self._handle_signal)
        signal.signal(signal.SIGINT, self._handle_signal)

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        status = None
        try:
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[exc.signo]
            LOG.info(_('Caught %s, exiting'), signame)
            status = exc.code
        except SystemExit as exc:
            status = exc.code
        finally:
            # Stop services unconditionally; rpc cleanup is best-effort
            # (rpc may be None if the optional import failed).
            self.stop()
            if rpc:
                try:
                    rpc.cleanup()
                except Exception:
                    # We're shutting down, so it doesn't matter at this point.
                    LOG.exception(_('Exception during rpc cleanup.'))
        return status
|
||||
|
||||
|
||||
class ServiceWrapper(object):
    """Book-keeping record pairing a service with its worker processes."""

    def __init__(self, service, workers):
        self.service = service
        self.workers = workers
        # PIDs of currently live children.
        self.children = set()
        # Timestamps of recent forks, used to rate-limit respawns.
        self.forktimes = []
|
||||
|
||||
|
||||
class ProcessLauncher(object):
    """Forks worker processes for services, reaps dead children and
    respawns them, and tears everything down on SIGTERM/SIGINT.
    """

    def __init__(self):
        # Map of child pid -> owning ServiceWrapper.
        self.children = {}
        self.sigcaught = None
        self.running = True
        # Children hold the read end; it hits EOF when the parent dies,
        # letting orphaned workers notice and exit (see _pipe_watcher).
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')

        signal.signal(signal.SIGTERM, self._handle_signal)
        signal.signal(signal.SIGINT, self._handle_signal)

    def _handle_signal(self, signo, frame):
        # Remember which signal arrived so wait() can forward it.
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_('Parent process has died unexpectedly, exiting'))

        sys.exit(1)

    def _child_process(self, service):
        # Setup child signal handlers differently
        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)

        signal.signal(signal.SIGTERM, _sigterm)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.launch_service(service)
        launcher.wait()

    def _start_child(self, wrap):
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # NOTE(johannes): All exceptions are caught to ensure this
            # doesn't fallback into the loop spawning children. It would
            # be bad for a child to spawn more children.
            status = 0
            try:
                self._child_process(wrap.service)
            except SignalExit as exc:
                signame = {signal.SIGTERM: 'SIGTERM',
                           signal.SIGINT: 'SIGINT'}[exc.signo]
                LOG.info(_('Caught %s, exiting'), signame)
                status = exc.code
            except SystemExit as exc:
                status = exc.code
            except BaseException:
                LOG.exception(_('Unhandled exception'))
                status = 2
            finally:
                wrap.service.stop()

            # _exit avoids running the parent's atexit/cleanup handlers.
            os._exit(status)

        LOG.info(_('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_service(self, service, workers=1):
        # Spawn the initial set of workers for this service.
        wrap = ServiceWrapper(service, workers)

        LOG.info(_('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        # Reap one exited child; returns its ServiceWrapper, or None if
        # nothing exited (or the pid was unknown).
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(.01)
                continue

            # Respawn replacements for the children just reaped.
            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

        if self.sigcaught:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[self.sigcaught]
            LOG.info(_('Caught %s, stopping children'), signame)

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
|
||||
|
||||
|
||||
class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        self.tg = threadgroup.ThreadGroup(threads)
        # Fired once stop() has finished cleaning up.
        self._done = event.Event()

    def start(self):
        """Hook for subclasses; the base service does nothing on start."""
        pass

    def stop(self):
        """Stop all worker threads and signal that shutdown finished."""
        self.tg.stop()
        self.tg.wait()
        # Signal that service cleanup is done (guard against double-send):
        if not self._done.ready():
            self._done.send()

    def wait(self):
        """Block until stop() has completed."""
        self._done.wait()
|
||||
|
||||
|
||||
class Services(object):
    """Container that runs a set of Service objects in a thread group."""

    def __init__(self):
        self.services = []
        self.tg = threadgroup.ThreadGroup()
        # Released once every service has finished its cleanup.
        self.done = event.Event()

    def add(self, service):
        """Track ``service`` and spawn a thread that runs it."""
        self.services.append(service)
        self.tg.add_thread(self.run_service, service, self.done)

    def stop(self):
        """Gracefully stop every service, then reap the runner threads."""
        for service in self.services:
            service.stop()
            service.wait()

        # Each service has performed cleanup, now signal that the
        # run_service wrapper threads can die:
        if not self.done.ready():
            self.done.send()

        # reap threads:
        self.tg.stop()

    def wait(self):
        """Block until all runner threads have exited."""
        self.tg.wait()

    @staticmethod
    def run_service(service, done):
        """Service start wrapper.

        :param service: service to run
        :param done: event to wait on until a shutdown is triggered
        :returns: None
        """
        service.start()
        done.wait()
|
||||
|
||||
|
||||
def launch(service, workers=None):
    """Run ``service`` and return the launcher controlling it.

    :param workers: when set, fork that many worker processes via
        ProcessLauncher; otherwise run in-process via ServiceLauncher.
    """
    if workers:
        launcher = ProcessLauncher()
        launcher.launch_service(service, workers=workers)
    else:
        launcher = ServiceLauncher()
        launcher.launch_service(service)
    return launcher
|
@ -1,100 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
import ssl
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from muranoconductor.openstack.common.gettextutils import _ # noqa
|
||||
|
||||
|
||||
# Options read from the [ssl] section of the configuration file.
ssl_opts = [
    cfg.StrOpt('ca_file',
               default=None,
               help="CA certificate file to use to verify "
                    "connecting clients"),
    cfg.StrOpt('cert_file',
               default=None,
               help="Certificate file to use when starting "
                    "the server securely"),
    cfg.StrOpt('key_file',
               default=None,
               help="Private key file to use when starting "
                    "the server securely"),
]


CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
|
||||
|
||||
|
||||
def is_enabled():
    """Validate the [ssl] configuration and report whether SSL is on.

    :returns: truthy when a certificate or key file is configured
    :raises RuntimeError: if a configured file is missing, or only one of
        the cert/key pair was supplied
    """
    cert_path = CONF.ssl.cert_file
    key_path = CONF.ssl.key_file
    ca_path = CONF.ssl.ca_file
    use_ssl = cert_path or key_path

    if cert_path and not os.path.exists(cert_path):
        raise RuntimeError(_("Unable to find cert_file : %s") % cert_path)

    if ca_path and not os.path.exists(ca_path):
        raise RuntimeError(_("Unable to find ca_file : %s") % ca_path)

    if key_path and not os.path.exists(key_path):
        raise RuntimeError(_("Unable to find key_file : %s") % key_path)

    if use_ssl and (not cert_path or not key_path):
        raise RuntimeError(_("When running server in SSL mode, you must "
                             "specify both a cert_file and key_file "
                             "option value in your configuration file"))

    return use_ssl
|
||||
|
||||
|
||||
def wrap(sock):
    """Wrap ``sock`` into a server-side SSL socket using [ssl] options."""
    params = {
        'server_side': True,
        'certfile': CONF.ssl.cert_file,
        'keyfile': CONF.ssl.key_file,
        'cert_reqs': ssl.CERT_NONE,
    }

    ca = CONF.ssl.ca_file
    if ca:
        # A configured CA means client certificates are required
        # and verified against it.
        params['ca_certs'] = ca
        params['cert_reqs'] = ssl.CERT_REQUIRED

    return ssl.wrap_socket(sock, **params)
|
||||
|
||||
|
||||
# Map of user-facing protocol names to ssl module constants.  SSLv2 and
# SSLv3 are registered only when the underlying OpenSSL build provides
# them: modern builds drop both, and referencing ssl.PROTOCOL_SSLv3 in
# the dict literal (as before) raised AttributeError at import time.
_SSL_PROTOCOLS = {
    "tlsv1": ssl.PROTOCOL_TLSv1,
    "sslv23": ssl.PROTOCOL_SSLv23,
}

try:
    _SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
except AttributeError:
    pass

try:
    _SSL_PROTOCOLS["sslv3"] = ssl.PROTOCOL_SSLv3
except AttributeError:
    pass


def validate_ssl_version(version):
    """Translate a protocol name (case-insensitive) to an ssl constant.

    :param version: e.g. ``"TLSv1"`` or ``"SSLv23"``
    :returns: the matching ``ssl.PROTOCOL_*`` constant
    :raises RuntimeError: when the name is unknown on this platform
    """
    key = version.lower()
    try:
        return _SSL_PROTOCOLS[key]
    except KeyError:
        raise RuntimeError(_("Invalid SSL version : %s") % version)
|
@ -1,121 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2012 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import eventlet
|
||||
from eventlet import greenpool
|
||||
from eventlet import greenthread
|
||||
|
||||
from muranoconductor.openstack.common import log as logging
|
||||
from muranoconductor.openstack.common import loopingcall
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _thread_done(gt, *args, **kwargs):
|
||||
"""Callback function to be passed to GreenThread.link() when we spawn()
|
||||
Calls the :class:`ThreadGroup` to notify if.
|
||||
|
||||
"""
|
||||
kwargs['group'].thread_done(kwargs['thread'])
|
||||
|
||||
|
||||
class Thread(object):
    """Wrapper around a greenthread holding a reference to its
    ThreadGroup; on completion the group is notified (via _thread_done)
    so this wrapper can be removed from the group's thread list.
    """

    def __init__(self, thread, group):
        self.thread = thread
        self.thread.link(_thread_done, group=group, thread=self)

    def stop(self):
        """Kill the underlying greenthread."""
        self.thread.kill()

    def wait(self):
        """Block until the greenthread finishes; return its result."""
        return self.thread.wait()
|
||||
|
||||
|
||||
class ThreadGroup(object):
    """The point of the ThreadGroup class is to:

    * keep track of timers and greenthreads (making it easier to stop them
      when need be).
    * provide an easy API to add timers.
    """
    def __init__(self, thread_pool_size=10):
        # Pool bounds how many greenthreads run concurrently.
        self.pool = greenpool.GreenPool(thread_pool_size)
        self.threads = []
        self.timers = []

    def add_dynamic_timer(self, callback, initial_delay=None,
                          periodic_interval_max=None, *args, **kwargs):
        # Timer whose period is controlled by DynamicLoopingCall.
        timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
        timer.start(initial_delay=initial_delay,
                    periodic_interval_max=periodic_interval_max)
        self.timers.append(timer)

    def add_timer(self, interval, callback, initial_delay=None,
                  *args, **kwargs):
        # Fixed-interval periodic invocation of ``callback``.
        pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
        pulse.start(interval=interval,
                    initial_delay=initial_delay)
        self.timers.append(pulse)

    def add_thread(self, callback, *args, **kwargs):
        # Spawn ``callback`` in the pool and track it until it finishes.
        gt = self.pool.spawn(callback, *args, **kwargs)
        th = Thread(gt, self)
        self.threads.append(th)

    def thread_done(self, thread):
        # Called back (through Thread/_thread_done) when a thread exits.
        self.threads.remove(thread)

    def stop(self):
        # Kill tracked threads first, then stop timers; failures are
        # logged and ignored so one bad thread doesn't block shutdown.
        current = greenthread.getcurrent()
        for x in self.threads:
            if x is current:
                # don't kill the current thread.
                continue
            try:
                x.stop()
            except Exception as ex:
                LOG.exception(ex)

        for x in self.timers:
            try:
                x.stop()
            except Exception as ex:
                LOG.exception(ex)
        self.timers = []

    def wait(self):
        # Wait on timers first, then on the remaining greenthreads;
        # GreenletExit just means the thread was killed — not an error.
        for x in self.timers:
            try:
                x.wait()
            except eventlet.greenlet.GreenletExit:
                pass
            except Exception as ex:
                LOG.exception(ex)
        current = greenthread.getcurrent()
        for x in self.threads:
            if x is current:
                continue
            try:
                x.wait()
            except eventlet.greenlet.GreenletExit:
                pass
            except Exception as ex:
                LOG.exception(ex)
|
@ -1,188 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Time related utilities and helper functions.
|
||||
"""
|
||||
|
||||
import calendar
|
||||
import datetime
|
||||
|
||||
import iso8601
|
||||
import six
|
||||
|
||||
|
||||
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND


def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format.

    :param at: datetime to format; defaults to utcnow()
    :param subsecond: include microseconds when True
    """
    if not at:
        at = utcnow()
    st = at.strftime(_ISO8601_TIME_FORMAT
                     if not subsecond
                     else _ISO8601_TIME_FORMAT_SUBSECOND)
    # Naive datetimes are treated as UTC and suffixed with 'Z'.
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    st += ('Z' if tz == 'UTC' else tz)
    return st


def parse_isotime(timestr):
    """Parse time from ISO 8601 format.

    :raises ValueError: if ``timestr`` is malformed or not a string
    """
    try:
        return iso8601.parse_date(timestr)
    except iso8601.ParseError as e:
        # NOTE(review): Exception.message does not exist on Python 3;
        # stringify through six for py2/py3 compatibility (the module
        # already imports six).
        raise ValueError(six.text_type(e))
    except TypeError as e:
        raise ValueError(six.text_type(e))


def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Returns formatted utcnow (or ``at`` when given)."""
    if not at:
        at = utcnow()
    return at.strftime(fmt)


def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time back into a datetime."""
    return datetime.datetime.strptime(timestr, fmt)
|
||||
|
||||
|
||||
def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC naive object."""
    delta = timestamp.utcoffset()
    if delta is None:
        # Already naive; assume UTC.
        return timestamp
    return timestamp.replace(tzinfo=None) - delta


def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    if isinstance(before, six.string_types):
        before = parse_strtime(before).replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)


def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, six.string_types):
        after = parse_strtime(after).replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)


def utcnow_ts():
    """Timestamp version of our utcnow function."""
    return calendar.timegm(utcnow().timetuple())


def utcnow():
    """Overridable version of utils.utcnow."""
    override = utcnow.override_time
    if override:
        try:
            # A list yields successive values, one per call.
            return override.pop(0)
        except AttributeError:
            return override
    return datetime.datetime.utcnow()


def iso8601_from_timestamp(timestamp):
    """Returns a iso8601 formated date from timestamp."""
    return isotime(datetime.datetime.utcfromtimestamp(timestamp))


# No override installed by default; see set_time_override().
utcnow.override_time = None
|
||||
|
||||
|
||||
def set_time_override(override_time=None):
    """Overrides utils.utcnow.

    Make it return a constant time or a list thereof, one at a time.

    :param override_time: datetime instance or list thereof.  When omitted,
        the current UTC time at call time is used.  (The old default
        ``datetime.datetime.utcnow()`` was evaluated once at import time,
        freezing every no-argument caller to module-load time.)
    """
    utcnow.override_time = override_time or datetime.datetime.utcnow()


def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta."""
    assert utcnow.override_time is not None
    try:
        # NOTE(review): the previous in-place loop ('dt += timedelta')
        # only rebound a local name and silently left the list entries
        # unchanged; rebuild the list instead.
        utcnow.override_time = [dt + timedelta
                                for dt in utcnow.override_time]
    except TypeError:
        # A single datetime, not a list.
        utcnow.override_time += timedelta


def advance_time_seconds(seconds):
    """Advance overridden time by seconds."""
    advance_time_delta(datetime.timedelta(0, seconds))
|
||||
|
||||
|
||||
def clear_time_override():
    """Remove the overridden time."""
    utcnow.override_time = None


def marshall_now(now=None):
    """Make an rpc-safe datetime with microseconds.

    Note: tzinfo is stripped, but not required for relative times.
    """
    if not now:
        now = utcnow()
    return {'day': now.day, 'month': now.month, 'year': now.year,
            'hour': now.hour, 'minute': now.minute, 'second': now.second,
            'microsecond': now.microsecond}
|
||||
|
||||
|
||||
def unmarshall_time(tyme):
    """Unmarshall a datetime dict (as produced by marshall_now)."""
    return datetime.datetime(tyme['year'], tyme['month'], tyme['day'],
                             tyme['hour'], tyme['minute'], tyme['second'],
                             tyme['microsecond'])


def delta_seconds(before, after):
    """Return the difference between two timing objects.

    Compute the difference in seconds between two date, time, or
    datetime objects (as a float, to microsecond resolution).
    """
    delta = after - before
    try:
        return delta.total_seconds()
    except AttributeError:
        # Pre-2.7 timedelta has no total_seconds(); compute by hand.
        return ((delta.days * 24 * 3600) + delta.seconds +
                float(delta.microseconds) / (10 ** 6))
|
||||
|
||||
|
||||
def is_soon(dt, window):
    """Determines if time is going to happen in the next window seconds.

    :params dt: the time
    :params window: minimum seconds to remain to consider the time not soon

    :return: True if expiration is within the given duration
    """
    deadline = utcnow() + datetime.timedelta(seconds=window)
    return normalize_time(dt) <= deadline
|
@ -1,74 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from xml.dom import minidom
|
||||
from xml.parsers import expat
|
||||
from xml import sax
|
||||
from xml.sax import expatreader
|
||||
|
||||
|
||||
class ProtectedExpatParser(expatreader.ExpatParser):
    """An expat parser which disables DTD's and entities by default."""

    def __init__(self, forbid_dtd=True, forbid_entities=True,
                 *args, **kwargs):
        # Python 2.x old style class
        expatreader.ExpatParser.__init__(self, *args, **kwargs)
        self.forbid_dtd = forbid_dtd
        self.forbid_entities = forbid_entities

    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        raise ValueError("Inline DTD forbidden")

    def entity_decl(self, entityName, is_parameter_entity, value, base,
                    systemId, publicId, notationName):
        raise ValueError("<!ENTITY> entity declaration forbidden")

    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        # expat 1.2
        raise ValueError("<!ENTITY> unparsed entity forbidden")

    def external_entity_ref(self, context, base, systemId, publicId):
        raise ValueError("<!ENTITY> external entity forbidden")

    def notation_decl(self, name, base, sysid, pubid):
        raise ValueError("<!ENTITY> notation forbidden")

    def reset(self):
        expatreader.ExpatParser.reset(self)
        if self.forbid_dtd:
            # Any DOCTYPE aborts the parse via start_doctype_decl.
            self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
            self._parser.EndDoctypeDeclHandler = None
        if self.forbid_entities:
            # Route every entity-related event to a raising handler.
            self._parser.EntityDeclHandler = self.entity_decl
            self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
            self._parser.ExternalEntityRefHandler = self.external_entity_ref
            self._parser.NotationDeclHandler = self.notation_decl
            try:
                self._parser.SkippedEntityHandler = None
            except AttributeError:
                # some pyexpat versions do not support SkippedEntity
                pass


def safe_minidom_parse_string(xml_string):
    """Parse an XML string using minidom safely.

    DTDs and entity declarations are rejected by ProtectedExpatParser;
    SAX parse failures are re-raised uniformly as expat.ExpatError.
    """
    try:
        return minidom.parseString(xml_string, parser=ProtectedExpatParser())
    except sax.SAXParseException:
        raise expat.ExpatError()
|
@ -1,62 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from openstack.common import log as logging
|
||||
import xml_code_engine
|
||||
from muranocommon.messaging import Message
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Reporter(object):
    """Publishes per-task progress reports to the 'task-reports' queue."""

    def __init__(self, rmqclient, task_id, environment_id):
        self._rmqclient = rmqclient
        self._task_id = task_id
        self._environment_id = environment_id
        # Make sure the destination queue exists (HA-enabled).
        rmqclient.declare('task-reports', enable_ha=True)

    def report_generic(self, text, details=None, level='info'):
        """Report a message that is not tied to a particular entity."""
        return self._report_func(None, None, text, details, level)

    def _report_func(self, id, entity, text, details=None, level='info',
                     **kwargs):
        """Build a report payload and publish it for the API to consume."""
        payload = {
            'id': id,
            'entity': entity,
            'text': text,
            'details': details,
            'level': level,
            'environment_id': self._environment_id
        }

        msg = Message()
        msg.body = payload
        msg.id = self._task_id

        self._rmqclient.send(
            message=msg,
            key='task-reports')
        log.debug("Reported '%s' to API", payload)
|
||||
|
||||
|
||||
def _report_func(context, id, entity, text, **kwargs):
|
||||
reporter = context['/reporter']
|
||||
return reporter._report_func(id, entity, text, **kwargs)
|
||||
|
||||
|
||||
class ReportedException(Exception):
    """Raised for failures that have already been reported to the API."""
    pass


# Expose _report_func to execution-plan XML as the <report> element.
xml_code_engine.XmlCodeEngine.register_function(_report_func, "report")
|
@ -1,140 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import os.path
|
||||
import datetime
|
||||
from muranoconductor.commands.vm_agent import AgentTimeoutException
|
||||
from muranoconductor.commands.vm_agent import UnhandledAgentException
|
||||
|
||||
import xml_code_engine
|
||||
|
||||
from openstack.common import log as logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _extract_results(result_value, ok, errors):
    """Split an agent reply into successes (``ok``) and failures
    (``errors``), dispatching on the payload's format version.
    """
    if isinstance(result_value, AgentTimeoutException):
        errors.append({
            'source': 'timeout',
            'message': result_value.message,
            'timeout': result_value.timeout,
            'timestamp': datetime.datetime.now().isoformat()
        })
    elif isinstance(result_value, dict):
        # v1 payloads report FormatVersion 1.x (or omit it entirely).
        version = result_value.get('FormatVersion', '1.0.0')
        if version.startswith('1.'):
            _extract_v1_results(result_value, ok, errors)
        else:
            _extract_v2_results(result_value, ok, errors)
|
||||
|
||||
|
||||
def _extract_v1_results(result_value, ok, errors):
|
||||
if result_value['IsException']:
|
||||
errors.append(dict(_get_exception_info(
|
||||
result_value.get('Result', [])), source='execution_plan'))
|
||||
else:
|
||||
for res in result_value.get('Result', []):
|
||||
if res['IsException']:
|
||||
errors.append(dict(_get_exception_info(
|
||||
res.get('Result', [])), source='command'))
|
||||
else:
|
||||
ok.append(res)
|
||||
|
||||
|
||||
def _extract_v2_results(result_value, ok, errors):
|
||||
error_code = result_value.get('ErrorCode', 0)
|
||||
if not error_code:
|
||||
ok.append(result_value.get('Body'))
|
||||
else:
|
||||
body = result_value.get('Body') or {}
|
||||
err = {
|
||||
'message': body.get('Message'),
|
||||
'details': body.get('AdditionalInfo'),
|
||||
'errorCode': error_code,
|
||||
'time': result_value.get('Time')
|
||||
}
|
||||
for attr in ('Message', 'AdditionalInfo'):
|
||||
if attr in body:
|
||||
del body[attr]
|
||||
err['extra'] = body if body else None
|
||||
errors.append(err)
|
||||
|
||||
|
||||
def send_command(engine, context, body, template, service, unit,
                 mappings=None, result=None, error=None, timeout=None,
                 osVersion=None, **kwargs):
    """xml_code_engine handler for <send-command>: dispatch ``template``
    to the agent on ``unit`` and route the reply into the <success> /
    <failure> children of ``body``.

    :param result: context key that receives the successful results
    :param error: context key that receives the extracted errors
    :param timeout: seconds to wait for the agent reply (string or int)
    :param osVersion: optional subdirectory prefix for the template path
    """
    metadata_id = context['/metadata_id']
    if not mappings:
        mappings = {}
    if osVersion:
        # Per-OS templates live in a subdirectory named after the version.
        template = os.path.join(osVersion, template)
    command_dispatcher = context['/commandDispatcher']
    if timeout:
        timeout = int(timeout)

    def callback(result_value):
        # Invoked by the dispatcher with the agent's reply, or with an
        # AgentTimeoutException instance on timeout (see _extract_results).
        log.info(
            'Received result from {2} for {0}: {1}'.format(
                template, result_value, unit))
        ok = []
        errors = []
        _extract_results(result_value, ok, errors)

        if ok or not errors:
            if result is not None:
                context[result] = ok
            success_handler = body.find('success')
            if success_handler is not None:
                engine.evaluate_content(success_handler, context)
        if errors:
            if error is not None:
                context[error] = errors
            failure_handler = body.find('failure')
            if failure_handler is not None:
                log.warning(
                    'Handling errors ({0}) in failure block'.format(errors))
                engine.evaluate_content(failure_handler, context)
            else:
                # No <failure> block to absorb the errors: escalate.
                log.error("No failure block found for errors", exc_info=True)
                if isinstance(result_value, AgentTimeoutException):
                    raise result_value
                else:
                    raise UnhandledAgentException(errors)

    command_dispatcher.execute(
        name='agent',
        template=template,
        mappings=mappings,
        unit=unit,
        service=service,
        callback=callback,
        timeout=timeout,
        metadata_id=metadata_id)
|
||||
|
||||
|
||||
def _get_array_item(array, index):
|
||||
return array[index] if len(array) > index else None
|
||||
|
||||
|
||||
def _get_exception_info(data):
|
||||
data = data or []
|
||||
return {
|
||||
'type': _get_array_item(data, 0),
|
||||
'message': _get_array_item(data, 1),
|
||||
'command': _get_array_item(data, 2),
|
||||
'details': _get_array_item(data, 3),
|
||||
'timestamp': datetime.datetime.now().isoformat()
|
||||
}
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(send_command, "send-command")
|
@ -1,325 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
|
||||
import anyjson
|
||||
import jsonpath
|
||||
import re
|
||||
import types
|
||||
|
||||
import function_context
|
||||
import xml_code_engine
|
||||
from muranocommon.helpers.token_sanitizer import TokenSanitizer
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
object_id = id
|
||||
|
||||
|
||||
class Workflow(object):
|
||||
def __init__(self, filename, data, command_dispatcher,
|
||||
config, reporter, metadata_id):
|
||||
self._data = data
|
||||
self._engine = xml_code_engine.XmlCodeEngine()
|
||||
with open(filename) as xml:
|
||||
self._engine.load(xml)
|
||||
self._command_dispatcher = command_dispatcher
|
||||
self._config = config
|
||||
self._reporter = reporter
|
||||
|
||||
# format: (rule-id, entity-id) => True for auto-reset bans,
|
||||
# False for permanent bans
|
||||
self._blacklist = {}
|
||||
self._metadata_id = metadata_id
|
||||
|
||||
def execute(self):
|
||||
context = function_context.Context()
|
||||
context['/dataSource'] = self._data
|
||||
context['/commandDispatcher'] = self._command_dispatcher
|
||||
context['/config'] = self._config
|
||||
context['/reporter'] = self._reporter
|
||||
context['/__blacklist'] = self._blacklist
|
||||
context['/metadata_id'] = self._metadata_id
|
||||
return self._engine.execute(context)
|
||||
|
||||
def prepare(self):
|
||||
permanent_bans = dict([
|
||||
(key, value) for key, value
|
||||
in self._blacklist.iteritems()
|
||||
if value is False
|
||||
])
|
||||
self._blacklist.clear()
|
||||
self._blacklist.update(permanent_bans)
|
||||
|
||||
@staticmethod
|
||||
def _get_path(obj, path, create_non_existing=False):
|
||||
current = obj
|
||||
for part in path:
|
||||
if isinstance(current, types.ListType):
|
||||
current = current[int(part)]
|
||||
elif isinstance(current, types.DictionaryType):
|
||||
if part not in current:
|
||||
if create_non_existing:
|
||||
current[part] = {}
|
||||
else:
|
||||
return None
|
||||
current = current[part]
|
||||
else:
|
||||
raise ValueError()
|
||||
|
||||
return current
|
||||
|
||||
@staticmethod
|
||||
def _set_path(obj, path, value):
|
||||
current = Workflow._get_path(obj, path[:-1], True)
|
||||
if isinstance(current, types.ListType):
|
||||
current[int(path[-1])] = value
|
||||
elif isinstance(current, types.DictionaryType):
|
||||
current[path[-1]] = value
|
||||
else:
|
||||
raise ValueError()
|
||||
|
||||
@staticmethod
|
||||
def _get_relative_position(path, context):
|
||||
position = context['__dataSource_currentPosition'] or []
|
||||
|
||||
index = 0
|
||||
for c in path:
|
||||
if c == ':':
|
||||
if len(position) > 0:
|
||||
position = position[:-1]
|
||||
elif c == '/':
|
||||
position = []
|
||||
else:
|
||||
break
|
||||
|
||||
index += 1
|
||||
|
||||
return position, path[index:]
|
||||
|
||||
@staticmethod
|
||||
def _correct_position(path, context):
|
||||
position, suffix = Workflow._get_relative_position(path, context)
|
||||
|
||||
if not suffix:
|
||||
return position
|
||||
else:
|
||||
return position + suffix.split('.')
|
||||
|
||||
@staticmethod
|
||||
def _select_func(context, path='', source=None, default=None, **kwargs):
|
||||
|
||||
result = None
|
||||
if path.startswith('##'):
|
||||
config = context['/config']
|
||||
result = config[path[2:]]
|
||||
elif path.startswith('#'):
|
||||
result = context[path[1:]]
|
||||
elif source is not None:
|
||||
if path == '':
|
||||
p = []
|
||||
else:
|
||||
p = path.split('.')
|
||||
result = Workflow._get_path(context[source], p)
|
||||
else:
|
||||
result = Workflow._get_path(
|
||||
context['/dataSource'],
|
||||
Workflow._correct_position(path, context))
|
||||
|
||||
if not result and default is not None:
|
||||
result = default
|
||||
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def _set_func(path, context, body, engine, target=None, **kwargs):
|
||||
def secure_data(*args):
|
||||
return TokenSanitizer().sanitize(args)
|
||||
|
||||
body_data = engine.evaluate_content(body, context)
|
||||
|
||||
if path.startswith('##'):
|
||||
raise RuntimeError('Cannot modify config from XML-code')
|
||||
elif path.startswith('#'):
|
||||
context_path = ':' + path[1:]
|
||||
log.debug(
|
||||
"Setting context variable '{0}' to '{1}'".format(
|
||||
*secure_data(context_path, body_data)))
|
||||
context[context_path] = body_data
|
||||
return
|
||||
if target:
|
||||
data = context[target]
|
||||
position = path.split('.')
|
||||
if Workflow._get_path(data, position) != body_data:
|
||||
log.debug("Setting '{0}' to '{1}'".format(
|
||||
*secure_data(path, body_data)))
|
||||
Workflow._set_path(data, position, body_data)
|
||||
context['/hasSideEffects'] = True
|
||||
|
||||
else:
|
||||
data = context['/dataSource']
|
||||
new_position = Workflow._correct_position(path, context)
|
||||
if Workflow._get_path(data, new_position) != body_data:
|
||||
log.debug("Setting '{0}' to '{1}'".format(
|
||||
*secure_data(path, body_data)))
|
||||
Workflow._set_path(data, new_position, body_data)
|
||||
context['/hasSideEffects'] = True
|
||||
|
||||
@staticmethod
|
||||
def _mute_func(context, rule=None, id=None, **kwargs):
|
||||
if rule is None:
|
||||
rule = context['__currentRuleId']
|
||||
if id is None:
|
||||
id = context['__dataSource_currentObj_id']
|
||||
if id == '#':
|
||||
id = None
|
||||
|
||||
if rule is not None and id is not None:
|
||||
blacklist = context['/__blacklist']
|
||||
blacklist[(rule, id)] = False
|
||||
|
||||
@staticmethod
|
||||
def _unmute_func(context, rule=None, id=None, **kwargs):
|
||||
if rule is None:
|
||||
rule = context['__currentRuleId']
|
||||
if id is None:
|
||||
id = context['__dataSource_currentObj_id']
|
||||
if id == '#':
|
||||
id = None
|
||||
|
||||
if rule is not None and id is not None:
|
||||
blacklist = context['/__blacklist']
|
||||
if (rule, id) in blacklist:
|
||||
del blacklist[(rule, id)]
|
||||
|
||||
@staticmethod
|
||||
def _rule_func(match, context, body, engine, limit=0, id=None, desc=None,
|
||||
**kwargs):
|
||||
if not id:
|
||||
id = str(object_id(body))
|
||||
parent_rule_id = context['__currentRuleId']
|
||||
full_rule_id = id if not parent_rule_id \
|
||||
else '{0}/{1}'.format(parent_rule_id, id)
|
||||
context['__currentRuleId'] = full_rule_id
|
||||
position, match = Workflow._get_relative_position(match, context)
|
||||
if not desc:
|
||||
desc = match
|
||||
data = Workflow._get_path(context['/dataSource'], position)
|
||||
match = re.sub(r'@\.([\w.]+)',
|
||||
r"Workflow._get_path(@, '\1'.split('.'))", match)
|
||||
match = match.replace('$.', '$[*].')
|
||||
selected = jsonpath.jsonpath([data], match, 'IPATH') or []
|
||||
index = 0
|
||||
blacklist = context['/__blacklist']
|
||||
parent_object_id = context['__dataSource_currentObj_id']
|
||||
for found_match in selected:
|
||||
if 0 < int(limit) <= index:
|
||||
break
|
||||
index += 1
|
||||
new_position = position + found_match[1:]
|
||||
context['__dataSource_currentPosition'] = new_position
|
||||
cur_obj = Workflow._get_path(context['/dataSource'], new_position)
|
||||
|
||||
current_object_id = '#'
|
||||
if isinstance(cur_obj, dict) and ('id' in cur_obj) and \
|
||||
parent_object_id != '#':
|
||||
current_object_id = cur_obj['id'] if not parent_object_id \
|
||||
else '{0}/{1}'.format(parent_object_id, cur_obj['id'])
|
||||
if (full_rule_id, current_object_id) in blacklist:
|
||||
continue
|
||||
|
||||
context['__dataSource_currentObj'] = cur_obj
|
||||
context['__dataSource_currentObj_id'] = current_object_id
|
||||
secure_obj = TokenSanitizer().sanitize(cur_obj)
|
||||
log.debug("Rule '{0}' with ID = {2} matches on '{1}'"
|
||||
.format(desc, secure_obj, full_rule_id))
|
||||
if current_object_id != '#':
|
||||
log.debug('Muting {0} in rule {1}'.format(
|
||||
current_object_id, full_rule_id))
|
||||
blacklist[(full_rule_id, current_object_id)] = True
|
||||
for element in body:
|
||||
if element.tag == 'empty':
|
||||
continue
|
||||
engine.evaluate(element, context)
|
||||
if element.tag == 'rule' and context['/hasSideEffects']:
|
||||
break
|
||||
if not index:
|
||||
empty_handler = body.find('empty')
|
||||
if empty_handler is not None:
|
||||
log.debug("Running empty handler for rule '{0}'".format(desc))
|
||||
engine.evaluate_content(empty_handler, context)
|
||||
|
||||
@staticmethod
|
||||
def _select_all_func(context, path='', source=None, limit=0, **kwargs):
|
||||
if not source:
|
||||
position, path = Workflow._get_relative_position(path, context)
|
||||
source = Workflow._get_path(context['/dataSource'], position)
|
||||
result = jsonpath.jsonpath(source, path) or []
|
||||
return result if not limit else result[:limit]
|
||||
|
||||
@staticmethod
|
||||
def _select_single_func(context, path='', source=None, **kwargs):
|
||||
result = Workflow._select_all_func(context, path, source, **kwargs)
|
||||
return result[0] if len(result) >= 1 else None
|
||||
|
||||
@staticmethod
|
||||
def _workflow_func(context, body, engine, **kwargs):
|
||||
context['/hasSideEffects'] = False
|
||||
for element in body:
|
||||
engine.evaluate(element, context)
|
||||
if element.tag == 'rule' and context['/hasSideEffects']:
|
||||
return True
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def _stop_func(context, body, engine, **kwargs):
|
||||
if not 'temp' in context['/dataSource']:
|
||||
context['/dataSource']['temp'] = {}
|
||||
|
||||
context['/dataSource']['temp']['_stop_requested'] = True
|
||||
|
||||
|
||||
def format_error(context, error, **kwargs):
|
||||
error_data = context[error]
|
||||
return anyjson.dumps(error_data)
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
Workflow._rule_func, 'rule')
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
Workflow._workflow_func, 'workflow')
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
Workflow._set_func, 'set')
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
Workflow._select_func, 'select')
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
Workflow._stop_func, 'stop')
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
Workflow._select_all_func, 'select-all')
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
format_error, "format-error")
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
Workflow._select_single_func, 'select-single')
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
Workflow._mute_func, 'mute')
|
||||
|
||||
xml_code_engine.XmlCodeEngine.register_function(
|
||||
Workflow._unmute_func, 'unmute')
|
@ -1,143 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import uuid
|
||||
|
||||
import xml.etree.ElementTree as etree
|
||||
import types
|
||||
|
||||
import function_context
|
||||
|
||||
|
||||
class XmlCodeEngine(object):
|
||||
_functionMap = {}
|
||||
|
||||
def __init__(self):
|
||||
self._document = None
|
||||
|
||||
def load(self, file_obj):
|
||||
self._document = etree.parse(file_obj)
|
||||
|
||||
@staticmethod
|
||||
def register_function(func, name):
|
||||
XmlCodeEngine._functionMap[name] = func
|
||||
|
||||
def _execute_function(self, name, element, parent_context):
|
||||
if name == 'parameter':
|
||||
return None
|
||||
|
||||
if name not in self._functionMap:
|
||||
raise KeyError('Unknown function %s' % name)
|
||||
|
||||
definition = self._functionMap[name]
|
||||
context = function_context.Context(parent_context)
|
||||
args = {'engine': self, 'body': element, 'context': context}
|
||||
|
||||
for key, value in element.items():
|
||||
args[key] = value
|
||||
|
||||
for parameter in element.findall('parameter'):
|
||||
args[parameter.get('name')] = self.evaluate_content(
|
||||
parameter, context)
|
||||
|
||||
return definition(**args)
|
||||
|
||||
def evaluate(self, element, parent_context):
|
||||
return self._execute_function(element.tag, element, parent_context)
|
||||
|
||||
def evaluate_content(self, element, context):
|
||||
parts = [element.text or '']
|
||||
do_strip = False
|
||||
for sub_element in element:
|
||||
if sub_element.tag == 'parameter':
|
||||
continue
|
||||
do_strip = True
|
||||
parts.append(self._execute_function(
|
||||
sub_element.tag, sub_element, context))
|
||||
parts.append(sub_element.tail or '')
|
||||
|
||||
result = []
|
||||
|
||||
for t in parts:
|
||||
if not isinstance(t, types.StringTypes):
|
||||
result.append(t)
|
||||
|
||||
return_value = result
|
||||
if len(result) == 0:
|
||||
return_value = ''.join(parts)
|
||||
if do_strip:
|
||||
return_value = return_value.strip()
|
||||
elif len(result) == 1:
|
||||
return_value = result[0]
|
||||
|
||||
return return_value
|
||||
|
||||
def execute(self, parent_context=None):
|
||||
root = self._document.getroot()
|
||||
return self.evaluate(root, parent_context)
|
||||
|
||||
|
||||
def _dict_func(engine, body, context, **kwargs):
|
||||
result = {}
|
||||
for item in body:
|
||||
key = item.get('name')
|
||||
value = engine.evaluate_content(item, context)
|
||||
result[key] = value
|
||||
return result
|
||||
|
||||
|
||||
def _array_func(engine, body, context, **kwargs):
|
||||
result = []
|
||||
for item in body:
|
||||
result.append(engine.evaluate(item, context))
|
||||
return result
|
||||
|
||||
|
||||
def _text_func(engine, body, context, **kwargs):
|
||||
return str(engine.evaluate_content(body, context))
|
||||
|
||||
|
||||
def _int_func(engine, body, context, **kwargs):
|
||||
return int(engine.evaluate_content(body, context))
|
||||
|
||||
|
||||
def _function_func(engine, body, context, **kwargs):
|
||||
return lambda: engine.evaluate_content(body, context)
|
||||
|
||||
|
||||
def _null_func(**kwargs):
|
||||
return None
|
||||
|
||||
|
||||
def _true_func(**kwargs):
|
||||
return True
|
||||
|
||||
|
||||
def _false_func(**kwargs):
|
||||
return False
|
||||
|
||||
|
||||
def _gen_id(**kwargs):
|
||||
return uuid.uuid4().hex
|
||||
|
||||
|
||||
XmlCodeEngine.register_function(_dict_func, "map")
|
||||
XmlCodeEngine.register_function(_array_func, "list")
|
||||
XmlCodeEngine.register_function(_text_func, "text")
|
||||
XmlCodeEngine.register_function(_int_func, "int")
|
||||
XmlCodeEngine.register_function(_function_func, "function")
|
||||
XmlCodeEngine.register_function(_null_func, "null")
|
||||
XmlCodeEngine.register_function(_true_func, "true")
|
||||
XmlCodeEngine.register_function(_false_func, "false")
|
||||
XmlCodeEngine.register_function(_gen_id, "uuid")
|
@ -1,23 +0,0 @@
|
||||
[DEFAULT]
|
||||
|
||||
# The list of modules to copy from openstack-common
|
||||
module=exception
|
||||
module=gettextutils
|
||||
module=importutils
|
||||
module=jsonutils
|
||||
module=log
|
||||
module=xmlutils
|
||||
module=sslutils
|
||||
module=service
|
||||
module=notifier
|
||||
module=local
|
||||
module=install_venv_common
|
||||
module=timeutils
|
||||
module=eventlet_backdoor
|
||||
module=threadgroup
|
||||
module=loopingcall
|
||||
module=fileutils
|
||||
module=lockutils
|
||||
|
||||
# The base module to hold the copy of openstack.common
|
||||
base=muranoconductor
|
@ -1,19 +0,0 @@
|
||||
pbr>=0.6,<1.0
|
||||
anyjson>=0.3.3
|
||||
eventlet>=0.13.0
|
||||
jsonpath
|
||||
Paste
|
||||
PasteDeploy>=1.5.0
|
||||
iso8601>=0.1.8
|
||||
python-heatclient>=0.2.3
|
||||
jsonschema>=2.0.0,<3.0.0
|
||||
netaddr>=0.7.6
|
||||
|
||||
oslo.config>=1.2.0
|
||||
deep
|
||||
murano-common==0.4.1
|
||||
PyYAML>=3.1.0
|
||||
python-neutronclient>=2.3.4,<3
|
||||
oslo.messaging>=1.3.0a4
|
||||
|
||||
http://tarballs.openstack.org/murano-metadataclient/murano-metadataclient-master.tar.gz#egg=metadataclient-0.4.1
|
49
run_tests.sh
49
run_tests.sh
@ -1,49 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
function usage {
|
||||
echo "Usage: $0 [OPTION]..."
|
||||
echo "Run python-portasclient's test suite(s)"
|
||||
echo ""
|
||||
echo " -p, --pep8 Just run pep8"
|
||||
echo " -h, --help Print this usage message"
|
||||
echo ""
|
||||
echo "This script is deprecated and currently retained for compatibility."
|
||||
echo 'You can run the full test suite for multiple environments by running "tox".'
|
||||
echo 'You can run tests for only python 2.7 by running "tox -e py27", or run only'
|
||||
echo 'the pep8 tests with "tox -e pep8".'
|
||||
exit
|
||||
}
|
||||
|
||||
command -v tox > /dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
echo 'This script requires "tox" to run.'
|
||||
echo 'You can install it with "pip install tox".'
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
just_pep8=0
|
||||
|
||||
function process_option {
|
||||
case "$1" in
|
||||
-h|--help) usage;;
|
||||
-p|--pep8) let just_pep8=1;;
|
||||
esac
|
||||
}
|
||||
|
||||
for arg in "$@"; do
|
||||
process_option $arg
|
||||
done
|
||||
|
||||
if [ $just_pep8 -eq 1 ]; then
|
||||
tox -e pep8
|
||||
exit
|
||||
fi
|
||||
|
||||
tox -e py27 $toxargs 2>&1 | tee run_tests.err.log || exit
|
||||
if [ ${PIPESTATUS[0]} -ne 0 ]; then
|
||||
exit ${PIPESTATUS[0]}
|
||||
fi
|
||||
|
||||
if [ -z "$toxargs" ]; then
|
||||
tox -e pep8
|
||||
fi
|
64
setup.cfg
64
setup.cfg
@ -1,64 +0,0 @@
|
||||
[metadata]
|
||||
name = murano-conductor
|
||||
summary = The Conductor is orchestration engine server
|
||||
version = 0.4.1
|
||||
description-file =
|
||||
README.rst
|
||||
license = Apache License, Version 2.0
|
||||
author = Mirantis, Inc.
|
||||
author-email = murano-all@lists.openstack.org
|
||||
home-page = https://launchpad.net/murano
|
||||
classifier =
|
||||
Development Status :: 5 - Production/Stable
|
||||
Environment :: OpenStack
|
||||
Intended Audience :: Developers
|
||||
Intended Audience :: Information Technology
|
||||
License :: OSI Approved :: Apache Software License
|
||||
Operating System :: OS Independent
|
||||
Programming Language :: Python
|
||||
|
||||
[files]
|
||||
packages =
|
||||
muranoconductor
|
||||
|
||||
[global]
|
||||
setup-hooks =
|
||||
pbr.hooks.setup_hook
|
||||
|
||||
[entry_points]
|
||||
console_scripts =
|
||||
muranoconductor = muranoconductor.cmd.run:main
|
||||
|
||||
[build_sphinx]
|
||||
all_files = 1
|
||||
build-dir = doc/build
|
||||
source-dir = doc/source
|
||||
|
||||
[egg_info]
|
||||
tag_build =
|
||||
tag_date = 0
|
||||
tag_svn_revision = 0
|
||||
|
||||
[compile_catalog]
|
||||
directory = muranoconductor/locale
|
||||
domain = muranoconductor
|
||||
|
||||
[update_catalog]
|
||||
domain = muranoconductor
|
||||
output_dir = muranoconductor/locale
|
||||
input_file = muranoconductor/locale/conductor.pot
|
||||
|
||||
[extract_messages]
|
||||
keywords = _ gettext ngettext l_ lazy_gettext
|
||||
mapping_file = babel.cfg
|
||||
output_file = muranoconductor/locale/conductor.pot
|
||||
|
||||
[nosetests]
|
||||
# NOTE(jkoelker) To run the test suite under nose install the following
|
||||
# coverage http://pypi.python.org/pypi/coverage
|
||||
# tissue http://pypi.python.org/pypi/tissue (pep8 checker)
|
||||
# openstack-nose https://github.com/jkoelker/openstack-nose
|
||||
verbosity=2
|
||||
cover-package = muranoconductor
|
||||
cover-html = true
|
||||
cover-erase = true
|
22
setup.py
22
setup.py
@ -1,22 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
|
||||
import setuptools
|
||||
|
||||
setuptools.setup(
|
||||
setup_requires=['pbr'],
|
||||
pbr=True)
|
263
setup.sh
263
setup.sh
@ -1,263 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) 2014 Mirantis, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
RUN_DIR=$(cd $(dirname "$0") && pwd)
|
||||
INC_FILE="$RUN_DIR/common.inc"
|
||||
if [ -f "$INC_FILE" ]; then
|
||||
source "$INC_FILE"
|
||||
else
|
||||
echo "Can't load \"$INC_FILE\" or file not found, exiting!"
|
||||
exit 1
|
||||
fi
|
||||
#
|
||||
DAEMON_NAME="murano-conductor"
|
||||
DAEMON_USER="murano"
|
||||
DAEMON_GROUP="murano"
|
||||
DAEMON_CFG_DIR="/etc/murano"
|
||||
DAEMON_CACHE_DIR="/var/cache/murano"
|
||||
DAEMON_LOG_DIR="/var/log/murano"
|
||||
LOGFILE="/tmp/${DAEMON_NAME}_install.log"
|
||||
DAEMON_DB_CONSTR="sqlite:///$DAEMON_CFG_DIR/$DAEMON_NAME.sqlite"
|
||||
common_pkgs="wget git make gcc python-pip python-setuptools dos2unix"
|
||||
# Distro-specific package namings
|
||||
debian_pkgs="python-dev python-mysqldb libxml2-dev libxslt1-dev libffi-dev libssl-dev"
|
||||
redhat_pkgs="python-devel MySQL-python libxml2-devel libxslt-devel libffi-devel openssl-devel"
|
||||
#
|
||||
get_os
|
||||
eval req_pkgs="\$$(lowercase $DISTRO_BASED_ON)_pkgs"
|
||||
REQ_PKGS="$common_pkgs $req_pkgs"
|
||||
|
||||
function install_prerequisites()
|
||||
{
|
||||
retval=0
|
||||
_dist=$(lowercase $DISTRO_BASED_ON)
|
||||
if [ $_dist = "redhat" ]; then
|
||||
yum repolist | grep -qoE "epel"
|
||||
if [ $? -ne 0 ]; then
|
||||
log "Enabling EPEL6..."
|
||||
rpm -ivh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm >> $LOGFILE 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
log "... can't enable EPEL6, exiting!"
|
||||
retval=1
|
||||
return $retval
|
||||
fi
|
||||
fi
|
||||
yum --quiet makecache
|
||||
fi
|
||||
for pack in $REQ_PKGS
|
||||
do
|
||||
find_or_install "$pack"
|
||||
if [ $? -eq 1 ]; then
|
||||
retval=1
|
||||
break
|
||||
else
|
||||
retval=0
|
||||
fi
|
||||
done
|
||||
return $retval
|
||||
}
|
||||
function make_tarball()
|
||||
{
|
||||
retval=0
|
||||
log "Preparing tarball package..."
|
||||
setuppy="$RUN_DIR/setup.py"
|
||||
if [ -e "$setuppy" ]; then
|
||||
chmod +x $setuppy
|
||||
rm -rf $RUN_DIR/*.egg-info
|
||||
cd $RUN_DIR && python $setuppy egg_info > /dev/null 2>&1
|
||||
if [ $? -ne 0 ];then
|
||||
log "...\"$setuppy\" egg info creation fails, exiting!!!"
|
||||
retval=1
|
||||
exit 1
|
||||
fi
|
||||
rm -rf $RUN_DIR/dist/*
|
||||
log "...\"setup.py sdist\" output will be recorded in \"$LOGFILE\""
|
||||
cd $RUN_DIR && $setuppy sdist >> $LOGFILE 2>&1
|
||||
if [ $? -ne 0 ];then
|
||||
log "...\"$setuppy\" tarball creation fails, exiting!!!"
|
||||
retval=1
|
||||
exit 1
|
||||
fi
|
||||
#TRBL_FILE=$(basename $(ls $RUN_DIR/dist/*.tar.gz | head -n 1))
|
||||
TRBL_FILE=$(ls $RUN_DIR/dist/*.tar.gz | head -n 1)
|
||||
if [ ! -e "$TRBL_FILE" ]; then
|
||||
log "...tarball not found, exiting!"
|
||||
retval=1
|
||||
else
|
||||
log "...success, tarball created as \"$TRBL_FILE\""
|
||||
retval=0
|
||||
fi
|
||||
else
|
||||
log "...\"$setuppy\" not found, exiting!"
|
||||
retval=1
|
||||
fi
|
||||
return $retval
|
||||
}
|
||||
function run_pip_install()
|
||||
{
|
||||
find_pip
|
||||
retval=0
|
||||
tarball_file=${1:-$TRBL_FILE}
|
||||
log "Running \"$PIPCMD install $PIPARGS $tarball_file\" output will be recorded in \"$LOGFILE\""
|
||||
$PIPCMD install $PIPARGS $tarball_file >> $LOGFILE 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
log "...pip install fails, exiting!"
|
||||
retval=1
|
||||
exit 1
|
||||
fi
|
||||
return $retval
|
||||
}
|
||||
|
||||
function inject_init()
|
||||
{
|
||||
retval=0
|
||||
_dist=$(lowercase $DISTRO_BASED_ON)
|
||||
eval src_init_sctipt="$DAEMON_NAME-$_dist"
|
||||
_initscript="$DAEMON_NAME"
|
||||
cp -f "$RUN_DIR/etc/init.d/$src_init_sctipt" "/etc/init.d/$_initscript" || retval=$?
|
||||
chmod +x "/etc/init.d/$_initscript" || retval=$?
|
||||
iniset '' 'SYSTEM_USER' "$DAEMON_USER" "/etc/init.d/$_initscript"
|
||||
iniset '' 'DAEMON' "$(shslash $SERVICE_EXEC_PATH)" "/etc/init.d/$_initscript"
|
||||
iniset '' 'SCRIPTNAME' "$(shslash "/etc/init.d/$_initscript")" "/etc/init.d/$_initscript"
|
||||
case $_dist in
|
||||
"debian")
|
||||
update-rc.d $_initscript defaults || retval=$?
|
||||
update-rc.d $_initscript enable || retval=$?
|
||||
;;
|
||||
*)
|
||||
chkconfig --add $_initscript || retval=$?
|
||||
chkconfig $_initscript on || retval=$?
|
||||
;;
|
||||
esac
|
||||
return $retval
|
||||
}
|
||||
function purge_init()
|
||||
{
|
||||
retval=0
|
||||
_dist=$(lowercase $DISTRO_BASED_ON)
|
||||
_initscript="$DAEMON_NAME"
|
||||
service $_initscript stop
|
||||
if [ $? -ne 0 ]; then
|
||||
retval=1
|
||||
fi
|
||||
case $_dist in
|
||||
"debian")
|
||||
update-rc.d $_initscript disable
|
||||
update-rc.d -f $_initscript remove || retval=$?
|
||||
;;
|
||||
*)
|
||||
chkconfig $_initscript off || retval=$?
|
||||
chkconfig --del $_initscript || retval=$?
|
||||
;;
|
||||
esac
|
||||
rm -f "/etc/init.d/$_initscript" || retval=$?
|
||||
return $retval
|
||||
}
|
||||
function run_pip_uninstall()
|
||||
{
|
||||
find_pip
|
||||
retval=0
|
||||
pack_to_del=$(is_py_package_installed "$DAEMON_NAME")
|
||||
if [ $? -eq 0 ]; then
|
||||
log "Running \"$PIPCMD uninstall $PIPARGS $DAEMON_NAME\" output will be recorded in \"$LOGFILE\""
|
||||
$PIPCMD uninstall $pack_to_del --yes >> $LOGFILE 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
log "...can't uninstall $DAEMON_NAME with $PIPCMD"
|
||||
retval=1
|
||||
else
|
||||
log "...success"
|
||||
fi
|
||||
else
|
||||
log "Python package for \"$DAEMON_NAME\" not found"
|
||||
fi
|
||||
return $retval
|
||||
}
|
||||
function install_daemon()
|
||||
{
|
||||
install_prerequisites || exit 1
|
||||
make_tarball || exit $?
|
||||
run_pip_install || exit $?
|
||||
add_daemon_credentials "$DAEMON_USER" "$DAEMON_GROUP" || exit $?
|
||||
log "Creating required directories..."
|
||||
mk_dir "$DAEMON_CFG_DIR" "$DAEMON_USER" "$DAEMON_GROUP" || exit 1
|
||||
mk_dir "$DAEMON_CACHE_DIR" "$DAEMON_USER" "$DAEMON_GROUP" || exit 1
|
||||
mk_dir "$DAEMON_LOG_DIR" "$DAEMON_USER" "$DAEMON_GROUP" || exit 1
|
||||
log "Making sample configuration files at \"$DAEMON_CFG_DIR\""
|
||||
_src_conf_dir="$RUN_DIR/etc/murano"
|
||||
_prefix="murano-"
|
||||
for file in $(ls $_src_conf_dir)
|
||||
do
|
||||
if [ -d "$_src_conf_dir/$file" ]; then
|
||||
#Dir copy
|
||||
cp -f -r "$_src_conf_dir/$file" "$DAEMON_CFG_DIR/$file"
|
||||
else
|
||||
#cp -f "$_src_conf_dir/$file" "$DAEMON_CFG_DIR/${_prefix}${file}.sample"
|
||||
cp -f "$_src_conf_dir/$file" "$DAEMON_CFG_DIR/$file"
|
||||
config_file=$_prefix$(echo $file | sed -e 's/.sample$//')
|
||||
#removing Cr Lf
|
||||
dos2unix "$DAEMON_CFG_DIR/$file"
|
||||
if [ ! -e "$DAEMON_CFG_DIR/$config_file" ]; then
|
||||
cp -f "$_src_conf_dir/$file" "$DAEMON_CFG_DIR/$config_file"
|
||||
dos2unix "$DAEMON_CFG_DIR/$config_file"
|
||||
else
|
||||
log "\"$DAEMON_CFG_DIR/$config_file\" exists, skipping copy."
|
||||
fi
|
||||
fi
|
||||
done
|
||||
log "Setting log file and sqlite db placement..."
|
||||
iniset 'DEFAULT' 'log_file' "$(shslash $DAEMON_LOG_DIR/$DAEMON_NAME.log)" "$DAEMON_CFG_DIR/$DAEMON_NAME.conf"
|
||||
iniset 'DEFAULT' 'verbose' 'True' "$DAEMON_CFG_DIR/$DAEMON_NAME.conf"
|
||||
iniset 'DEFAULT' 'debug' 'True' "$DAEMON_CFG_DIR/$DAEMON_NAME.conf"
|
||||
iniset 'DEFAULT' 'init_scripts_dir' "$(shslash $DAEMON_CFG_DIR/init-scripts)" "$DAEMON_CFG_DIR/$DAEMON_NAME.conf"
|
||||
iniset 'DEFAULT' 'agent_config_dir' "$(shslash $DAEMON_CFG_DIR/agent-config)" "$DAEMON_CFG_DIR/$DAEMON_NAME.conf"
|
||||
iniset 'DEFAULT' 'data_dir' "$(shslash $DAEMON_CACHE_DIR/muranoconductor-data)" "$DAEMON_CFG_DIR/$DAEMON_NAME.conf"
|
||||
log "Searching daemon in \$PATH..."
|
||||
OLD_DAEMON_NAME=$DAEMON_NAME
|
||||
#murano-conductor->muranoconductor
|
||||
DAEMON_NAME=$(echo $DAEMON_NAME | tr -d '-')
|
||||
get_service_exec_path || exit $?
|
||||
DAEMON_NAME=$OLD_DAEMON_NAME
|
||||
log "...found at \"$SERVICE_EXEC_PATH\""
|
||||
log "Installing SysV init script."
|
||||
inject_init || exit $?
|
||||
log "Everything done, please, verify \"$DAEMON_CFG_DIR/$DAEMON_NAME.conf\", service created as \"${DAEMON_NAME}\"."
|
||||
}
|
||||
uninstall_daemon() {
    # Tear down the installed service: drop the SysV init script, remove
    # the daemon's system user/group, then uninstall the pip package.
    # Each step aborts the script with the failing step's exit status.
    # Config and log directories are intentionally left behind.
    log "Removing SysV init script..."
    purge_init || exit $?
    remove_daemon_credentials "$DAEMON_USER" "$DAEMON_GROUP" || exit $?
    run_pip_uninstall || exit $?
    log "Software uninstalled, daemon configuration files and logs located at \"$DAEMON_CFG_DIR\" and \"$DAEMON_LOG_DIR\"."
}
|
||||
# Command line args'
# Entry point: dispatch on the first CLI argument (install | uninstall).
COMMAND="$1"
case $COMMAND in
    install)
        # Start each install run with a fresh log file.
        rm -rf $LOGFILE
        log "Installing \"$DAEMON_NAME\" to system..."
        install_daemon
        ;;

    uninstall )
        log "Uninstalling \"$DAEMON_NAME\" from system..."
        uninstall_daemon
        ;;

    * )
        # Unknown or missing command: print usage and fail.
        echo -e "Usage: $(basename "$0") [command] \nCommands:\n\tinstall - Install \"$DAEMON_NAME\" software\n\tuninstall - Uninstall \"$DAEMON_NAME\" software"
        exit 1
        ;;
esac
|
@ -1,9 +0,0 @@
|
||||
unittest2
|
||||
mock>=1.0
|
||||
nose
|
||||
nose-exclude
|
||||
nosexcover
|
||||
#openstack.nose_plugin
|
||||
pep8
|
||||
sphinx>=1.1.2
|
||||
mockfs
|
@ -1,14 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
@ -1,14 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
@ -1,42 +0,0 @@
|
||||
{
|
||||
"name": "MyDataCenter",
|
||||
"id": "adc6d143f9584d10808c7ef4d07e4802",
|
||||
"token": "token",
|
||||
"services": {
|
||||
"activeDirectories": [
|
||||
{
|
||||
"id": "9571747991184642B95F430A014616F9",
|
||||
"domain": "acme.loc",
|
||||
"adminPassword": "SuperP@ssw0rd!",
|
||||
"units": [
|
||||
{
|
||||
"id": "273c9183b6e74c9c9db7fdd532c5eb25",
|
||||
"name": "dc01",
|
||||
"isMaster": true,
|
||||
"recoveryPassword": "2SuperP@ssw0rd2"
|
||||
},
|
||||
{
|
||||
"id": "377c6f16d17a416791f80724dab360c6",
|
||||
"name": "dc02",
|
||||
"isMaster": false,
|
||||
"adminPassword": "SuperP@ssw0rd",
|
||||
"recoveryPassword": "2SuperP@ssw0rd2"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"webServers": [
|
||||
{
|
||||
"id": "e9657ceef84a4e669e31795040080262",
|
||||
"domain": "acme.loc",
|
||||
"units": [
|
||||
{
|
||||
"id": "e6f9cfd07ced48fba64e6bd9e65aba64",
|
||||
"name": "iis01",
|
||||
"adminPassword": "SuperP@ssw0rd"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
@ -1,171 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import unittest
|
||||
|
||||
import mock
|
||||
import mockfs
|
||||
import heatclient.exc
|
||||
|
||||
from muranoconductor.commands.cloud_formation import HeatExecutor
|
||||
|
||||
|
||||
class TestHeatExecutor(unittest.TestCase):
    """Tests for HeatExecutor with mocked heat/keystone clients.

    A mockfs in-memory filesystem supplies the CloudFormation template;
    the heat client is a MagicMock so no network access happens.
    """

    def setUp(self):
        # Replace filesystem builtins so the executor reads the template
        # from an in-memory entry instead of disk.
        self.mfs = mockfs.replace_builtins()
        # Placeholder template: the $-prefixed tokens are replaced through
        # the 'mappings' argument passed to HeatExecutor.execute().
        template = {
            "$name": {
                "$key": "$value"
            }
        }
        self.metadata_id = 'b5bbea94023083e1ee06a52af5663b15c1fb1b7c'
        self.mfs.add_entries({
            './{0}/templates/cf/test.template'.format(self.metadata_id):
            json.dumps(template)})

    def tearDown(self):
        # Undo the builtin patching done in setUp().
        mockfs.restore_builtins()

    def _init(self, config_mock, ksclient_mock):
        # Common mock wiring: fake auth URL plus a keystone token and a
        # service catalog entry pointing at a bogus heat endpoint.
        config_mock.heat.auth_url = 'http://invalid.url'

        auth_data = ksclient_mock().tokens.authenticate()
        auth_data.id = '123456'
        auth_data.serviceCatalog = [{
            'name': 'heat',
            'endpoints': [{'publicURL': 'http://invalid.heat.url'}]
        }]

    @mock.patch('heatclient.v1.client.Client')
    @mock.patch('keystoneclient.v2_0.client.Client')
    @mock.patch('muranoconductor.config.CONF')
    def test_create_stack(self, config_mock, ksclient_mock, heat_mock):
        # CreateOrUpdate when Heat does not know the stack's template
        # (stacks.template raises HTTPNotFound) must call stacks.create.
        self._init(config_mock, ksclient_mock)
        reporter = mock.MagicMock()
        executor = HeatExecutor('stack', 'token', 'tenant_id', reporter)
        callback = mock.MagicMock()

        executor.execute(
            template='test',
            command='CreateOrUpdate',
            mappings={
                'name': 'testName',
                'key': 'testKey',
                'value': 'testValue'},
            arguments={
                'arg1': 'arg1Value',
                'arg2': 'arg2Value'},
            callback=callback,
            metadata_id=self.metadata_id)

        heat_mock().stacks.get().stack_status = 'CREATE_COMPLETE'
        heat_mock().stacks.template = mock.MagicMock(
            side_effect=heatclient.exc.HTTPNotFound)

        self.assertTrue(executor.has_pending_commands())
        result = executor.execute_pending()
        self.assertTrue(result)
        # The $name/$key/$value placeholders must have been substituted
        # with the values from 'mappings'.
        heat_mock().stacks.create.assert_called_with(
            stack_name='stack',
            parameters={
                'arg1': 'arg1Value',
                'arg2': 'arg2Value'},
            template={
                "testName": {
                    "testKey": "testValue"
                }
            },
            disable_rollback=False)
        callback.assert_called_with({})

    @mock.patch('heatclient.v1.client.Client')
    @mock.patch('keystoneclient.v2_0.client.Client')
    @mock.patch('muranoconductor.config.CONF')
    def test_update_stack(self, config_mock, ksclient_mock, heat_mock):
        # CreateOrUpdate on an existing stack must call stacks.update and
        # then poll stacks.get() until the status leaves IN_PROGRESS.
        self._init(config_mock, ksclient_mock)
        reporter = mock.MagicMock()
        executor = HeatExecutor('stack', 'token', 'tenant_id', reporter)
        callback = mock.MagicMock()

        executor.execute(
            template='test',
            command='CreateOrUpdate',
            mappings={
                'name': 'testName',
                'key': 'testKey',
                'value': 'testValue'},
            arguments={
                'arg1': 'arg1Value',
                'arg2': 'arg2Value'},
            callback=callback,
            metadata_id=self.metadata_id)

        get_mock = heat_mock().stacks.get()
        get_mock.stack_name = 'stack'
        get_mock.id = 'stack'
        get_mock.parameters = {}
        get_mock.stack_status = ''
        get_mock._status_index = 0

        def side_effect(*args, **kwargs):
            # First two polls report IN_PROGRESS, then UPDATE_COMPLETE,
            # exercising the executor's wait loop.
            if get_mock._status_index < 2:
                get_mock.stack_status = 'IN_PROGRESS'
            else:
                get_mock.stack_status = 'UPDATE_COMPLETE'
            get_mock._status_index += 1
            return get_mock

        heat_mock().stacks.get = mock.MagicMock(side_effect=side_effect)
        # A non-matching existing template forces the update path.
        heat_mock().stacks.template = mock.MagicMock(
            return_value={'instance': {}})

        self.assertTrue(executor.has_pending_commands())
        result = executor.execute_pending()
        self.assertTrue(result)
        heat_mock().stacks.update.assert_called_with(
            stack_id='stack',
            parameters={
                'arg1': 'arg1Value',
                'arg2': 'arg2Value'},
            template={
                "testName": {
                    "testKey": "testValue"
                }
            })
        callback.assert_called_with({})

    @mock.patch('heatclient.v1.client.Client')
    @mock.patch('keystoneclient.v2_0.client.Client')
    @mock.patch('muranoconductor.config.CONF')
    def test_delete_stack(self, config_mock, ksclient_mock, heat_mock):
        # Delete must call stacks.delete; HTTPNotFound from stacks.get is
        # treated as successful removal (callback receives True).
        self._init(config_mock, ksclient_mock)
        reporter = mock.MagicMock()
        executor = HeatExecutor('stack', 'token', 'tenant_id', reporter)
        callback = mock.MagicMock()

        executor.execute(
            template='test',
            command='Delete',
            callback=callback)

        heat_mock().stacks.get = mock.MagicMock(
            side_effect=heatclient.exc.HTTPNotFound)

        self.assertTrue(executor.has_pending_commands())
        result = executor.execute_pending()
        self.assertTrue(result)
        heat_mock().stacks.delete.assert_called_with(stack_id='stack')
        callback.assert_called_with(True)
|
@ -1,27 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
from muranoconductor.app import ConductorWorkflowService
|
||||
from muranoconductor.workflow import Workflow
|
||||
import muranoconductor.xml_code_engine as engine
|
||||
|
||||
|
||||
class TestMethodsAndClasses(unittest.TestCase):
    """Smoke test: the conductor workflow service starts and stops."""

    def test_init_service_class(self):
        # Constructing the service must not require any external setup.
        con = ConductorWorkflowService()

        con.start()
        con.stop()
|
@ -1,71 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
import mock
|
||||
import mockfs
|
||||
import json
|
||||
|
||||
from muranoconductor.commands.vm_agent import VmAgentExecutor
|
||||
|
||||
|
||||
class TestVmAgent(unittest.TestCase):
    """Tests for VmAgentExecutor execution-plan building.

    A mockfs filesystem holds an agent template plus the two PowerShell
    scripts it references.
    """
    # NOTE(review): mockfs.restore_builtins() is never called here (no
    # tearDown), unlike the sibling test classes — confirm this is safe.

    def setUp(self):
        self.mfs = mockfs.replace_builtins()
        # Agent template: lists script files to embed and one command.
        self.template = {
            "Scripts": [
                "Get-DnsListeningIpAddress.ps1",
                "Join-Domain.ps1"
            ],
            "Commands": [
                {
                    "Name": "Get-DnsListeningIpAddress",
                    "Arguments": {}
                }],
            "RebootOnCompletion": 0
        }
        self.metadata_id = 'a8571e3b1ba6b33f6c7dbe0f81217c5070377abe'

        self.mfs.add_entries({
            './a8571e3b1ba6b33f6c7dbe0f81217c5070377abe/'
            'templates/agent/test.template':
            json.dumps(self.template),

            './a8571e3b1ba6b33f6c7dbe0f81217c5070377abe/'
            'templates/agent/scripts/Get-DnsListeningIpAddress.ps1':
            'function GetDNSip(){\ntest\n}\n',

            './a8571e3b1ba6b33f6c7dbe0f81217c5070377abe/'
            'templates/agent/scripts/Join-Domain.ps1':
            'function JoinDomain(){\ntest\n}\n',
        })
        self.template_path = './a8571e3b1ba6b33f6c7dbe0f81217c5070377abe/' \
                             'templates/agent/test.template'

    def test_script_encode(self):
        # build_execution_plan must inline each referenced script as its
        # base64-encoded content in the resulting plan's 'Scripts' list.
        stack = mock.MagicMock()
        rmqclient = mock.MagicMock()
        reporter = mock.MagicMock()
        rmqclient.declare = mock.Mock()

        executor = VmAgentExecutor(stack, rmqclient, reporter)
        result, plan_id = executor.build_execution_plan(
            self.template_path)
        # Expected values are base64 of the two fixture script bodies.
        encoded = [
            'ZnVuY3Rpb24gR2V0RE5TaXAoKXsKdGVzdAp9Cg==\n',
            'ZnVuY3Rpb24gSm9pbkRvbWFpbigpewp0ZXN0Cn0K\n'
        ]
        self.assertEqual(result['Scripts'], encoded,
                         'Encoded script is incorrect')
|
@ -1,25 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
from muranoconductor.app import ConductorWorkflowService
|
||||
from muranoconductor.openstack.common import service
|
||||
|
||||
|
||||
class TestMethodsAndClasses(unittest.TestCase):
    """Smoke test: the service launcher accepts the conductor service."""

    def test_init_service_class(self):
        launcher = service.ServiceLauncher()
        con = ConductorWorkflowService()
        # NOTE(review): launch_service() typically blocks until the
        # service stops — confirm this test actually terminates.
        launcher.launch_service(con)
|
@ -1,100 +0,0 @@
|
||||
# Copyright (c) 2013 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import os
|
||||
import unittest
|
||||
|
||||
import deep
|
||||
import mock
|
||||
import mockfs
|
||||
|
||||
from muranoconductor.workflow import Workflow
|
||||
|
||||
|
||||
def load_sample(name):
    # Read a fixture from tests/sample_data next to this module, using
    # the *original* open() saved by mockfs so that tests which replace
    # filesystem builtins can still load real files.
    with mockfs.storage.original_open(os.path.join(
            os.path.dirname(__file__),
            'sample_data',
            name)) as sample_file:
        return sample_file.read()
|
||||
|
||||
|
||||
class TestWorkflow(unittest.TestCase):
    """Tests for Workflow XML rule execution against an object model."""

    def setUp(self):
        self.mfs = mockfs.replace_builtins()
        # Two independent copies of the same sample: 'model' is mutated
        # by workflows, 'original_model' stays pristine for deep-diffing.
        self.model = json.loads(load_sample('objectModel1.json'))
        self.original_model = json.loads(load_sample('objectModel1.json'))
        self.metadata_id = 'b5bbea94023083e1ee06a52af5663b15c1fb1b7c'

    def tearDown(self):
        mockfs.restore_builtins()

    def _execute_workflow(self, xml):
        # Serve the workflow XML from the in-memory path 'test'; all
        # collaborators are a shared stub that raises if ever invoked.
        self.mfs.add_entries({'test': xml})
        stub = mock.MagicMock()
        stub.side_effect = RuntimeError
        workflow = Workflow('test', self.model, stub,
                            stub, stub, self.metadata_id)
        workflow.execute()

    def test_empty_workflow_leaves_object_model_unchanged(self):
        xml = '<workflow/>'
        self._execute_workflow(xml)
        # deep.diff returns None when the structures are identical.
        self.assertTrue(deep.diff(self.original_model, self.model) is None)

    def test_modifying_object_model_from_workflow(self):
        # A matching rule must be able to <set> a nested path on the
        # matched service object.
        xml = '''
            <workflow>
                <rule match="$.services[*][?(@.id ==
                    '9571747991184642B95F430A014616F9'
                    and not @.state.invalid)]">
                    <set path="state.invalid">value</set>
                </rule>
            </workflow>
        '''
        self.assertFalse(
            'state' in
            self.model['services']['activeDirectories'][0])

        self._execute_workflow(xml)

        self.assertEqual(
            self.model['services']['activeDirectories'][0]['state']['invalid'],
            'value')

        # Only the 'state' key should differ from the original model.
        self.assertFalse(deep.diff(self.original_model, self.model) is None)
        del self.model['services']['activeDirectories'][0]['state']
        self.assertTrue(deep.diff(self.original_model, self.model) is None)

    def test_selecting_properties_from_object_model_within_workflow(self):
        # <select path="..."> inside <set> must pull values out of the
        # matched object (domain name and first unit's name here).
        xml = '''
            <workflow>
                <rule match="$.services[*][?(@.id ==
                    '9571747991184642B95F430A014616F9'
                    and not @.test)]">
                    <set path="test">
                        Domain <select
                        path="domain"/> with primary DC <select
                        path="units.0.name"/>
                    </set>
                </rule>
            </workflow>
        '''

        self._execute_workflow(xml)
        self.assertEqual(
            self.model['services']['activeDirectories'][0]['test'],
            'Domain acme.loc with primary DC dc01')
|
@ -1,69 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
print_hint() {
  # Nudge the user toward --help; diagnostics belong on stderr.
  printf "Try \`%s --help' for more information.\n" "${0##*/}" >&2
}
|
||||
|
||||
# Parse options via util-linux getopt so long options are accepted.
PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:o: \
                 --long help,base-dir:,package-name:,output-dir: -- "$@")

if [ $? != 0 ] ; then print_hint ; exit 1 ; fi

eval set -- "$PARSED_OPTIONS"

while true; do
    case "$1" in
        -h|--help)
            echo "${0##*/} [options]"
            echo ""
            echo "options:"
            echo "-h, --help show brief help"
            echo "-b, --base-dir=DIR Project base directory (required)"
            echo "-p, --package-name=NAME Project package name"
            echo "-o, --output-dir=DIR File output directory"
            exit 0
            ;;
        -b|--base-dir)
            shift
            # Strip trailing slashes from the supplied directory.
            BASEDIR=`echo $1 | sed -e 's/\/*$//g'`
            shift
            ;;
        -p|--package-name)
            shift
            PACKAGENAME=`echo $1`
            shift
            ;;
        -o|--output-dir)
            shift
            # Strip trailing slashes from the supplied directory.
            OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
            shift
            ;;
        --)
            # getopt always appends '--'; end of options.
            break
            ;;
    esac
done

# The project base directory is mandatory and must exist.
if [ -z $BASEDIR ] || ! [ -d $BASEDIR ]
then
    echo "${0##*/}: missing project base directory" >&2 ; print_hint ; exit 1
fi

# Default the package name to the base directory's name.
PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}}

# Default the output directory to <base>/etc; it must already exist.
OUTPUTDIR=${OUTPUTDIR:-$BASEDIR/etc}
if ! [ -d $OUTPUTDIR ]
then
    echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2
    exit 1
fi

# Collect every non-test .py module that declares config Opts, with
# paths made relative to the base directory.
BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
FILES=$(find $BASEDIR/$PACKAGENAME -type f -name "*.py" ! -path "*/tests/*" \
    -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)

# Prevent eventlet's greendns import side effects during generation.
export EVENTLET_NO_GREENDNS=yes

# NOTE(review): module path still says 'conductor' while the package in
# this tree is 'muranoconductor' — looks stale after the rename; confirm.
MODULEPATH=conductor.openstack.common.config.generator
OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
python -m $MODULEPATH $FILES > $OUTPUTFILE
|
@ -1,77 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Copyright 2010 OpenStack Foundation
|
||||
# Copyright 2013 IBM Corp.
|
||||
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ConfigParser
|
||||
import os
|
||||
import sys
|
||||
|
||||
import install_venv_common as install_venv # flake8: noqa
|
||||
|
||||
|
||||
def print_help(project, venv, root):
    # Post-install usage hint shown once the venv is fully provisioned.
    # NOTE: Python 2 'print' statement — this script targets Python 2.
    help = """
%(project)s development environment setup is complete.

%(project)s development uses virtualenv to track and manage Python
dependencies while in development and testing.

To activate the %(project)s virtualenv for the extent of your current
shell session you can run:

$ source %(venv)s/bin/activate

Or, if you prefer, you can run commands in the virtualenv on a case by
case basis by running:

$ %(root)s/tools/with_venv.sh <your command>
"""
    print help % dict(project=project, venv=venv, root=root)
||||
|
||||
|
||||
def main(argv):
    # Project root defaults to the parent of this script's directory,
    # overridable via the 'tools_path' environment variable.
    root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    if os.environ.get('tools_path'):
        root = os.environ['tools_path']
    # Venv location defaults to <root>/.venv, overridable via 'venv'.
    venv = os.path.join(root, '.venv')
    if os.environ.get('venv'):
        venv = os.environ['venv']

    pip_requires = os.path.join(root, 'requirements.txt')
    test_requires = os.path.join(root, 'test-requirements.txt')
    py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
    # Project name is taken from setup.cfg's [metadata] section.
    setup_cfg = ConfigParser.ConfigParser()
    setup_cfg.read('setup.cfg')
    project = setup_cfg.get('metadata', 'name')

    # Create the venv, install runtime + test deps, then print usage.
    install = install_venv.InstallVenv(
        root, venv, pip_requires, test_requires, py_version, project)
    options = install.parse_args(argv)
    install.check_python_version()
    install.check_dependencies()
    install.create_virtualenv(no_site_packages=options.no_site_packages)
    install.install_dependencies()
    install.post_process()
    print_help(project, venv, root)

if __name__ == '__main__':
    main(sys.argv)
|
@ -1,212 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Provides methods needed by installation script for OpenStack development
|
||||
virtual environments.
|
||||
|
||||
Since this script is used to bootstrap a virtualenv from the system's Python
|
||||
environment, it should be kept strictly compatible with Python 2.6.
|
||||
|
||||
Synced in from openstack-common
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import optparse
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
class InstallVenv(object):
    """Creates a project virtualenv and installs its dependencies.

    Distribution-specific behavior lives in subclasses returned by
    get_distro(); this base class is distro-agnostic.
    """

    def __init__(self, root, venv, requirements,
                 test_requirements, py_version,
                 project):
        # 'root' is the project checkout; 'venv' the target env path;
        # requirements/test_requirements are pip requirement file paths.
        self.root = root
        self.venv = venv
        self.requirements = requirements
        self.test_requirements = test_requirements
        self.py_version = py_version
        self.project = project

    def die(self, message, *args):
        # Print a formatted error to stderr and abort the process.
        print(message % args, file=sys.stderr)
        sys.exit(1)

    def check_python_version(self):
        if sys.version_info < (2, 6):
            self.die("Need Python Version >= 2.6")

    def run_command_with_code(self, cmd, redirect_output=True,
                              check_exit_code=True):
        """Runs a command in an out-of-process shell.

        Returns the output of that command. Working directory is self.root.
        """
        if redirect_output:
            stdout = subprocess.PIPE
        else:
            stdout = None

        proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
        output = proc.communicate()[0]
        if check_exit_code and proc.returncode != 0:
            self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
        return (output, proc.returncode)

    def run_command(self, cmd, redirect_output=True, check_exit_code=True):
        # Convenience wrapper: same as run_command_with_code but returns
        # only the captured output, discarding the exit code.
        return self.run_command_with_code(cmd, redirect_output,
                                          check_exit_code)[0]

    def get_distro(self):
        # Fedora/RHEL family is detected via its release marker files;
        # everything else falls back to the generic Distro behavior.
        if (os.path.exists('/etc/fedora-release') or
                os.path.exists('/etc/redhat-release')):
            return Fedora(
                self.root, self.venv, self.requirements,
                self.test_requirements, self.py_version, self.project)
        else:
            return Distro(
                self.root, self.venv, self.requirements,
                self.test_requirements, self.py_version, self.project)

    def check_dependencies(self):
        # Ensure the 'virtualenv' tool itself is available.
        self.get_distro().install_virtualenv()

    def create_virtualenv(self, no_site_packages=True):
        """Creates the virtual environment and installs PIP.

        Creates the virtual environment and installs PIP only into the
        virtual environment.
        """
        if not os.path.isdir(self.venv):
            print('Creating venv...', end=' ')
            if no_site_packages:
                self.run_command(['virtualenv', '-q', '--no-site-packages',
                                  self.venv])
            else:
                self.run_command(['virtualenv', '-q', self.venv])
            print('done.')
        else:
            # Re-running the bootstrap is a no-op if the venv exists.
            print("venv already exists...")
            pass

    def pip_install(self, *args):
        # Run pip inside the venv via with_venv.sh; output is streamed
        # to the console rather than captured.
        self.run_command(['tools/with_venv.sh',
                          'pip', 'install', '--upgrade'] + list(args),
                         redirect_output=False)

    def install_dependencies(self):
        print('Installing dependencies with pip (this can take a while)...')

        # First things first, make sure our venv has the latest pip and
        # setuptools.
        self.pip_install('pip>=1.3')
        self.pip_install('setuptools')

        self.pip_install('-r', self.requirements)
        self.pip_install('-r', self.test_requirements)

    def post_process(self):
        # Delegate distro-specific fixups (e.g. patching) to the distro.
        self.get_distro().post_process()

    def parse_args(self, argv):
        """Parses command-line arguments."""
        parser = optparse.OptionParser()
        parser.add_option('-n', '--no-site-packages',
                          action='store_true',
                          help="Do not inherit packages from global Python "
                               "install")
        return parser.parse_args(argv[1:])[0]
|
||||
|
||||
|
||||
class Distro(InstallVenv):
    """Generic distribution: installs virtualenv via easy_install."""

    def check_cmd(self, cmd):
        # True if 'cmd' resolves on PATH ('which' prints nothing and
        # exits non-zero otherwise; exit code is deliberately ignored).
        return bool(self.run_command(['which', cmd],
                    check_exit_code=False).strip())

    def install_virtualenv(self):
        if self.check_cmd('virtualenv'):
            return

        # Best-effort install via easy_install before giving up.
        if self.check_cmd('easy_install'):
            print('Installing virtualenv via easy_install...', end=' ')
            if self.run_command(['easy_install', 'virtualenv']):
                print('Succeeded')
                return
            else:
                print('Failed')

        self.die('ERROR: virtualenv not found.\n\n%s development'
                 ' requires virtualenv, please install it using your'
                 ' favorite package management tool' % self.project)

    def post_process(self):
        """Any distribution-specific post-processing gets done here.

        In particular, this is useful for applying patches to code inside
        the venv.
        """
        pass
|
||||
|
||||
|
||||
class Fedora(Distro):
    """This covers all Fedora-based distributions.

    Includes: Fedora, RHEL, CentOS, Scientific Linux
    """

    def check_pkg(self, pkg):
        # True if the RPM database reports 'pkg' installed (rpm -q
        # exits 0 for installed packages).
        return self.run_command_with_code(['rpm', '-q', pkg],
                                          check_exit_code=False)[1] == 0

    def apply_patch(self, originalfile, patchfile):
        # '-N' skips already-applied patches, so this is idempotent;
        # failures are ignored for the same reason.
        self.run_command(['patch', '-N', originalfile, patchfile],
                         check_exit_code=False)

    def install_virtualenv(self):
        if self.check_cmd('virtualenv'):
            return

        # On Fedora the packaged virtualenv is required up front.
        if not self.check_pkg('python-virtualenv'):
            self.die("Please install 'python-virtualenv'.")

        super(Fedora, self).install_virtualenv()

    def post_process(self):
        """Workaround for a bug in eventlet.

        This currently affects RHEL6.1, but the fix can safely be
        applied to all RHEL and Fedora distributions.

        This can be removed when the fix is applied upstream.

        Nova: https://bugs.launchpad.net/nova/+bug/884915
        Upstream: https://bitbucket.org/eventlet/eventlet/issue/89
        RHEL: https://bugzilla.redhat.com/958868
        """

        # Install "patch" program if it's not there
        if not self.check_pkg('patch'):
            self.die("Please install 'patch'.")

        # Apply the eventlet patch
        self.apply_patch(os.path.join(self.venv, 'lib', self.py_version,
                                      'site-packages',
                                      'eventlet/green/subprocess.py'),
                         'contrib/redhat-eventlet.patch')
|
@ -1,4 +0,0 @@
|
||||
#!/bin/bash
# Run the given command inside the project's virtualenv.
# Usage: tools/with_venv.sh <command> [args...]
#
# Fixes over the original: $(…) instead of backticks, "$0" and the
# activate path quoted (paths may contain spaces), and "$@" quoted so
# each argument is passed through as a single word.
TOOLS=$(dirname "$0")
VENV=$TOOLS/../.venv
source "$VENV/bin/activate" && "$@"
|
58
tox.ini
58
tox.ini
@ -1,58 +0,0 @@
|
||||
[tox]
|
||||
envlist = py26,py27,pep8,pyflakes
|
||||
|
||||
[testenv]
|
||||
setenv = VIRTUAL_ENV={envdir}
|
||||
NOSE_WITH_OPENSTACK=1
|
||||
NOSE_OPENSTACK_COLOR=1
|
||||
NOSE_OPENSTACK_RED=0.05
|
||||
NOSE_OPENSTACK_YELLOW=0.025
|
||||
NOSE_OPENSTACK_SHOW_ELAPSED=1
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
commands = nosetests
|
||||
|
||||
[testenv:pep8]
|
||||
deps = pep8==1.3.3
|
||||
commands = pep8 --repeat --show-source muranoconductor setup.py
|
||||
|
||||
[testenv:venv]
|
||||
commands = {posargs}
|
||||
|
||||
[testenv:cover]
|
||||
commands = nosetests --cover-erase --cover-package=muranoconductor --with-xcoverage
|
||||
|
||||
[tox:jenkins]
|
||||
downloadcache = ~/cache/pip
|
||||
|
||||
[testenv:jenkins26]
|
||||
basepython = python2.6
|
||||
setenv = NOSE_WITH_XUNIT=1
|
||||
deps = file://{toxinidir}/.cache.bundle
|
||||
|
||||
[testenv:jenkins27]
|
||||
basepython = python2.7
|
||||
setenv = NOSE_WITH_XUNIT=1
|
||||
deps = file://{toxinidir}/.cache.bundle
|
||||
|
||||
[testenv:jenkinscover]
|
||||
deps = file://{toxinidir}/.cache.bundle
|
||||
setenv = NOSE_WITH_XUNIT=1
|
||||
commands = nosetests --cover-erase --cover-package=muranoconductor --with-xcoverage
|
||||
|
||||
[testenv:jenkinsvenv]
|
||||
deps = file://{toxinidir}/.cache.bundle
|
||||
setenv = NOSE_WITH_XUNIT=1
|
||||
commands = {posargs}
|
||||
|
||||
[testenv:pyflakes]
|
||||
deps = flake8
|
||||
commands = flake8
|
||||
|
||||
[flake8]
|
||||
# H301 one import per line
|
||||
# H302 import only modules
|
||||
ignore = H301,H302,F401
|
||||
show-source = true
|
||||
builtins = _
|
||||
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,tools
|
Loading…
x
Reference in New Issue
Block a user