Merge "Implements the haproxy amphora agent api server"
This commit is contained in:
commit
6b391eb53c
81
bin/create_certificates.sh
Normal file
81
bin/create_certificates.sh
Normal file
@@ -0,0 +1,81 @@
#!/bin/bash

# USAGE: <certificate directory> <openssl.cnf> (an example is in etc/certificates)
# The following certificates for testing will be generated:
#
# * ca_01.pem   a certificate authority file
# * server.pem  combines a key and a cert from this certificate authority
# * client.key  the client key
# * client.pem  the client certificate
#
# You will need to copy them to places the agent_api server/client can find and
# specify them in the config.
#
# Example for client use:
#
# curl -k -v --key client.key --cacert ca_01.pem --cert client.pem https://0.0.0.0:8443/
#
#
# Notes:
# For production use, the CA issuing the client certificate and the CA issuing the server certificate
# need to be different so an attacker can't just use the server certificate from a compromised amphora
# to control all the others.
#
# Sources:
# * https://communities.bmc.com/community/bmcdn/bmc_atrium_and_foundation_technologies/
#   discovery/blog/2014/09/03/the-pulse-create-your-own-personal-ca-with-openssl
#   This describes how to create a CA and sign requests
# * https://www.digitalocean.com/community/tutorials/
#   openssl-essentials-working-with-ssl-certificates-private-keys-and-csrs -
#   how to issue a CSR and much more

## Create CA

# Create directories
CERT_DIR=$1
OPEN_SSL_CONF=$2 # etc/certificates/openssl.cnf

echo $CERT_DIR

mkdir $CERT_DIR
cd $CERT_DIR
mkdir newcerts private
chmod 700 private

# prepare files
touch index.txt
echo 01 > serial

echo "Create the CA's private and public keypair (2k long)"
openssl genrsa -passout pass:foobar -des3 -out private/cakey.pem 2048

echo "You will be asked to enter some information about the certificate."
openssl req -x509 -passin pass:foobar -new -nodes -key private/cakey.pem -config $OPEN_SSL_CONF -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.example.com" -out ca_01.pem

echo "Here is the certificate"
openssl x509 -in ca_01.pem -text -noout

## Create Server/Client CSR
echo "Generate a server key and a CSR"
openssl req \
    -newkey rsa:2048 -nodes -keyout client.key \
    -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.example.com" \
    -out client.csr

echo "Sign request"
openssl ca -passin pass:foobar -config $OPEN_SSL_CONF -in client.csr -out client-.pem -batch

echo "Generate single pem client.pem"
cat client-.pem client.key > client.pem

echo "Note: For production use the ca issuing the client certificate and the ca issuing the server"
echo "certificate need to be different so a hacker can't just use the server certificate from a"
echo "compromised amphora to control all the others."
echo "\nTo use the certificates copy them to the directory specified in the octavia.conf"
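A quick way to sanity-check the files the script produced is to load them with Python's ssl module. A minimal sketch, assuming the script was run as `create_certificates.sh ./certs etc/certificates/openssl.cnf`; the paths are illustrative, not part of the commit:

# Minimal check that the generated test client material fits together.
# Paths below are assumptions (CERT_DIR=./certs), not values this commit fixes.
import ssl

ctx = ssl.create_default_context(cafile="./certs/ca_01.pem")      # trust the test CA
ctx.load_cert_chain("./certs/client.pem", "./certs/client.key")   # client cert + key
print("client certificate and key load cleanly against ca_01.pem")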
@@ -92,6 +92,17 @@ function octavia_configure {
    recreate_database_mysql octavia
    iniset $OCTAVIA_DIR/octavia/db/migration/alembic.ini alembic sqlalchemy.url "mysql+pymysql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_HOST}:3306/octavia"
    alembic -c $OCTAVIA_DIR/octavia/db/migration/alembic.ini upgrade head

    if [[ -a $OCTAVIA_CERTS_DIR ]] ; then
        rm -rf $OCTAVIA_CERTS_DIR
    fi
    source $OCTAVIA_DIR/bin/create_certificates.sh $OCTAVIA_CERTS_DIR $OCTAVIA_DIR/etc/certificates/openssl.cnf
    iniset $OCTAVIA_CONF haproxy_amphora client_cert ${OCTAVIA_CERTS_DIR}/client.pem
    iniset $OCTAVIA_CONF haproxy_amphora server_ca ${OCTAVIA_CERTS_DIR}/ca_01.pem
    iniset $OCTAVIA_CONF certificates ca_certificate ${OCTAVIA_CERTS_DIR}/ca_01.pem
    iniset $OCTAVIA_CONF certificates ca_private_key ${OCTAVIA_CERTS_DIR}/private/cakey.pem
    iniset $OCTAVIA_CONF certificates ca_private_key_passphrase foobar

}

function build_mgmt_network {
@@ -103,6 +114,7 @@ function build_mgmt_network {
    neutron security-group-create lb-mgmt-sec-grp
    neutron security-group-rule-create --protocol icmp lb-mgmt-sec-grp
    neutron security-group-rule-create --protocol tcp --port-range-min 22 --port-range-max 22 lb-mgmt-sec-grp
    neutron security-group-rule-create --protocol tcp --port-range-min 8443 --port-range-max 8443 lb-mgmt-sec-grp

    OCTAVIA_MGMT_SEC_GRP_ID=$(nova secgroup-list | awk ' / lb-mgmt-sec-grp / {print $2}')
    iniset ${OCTAVIA_CONF} controller_worker amp_secgroup_list ${OCTAVIA_MGMT_SEC_GRP_ID}
@@ -5,6 +5,7 @@ OCTAVIA_DIR=${OCTAVIA_DIR:-"${DEST}/octavia"}
OCTAVIA_BIN_DIR=${OCTAVIA_BIN_DIR:-$(get_python_exec_prefix)}
OCTAVIA_CONF_DIR=${OCTAVIA_CONF_DIR:-"/etc/octavia"}
OCTAVIA_SSH_DIR=${OCTAVIA_SSH_DIR:-${OCTAVIA_CONF_DIR}/.ssh}
OCTAVIA_CERTS_DIR=${OCTAVIA_CERTS_DIR:-${OCTAVIA_CONF_DIR}/certs}
OCTAVIA_CONF=${OCTAVIA_CONF:-${OCTAVIA_CONF_DIR}/octavia.conf}
OCTAVIA_TEMPEST_DIR=${OCTAVIA_TEMPEST_DIR:-${OCTAVIA_DIR}/octavia/tests/tempest}
@@ -325,7 +325,7 @@ if [ "$AMP_ROOTPW" ]; then
    export DIB_PASSWORD=$AMP_ROOTPW
fi

# Add the Octavia Amphora agent element
# Add the Octavia Amphora agent.py element
AMP_element_sequence="$AMP_element_sequence amphora-agent"

# Allow full elements override
13  elements/amphora-agent/install.d/75-run_setup_install  Executable file
@@ -0,0 +1,13 @@
#!/bin/bash
set -eux

install-packages libffi-dev libssl-dev
cd /opt/amphora-agent/
python setup.py install
cp etc/init/octavia-agent.conf /etc/init/
mkdir /etc/octavia
cp etc/octavia.conf /etc/octavia
# we assume certs, etc will come in through the config drive
mkdir /etc/octavia/certs
mkdir /var/lib/octavia
@@ -1,2 +1,2 @@
# This is temporary until we have a pip package
amphora-agent git /opt/amphora-agent https://review.openstack.org/stackforge/octavia refs/changes/34/160034/16
amphora-agent git /opt/amphora-agent https://review.openstack.org/stackforge/octavia
350  etc/certificates/openssl.cnf  Normal file
@@ -0,0 +1,350 @@
#
# OpenSSL example configuration file.
# This is mostly being used for generation of certificate requests.
#

# This definition stops the following lines choking if HOME isn't
# defined.
HOME = .
RANDFILE = $ENV::HOME/.rnd

# Extra OBJECT IDENTIFIER info:
#oid_file = $ENV::HOME/.oid
oid_section = new_oids

# To use this configuration file with the "-extfile" option of the
# "openssl x509" utility, name here the section containing the
# X.509v3 extensions to use:
# extensions =
# (Alternatively, use a configuration file that has only
# X.509v3 extensions in its main [= default] section.)

[ new_oids ]

# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.
# Add a simple OID like this:
# testoid1=1.2.3.4
# Or use config file substitution like this:
# testoid2=${testoid1}.5.6

# Policies used by the TSA examples.
tsa_policy1 = 1.2.3.4.1
tsa_policy2 = 1.2.3.4.5.6
tsa_policy3 = 1.2.3.4.5.7

####################################################################
[ ca ]
default_ca = CA_default                 # The default ca section

####################################################################
[ CA_default ]

dir             = ./                    # Where everything is kept
certs           = $dir/certs            # Where the issued certs are kept
crl_dir         = $dir/crl              # Where the issued crl are kept
database        = $dir/index.txt        # database index file.
#unique_subject = no                    # Set to 'no' to allow creation of
                                        # several certificates with same subject.
new_certs_dir   = $dir/newcerts         # default place for new certs.

certificate     = $dir/ca_01.pem        # The CA certificate
serial          = $dir/serial           # The current serial number
crlnumber       = $dir/crlnumber        # the current crl number
                                        # must be commented out to leave a V1 CRL
crl             = $dir/crl.pem          # The current CRL
private_key     = $dir/private/cakey.pem # The private key
RANDFILE        = $dir/private/.rand    # private random number file

x509_extensions = usr_cert              # The extensions to add to the cert

# Comment out the following two lines for the "traditional"
# (and highly broken) format.
name_opt        = ca_default            # Subject Name options
cert_opt        = ca_default            # Certificate field options

# Extension copying option: use with caution.
# copy_extensions = copy

# Extensions to add to a CRL. Note: Netscape communicator chokes on V2 CRLs
# so this is commented out by default to leave a V1 CRL.
# crlnumber must also be commented out to leave a V1 CRL.
# crl_extensions = crl_ext

default_days    = 365                   # how long to certify for
default_crl_days= 30                    # how long before next CRL
default_md      = default               # use public key default MD
preserve        = no                    # keep passed DN ordering

# A few different ways of specifying how similar the request should look
# For type CA, the listed attributes must be the same, and the optional
# and supplied fields are just that :-)
policy = policy_match

# For the CA policy
[ policy_match ]
countryName             = match
stateOrProvinceName     = match
organizationName        = match
organizationalUnitName  = optional
commonName              = supplied
emailAddress            = optional

# For the 'anything' policy
# At this point in time, you must list all acceptable 'object'
# types.
[ policy_anything ]
countryName             = optional
stateOrProvinceName     = optional
localityName            = optional
organizationName        = optional
organizationalUnitName  = optional
commonName              = supplied
emailAddress            = optional

####################################################################
[ req ]
default_bits            = 2048
default_keyfile         = privkey.pem
distinguished_name      = req_distinguished_name
attributes              = req_attributes
x509_extensions         = v3_ca         # The extensions to add to the self signed cert

# Passwords for private keys if not present they will be prompted for
# input_password = secret
# output_password = secret

# This sets a mask for permitted string types. There are several options.
# default: PrintableString, T61String, BMPString.
# pkix   : PrintableString, BMPString (PKIX recommendation before 2004)
# utf8only: only UTF8Strings (PKIX recommendation after 2004).
# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).
# MASK:XXXX a literal mask value.
# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.
string_mask = utf8only

# req_extensions = v3_req # The extensions to add to a certificate request

[ req_distinguished_name ]
countryName                     = Country Name (2 letter code)
countryName_default             = AU
countryName_min                 = 2
countryName_max                 = 2

stateOrProvinceName             = State or Province Name (full name)
stateOrProvinceName_default     = Some-State

localityName                    = Locality Name (eg, city)

0.organizationName              = Organization Name (eg, company)
0.organizationName_default      = Internet Widgits Pty Ltd

# we can do this but it is not needed normally :-)
#1.organizationName             = Second Organization Name (eg, company)
#1.organizationName_default     = World Wide Web Pty Ltd

organizationalUnitName          = Organizational Unit Name (eg, section)
#organizationalUnitName_default =

commonName                      = Common Name (e.g. server FQDN or YOUR name)
commonName_max                  = 64

emailAddress                    = Email Address
emailAddress_max                = 64

# SET-ex3 = SET extension number 3

[ req_attributes ]
challengePassword               = A challenge password
challengePassword_min           = 4
challengePassword_max           = 20

unstructuredName                = An optional company name

[ usr_cert ]

# These extensions are added when 'ca' signs a request.

# This goes against PKIX guidelines but some CAs do it and some software
# requires this to avoid interpreting an end user certificate as a CA.

basicConstraints=CA:FALSE

# Here are some examples of the usage of nsCertType. If it is omitted
# the certificate can be used for anything *except* object signing.

# This is OK for an SSL server.
# nsCertType = server

# For an object signing certificate this would be used.
# nsCertType = objsign

# For normal client use this is typical
# nsCertType = client, email

# and for everything including object signing:
# nsCertType = client, email, objsign

# This is typical in keyUsage for a client certificate.
# keyUsage = nonRepudiation, digitalSignature, keyEncipherment

# This will be displayed in Netscape's comment listbox.
nsComment = "OpenSSL Generated Certificate"

# PKIX recommendations harmless if included in all certificates.
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer

# This stuff is for subjectAltName and issuerAltname.
# Import the email address.
# subjectAltName=email:copy
# An alternative to produce certificates that aren't
# deprecated according to PKIX.
# subjectAltName=email:move

# Copy subject details
# issuerAltName=issuer:copy

#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem
#nsBaseUrl
#nsRevocationUrl
#nsRenewalUrl
#nsCaPolicyUrl
#nsSslServerName

# This is required for TSA certificates.
# extendedKeyUsage = critical,timeStamping

[ v3_req ]

# Extensions to add to a certificate request

basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment

[ v3_ca ]


# Extensions for a typical CA


# PKIX recommendation.

subjectKeyIdentifier=hash

authorityKeyIdentifier=keyid:always,issuer

# This is what PKIX recommends but some broken software chokes on critical
# extensions.
#basicConstraints = critical,CA:true
# So we do this instead.
basicConstraints = CA:true

# Key usage: this is typical for a CA certificate. However since it will
# prevent it being used as a test self-signed certificate it is best
# left out by default.
# keyUsage = cRLSign, keyCertSign

# Some might want this also
# nsCertType = sslCA, emailCA

# Include email address in subject alt name: another PKIX recommendation
# subjectAltName=email:copy
# Copy issuer details
# issuerAltName=issuer:copy

# DER hex encoding of an extension: beware experts only!
# obj=DER:02:03
# Where 'obj' is a standard or added object
# You can even override a supported extension:
# basicConstraints= critical, DER:30:03:01:01:FF

[ crl_ext ]

# CRL extensions.
# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.

# issuerAltName=issuer:copy
authorityKeyIdentifier=keyid:always

[ proxy_cert_ext ]
# These extensions should be added when creating a proxy certificate

# This goes against PKIX guidelines but some CAs do it and some software
# requires this to avoid interpreting an end user certificate as a CA.

basicConstraints=CA:FALSE

# Here are some examples of the usage of nsCertType. If it is omitted
# the certificate can be used for anything *except* object signing.

# This is OK for an SSL server.
# nsCertType = server

# For an object signing certificate this would be used.
# nsCertType = objsign

# For normal client use this is typical
# nsCertType = client, email

# and for everything including object signing:
# nsCertType = client, email, objsign

# This is typical in keyUsage for a client certificate.
# keyUsage = nonRepudiation, digitalSignature, keyEncipherment

# This will be displayed in Netscape's comment listbox.
nsComment = "OpenSSL Generated Certificate"

# PKIX recommendations harmless if included in all certificates.
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer

# This stuff is for subjectAltName and issuerAltname.
# Import the email address.
# subjectAltName=email:copy
# An alternative to produce certificates that aren't
# deprecated according to PKIX.
# subjectAltName=email:move

# Copy subject details
# issuerAltName=issuer:copy

#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem
#nsBaseUrl
#nsRevocationUrl
#nsRenewalUrl
#nsCaPolicyUrl
#nsSslServerName

# This really needs to be in place for it to be a proxy certificate.
proxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo

####################################################################
[ tsa ]

default_tsa = tsa_config1               # the default TSA section

[ tsa_config1 ]

# These are used by the TSA reply generation only.
dir             = ./demoCA              # TSA root directory
serial          = $dir/tsaserial        # The current serial number (mandatory)
crypto_device   = builtin               # OpenSSL engine to use for signing
signer_cert     = $dir/tsacert.pem      # The TSA signing certificate
                                        # (optional)
certs           = $dir/cacert.pem       # Certificate chain to include in reply
                                        # (optional)
signer_key      = $dir/private/tsakey.pem # The TSA private key (optional)

default_policy  = tsa_policy1           # Policy if request did not specify it
                                        # (optional)
other_policies  = tsa_policy2, tsa_policy3  # acceptable policies (optional)
digests         = md5, sha1             # Acceptable message digests (mandatory)
accuracy        = secs:1, millisecs:500, microsecs:100 # (optional)
clock_precision_digits = 0              # number of digits after dot. (optional)
ordering        = yes                   # Is ordering defined for timestamps?
                                        # (optional, default: no)
tsa_name        = yes                   # Must the TSA name be included in the reply?
                                        # (optional, default: no)
ess_cert_id_chain = no                  # Must the ESS cert id chain be included?
                                        # (optional, default: no)
8  etc/init/octavia-agent.conf  Normal file
@@ -0,0 +1,8 @@
description "Start up the Octavia Amphora Agent"

start on startup

respawn
respawn limit 2 2

exec amphora-agent --config-file /etc/octavia/octavia.conf
@@ -51,10 +51,10 @@

[networking]
# Network to communicate with amphora
# lb_network_name =


[haproxy_amphora]
# username = ubuntu
# key_path = /opt/stack/.ssh/id_rsa
# base_path = /var/lib/octavia
# base_cert_dir = /var/lib/octavia/certs
# haproxy_template = /var/lib/octavia/custom_template
@@ -63,6 +63,21 @@
# connection_max_retries = 10
# connection_retry_interval = 5

# SSH Driver specific
# username = ubuntu
# key_path = /opt/stack/.ssh/id_rsa

# REST Driver specific
# bind_host = 0.0.0.0
# bind_port = 9191
# haproxy_cmd = /usr/sbin/haproxy
# respawn_count = 2
# respawn_interval = 2
# Change for production to a ram drive
# haproxy_cert_dir = /tmp
# agent_server_cert = /etc/octavia/certs/server.pem
# agent_server_ca = /etc/octavia/certs/client_ca.pem

[controller_worker]
# amp_active_wait_sec = 10
# Nova parameters to use when booting amphora
@@ -106,4 +121,4 @@
# rpc_thread_pool_size = 2

# Topic (i.e. Queue) Name
# topic = octavia_prov
# topic = octavia_prov
0  octavia/amphorae/backends/agent/__init__.py  Normal file

15  octavia/amphorae/backends/agent/api_server/__init__.py  Normal file
@@ -0,0 +1,15 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

VERSION = '0.5'
140  octavia/amphorae/backends/agent/api_server/amphora_info.py  Normal file
@@ -0,0 +1,140 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import os
import re
import socket
import subprocess

import flask
import netifaces

from octavia.amphorae.backends.agent import api_server
from octavia.amphorae.backends.agent.api_server import util
from octavia.common import constants as consts

LOG = logging.getLogger(__name__)


def compile_amphora_info():
    return flask.jsonify(
        {'hostname': socket.gethostname(),
         'haproxy_version': _get_version_of_installed_package('haproxy'),
         'api_version': api_server.VERSION})


def compile_amphora_details():
    listener_list = util.get_listeners()
    meminfo = _get_meminfo()
    cpu = _cpu()
    st = os.statvfs('/')
    return flask.jsonify(
        {'hostname': socket.gethostname(),
         'haproxy_version': _get_version_of_installed_package('haproxy'),
         'api_version': api_server.VERSION,
         'networks': _get_networks(),
         'active': True,
         'haproxy_count': _count_haproxy_processes(listener_list),
         'cpu': {
             'total': cpu['total'],
             'user': cpu['user'],
             'system': cpu['system'],
             'soft_irq': cpu['softirq'], },
         'memory': {
             'total': meminfo['MemTotal'],
             'free': meminfo['MemFree'],
             'buffers': meminfo['Buffers'],
             'cached': meminfo['Cached'],
             'swap_used': meminfo['SwapCached'],
             'shared': meminfo['Shmem'],
             'slab': meminfo['Slab'], },
         'disk': {
             'used': (st.f_blocks - st.f_bfree) * st.f_frsize,
             'available': st.f_bavail * st.f_frsize},
         'load': [
             _load()],
         'topology': consts.TOPOLOGY_SINGLE,
         'topology_status': consts.TOPOLOGY_STATUS_OK,
         'listeners': listener_list,
         'packages': {}})


def _get_version_of_installed_package(name):
    cmd = "dpkg --status " + name
    out = subprocess.check_output(cmd.split())
    m = re.search('Version: .*', out)
    return m.group(0)[len('Version: '):]


def _get_network_bytes(interface, type):
    file_name = "/sys/class/net/{interface}/statistics/{type}_bytes".format(
        interface=interface, type=type)
    with open(file_name, 'r') as f:
        return f.readline()


def _count_haproxy_processes(listener_list):
    num = 0
    for listener_id in listener_list:
        if util.is_listener_running(listener_id):
            # optional check if it's still running
            num += 1
    return num


def _get_meminfo():
    re_parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB')
    result = dict()
    for line in open('/proc/meminfo'):
        match = re_parser.match(line)
        if not match:
            continue  # skip lines that don't parse
        key, value = match.groups(['key', 'value'])
        result[key] = int(value)
    return result


def _cpu():
    with open('/proc/stat') as f:
        cpu = f.readline()
        vals = cpu.split(' ')
        return {
            'user': vals[2],
            'nice': vals[3],
            'system': vals[4],
            'idle': vals[5],
            'iowait': vals[6],
            'irq': vals[7],
            'softirq': vals[8],
            'total': sum([int(i) for i in vals[2:]])
        }


def _load():
    with open('/proc/loadavg') as f:
        load = f.readline()
        vals = load.split(' ')
        return vals[:3]


def _get_networks():
    networks = dict()
    for interface in netifaces.interfaces():
        if not interface.startswith('eth'):
            continue
        networks[interface] = dict(
            network_tx=_get_network_bytes(interface, 'tx'),
            network_rx=_get_network_bytes(interface, 'rx'))
    return networks
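For orientation, the /details document assembled by compile_amphora_details() has roughly the shape below. Only the keys come from the code above; every value (including the topology constants) is an illustrative placeholder, not output captured from a real amphora:

# Illustrative shape of the JSON built by compile_amphora_details(); values are made up.
EXAMPLE_DETAILS = {
    'hostname': 'amphora-x64', 'haproxy_version': '1.4.24-2', 'api_version': '0.5',
    'networks': {'eth0': {'network_tx': '1200', 'network_rx': '3400'}},
    'active': True, 'haproxy_count': 1,
    'cpu': {'total': 1000, 'user': 120, 'system': 45, 'soft_irq': 1},
    'memory': {'total': 1017880, 'free': 340672, 'buffers': 20948, 'cached': 453576,
               'swap_used': 0, 'shared': 412, 'slab': 36428},
    'disk': {'used': 1054232576, 'available': 1316982784},
    'load': [['0.08', '0.12', '0.10']],
    'topology': 'SINGLE', 'topology_status': 'OK',
    'listeners': ['123e4567-e89b-12d3-a456-426655440000'],
    'packages': {},
}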
369  octavia/amphorae/backends/agent/api_server/listener.py  Normal file
@@ -0,0 +1,369 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import hashlib
import json
import logging
import os
import re
import shutil
import subprocess

import flask
import jinja2
from werkzeug import exceptions

from octavia.amphorae.backends.agent.api_server import util
from octavia.amphorae.backends.utils import haproxy_query as query
from octavia.common import constants as consts

LOG = logging.getLogger(__name__)
BUFFER = 100
HAPROXY_CONF = 'haproxy.conf.j2'

j2_env = jinja2.Environment(loader=jinja2.FileSystemLoader(
    os.path.dirname(os.path.realpath(__file__)) + consts.AGENT_API_TEMPLATES))
template = j2_env.get_template(HAPROXY_CONF)


class ParsingError(Exception):
    pass


# Wrap a stream so we can compute the md5 while reading
class Wrapped(object):
    def __init__(self, stream_):
        self.stream = stream_
        self.hash = hashlib.md5()

    def read(self, l):
        block = self.stream.read(l)
        if block:
            self.hash.update(block)
        return block

    def get_md5(self):
        return self.hash.hexdigest()

    def __getattr__(self, attr):
        return getattr(self.stream, attr)


"""Gets the haproxy config

:param listener_id: the id of the listener
"""


def get_haproxy_config(listener_id):
    _check_listener_exists(listener_id)
    with open(util.config_path(listener_id), 'r') as file:
        cfg = file.read()
        resp = flask.Response(cfg, mimetype='text/plain', )
        resp.headers['ETag'] = hashlib.md5(cfg).hexdigest()
        return resp


"""Upload the haproxy config

:param listener_id: The id of the listener
"""


def upload_haproxy_config(listener_id):
    stream = Wrapped(flask.request.stream)

    if not os.path.exists(util.haproxy_dir(listener_id)):
        os.makedirs(util.haproxy_dir(listener_id))

    name = os.path.join(util.haproxy_dir(listener_id), 'haproxy.cfg.new')
    with open(name, 'w') as file:
        b = stream.read(BUFFER)
        while (b):
            file.write(b)
            b = stream.read(BUFFER)

    # use haproxy to check the config
    cmd = "haproxy -c -f {config_file}".format(config_file=name)

    try:
        subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        LOG.debug("Failed to verify haproxy file: {0}".format(e))
        os.remove(name)  # delete file
        return flask.make_response(flask.jsonify(dict(
            message="Invalid request",
            details=e.output)), 400)

    # file ok - move it
    os.rename(name, util.config_path(listener_id))

    if not os.path.exists(util.upstart_path(listener_id)):
        with open(util.upstart_path(listener_id), 'w') as text_file:
            text = template.render(
                haproxy_pid=util.pid_path(listener_id),
                haproxy_cmd=util.CONF.haproxy_amphora.haproxy_cmd,
                haproxy_cfg=util.config_path(listener_id),
                respawn_count=util.CONF.haproxy_amphora.respawn_count,
                respawn_interval=util.CONF.haproxy_amphora.respawn_interval
            )
            text_file.write(text)

    res = flask.make_response(flask.jsonify({
        'message': 'OK'}), 202)
    res.headers['ETag'] = stream.get_md5()
    return res


def start_stop_listener(listener_id, action):
    action = action.lower()
    if action not in ['start', 'stop', 'reload']:
        return flask.make_response(flask.jsonify(dict(
            message='Invalid Request',
            details="Unknown action: {0}".format(action))), 400)

    _check_listener_exists(listener_id)

    cmd = ("/usr/sbin/service haproxy-{listener_id} {action}".format(
        listener_id=listener_id, action=action))

    try:
        subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        LOG.debug("Failed to {0} HAProxy service: {1}".format(action, e))
        return flask.make_response(flask.jsonify(dict(
            message="Error {0}ing haproxy".format(action),
            details=e.output)), 500)
    if action in ['stop', 'reload']:
        return flask.make_response(flask.jsonify(
            dict(message='OK',
                 details='Listener {listener_id} {action}ed'.format(
                     listener_id=listener_id, action=action))), 202)

    details = (
        'Configuration file is valid\nhaproxy daemon for {0} '.format(
            listener_id) + 'started')

    return flask.make_response(flask.jsonify(
        dict(message='OK',
             details=details)), 202)


def delete_listener(listener_id):
    _check_listener_exists(listener_id)

    # check if that haproxy is still running and if so stop it
    if os.path.exists(util.pid_path(listener_id)) and os.path.exists(
            os.path.join('/proc', util.get_haproxy_pid(listener_id))):
        cmd = "/usr/sbin/service haproxy-{0} stop".format(listener_id)
        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.debug("Failed to stop HAProxy service: {0}".format(e))
            return flask.make_response(flask.jsonify(dict(
                message="Error stopping haproxy",
                details=e.output)), 500)

    # parse config and delete stats socket
    try:
        cfg = _parse_haproxy_file(listener_id)
        os.remove(cfg['stats_socket'])
    except Exception:
        pass

    # delete the ssl files
    try:
        shutil.rmtree(_cert_dir(listener_id))
    except Exception:
        pass

    # delete the directory + upstart script for that listener
    shutil.rmtree(util.haproxy_dir(listener_id))
    if os.path.exists(util.upstart_path(listener_id)):
        os.remove(util.upstart_path(listener_id))

    return flask.jsonify({'message': 'OK'})


"""Gets the status of all listeners

This method will not consult the stats socket
so a listener might show as ACTIVE but still be
in ERROR

Currently type==SSL is also not detected
"""


def get_all_listeners_status():
    listeners = list()

    for listener in util.get_listeners():
        listeners.append({
            'status': _check_listener_status(listener),
            'uuid': listener,
            'type': _parse_haproxy_file(listener)['mode'],
        })

    # Can't use jsonify since lists are not supported
    # for security reason: http://stackoverflow.com/
    # questions/12435297/how-do-i-jsonify-a-list-in-flask
    return flask.Response(json.dumps(listeners),
                          mimetype='application/json')


"""Gets the status of a listener

This method will consult the stats socket,
so calling it will interfere with the health
daemon, with the risk of the amphora being
shut down

Currently type==SSL is not detected
"""


def get_listener_status(listener_id):
    _check_listener_exists(listener_id)

    status = _check_listener_status(listener_id)
    cfg = _parse_haproxy_file(listener_id)
    stats = dict(
        status=status,
        uuid=listener_id,
        type=cfg['mode']
    )
    # not active, don't bother...
    if status != consts.ACTIVE:
        return flask.jsonify(stats)

    # read stats socket
    q = query.HAProxyQuery(cfg['stats_socket'])
    servers = q.get_pool_status()
    stats['pools'] = servers.values()
    return flask.jsonify(stats)


def upload_certificate(listener_id, filename):
    _check_listener_exists(listener_id)
    _check_ssl_filename_format(filename)

    # create directory if not already there
    if not os.path.exists(_cert_dir(listener_id)):
        os.makedirs(_cert_dir(listener_id))

    stream = Wrapped(flask.request.stream)
    with open(_cert_file_path(listener_id, filename), 'w') as crt_file:
        b = stream.read(BUFFER)
        while (b):
            crt_file.write(b)
            b = stream.read(BUFFER)
        os.fchmod(crt_file.fileno(), 0o600)  # only accessible by owner

    resp = flask.jsonify(dict(message='OK'))
    resp.headers['ETag'] = stream.get_md5()
    return resp


def get_certificate_md5(listener_id, filename):
    _check_listener_exists(listener_id)
    _check_ssl_filename_format(filename)

    if not os.path.exists(_cert_file_path(listener_id, filename)):
        return flask.make_response(flask.jsonify(dict(
            message='Certificate Not Found',
            details="No certificate with filename: {f}".format(
                f=filename))), 404)

    with open(_cert_file_path(listener_id, filename), 'r') as crt_file:
        cert = crt_file.read()
        md5 = hashlib.md5(cert).hexdigest()
        resp = flask.jsonify(dict(md5sum=md5))
        resp.headers['ETag'] = md5
        return resp


def delete_certificate(listener_id, filename):
    _check_ssl_filename_format(filename)
    if not os.path.exists(_cert_file_path(listener_id, filename)):
        return flask.make_response(flask.jsonify(dict(
            message='Certificate Not Found',
            details="No certificate with filename: {f}".format(
                f=filename))), 404)

    os.remove(_cert_file_path(listener_id, filename))
    return flask.jsonify(dict(message='OK'))


def _check_listener_status(listener_id):
    if os.path.exists(util.pid_path(listener_id)):
        if os.path.exists(
                os.path.join('/proc', util.get_haproxy_pid(listener_id))):
            return consts.ACTIVE
        else:  # pid file but no process...
            return consts.ERROR
    else:
        return consts.OFFLINE


def _parse_haproxy_file(listener_id):
    with open(util.config_path(listener_id), 'r') as file:
        cfg = file.read()

        m = re.search('mode\s+(http|tcp)', cfg)
        if not m:
            raise ParsingError()
        mode = m.group(1).upper()

        m = re.search('stats socket\s+(\S+)', cfg)
        if not m:
            raise ParsingError()
        stats_socket = m.group(1)

        m = re.search('ssl crt\s+(\S+)', cfg)
        ssl_crt = None
        if m:
            ssl_crt = m.group(1)
            mode = 'TERMINATED_HTTPS'

        return dict(mode=mode,
                    stats_socket=stats_socket,
                    ssl_crt=ssl_crt)


def _check_listener_exists(listener_id):
    # check if we know about that listener
    if not os.path.exists(util.config_path(listener_id)):
        raise exceptions.HTTPException(
            response=flask.make_response(flask.jsonify(dict(
                message='Listener Not Found',
                details="No listener with UUID: {0}".format(
                    listener_id))), 404))


def _check_ssl_filename_format(filename):
    # check if the format is (xxx.)*xxx.pem
    if not re.search('(\w.)+pem', filename):
        raise exceptions.HTTPException(
            response=flask.make_response(flask.jsonify(dict(
                message='Filename has wrong format')), 400))


def _cert_dir(listener_id):
    return os.path.join(util.CONF.haproxy_amphora.haproxy_cert_dir,
                        listener_id)


def _cert_file_path(listener_id, filename):
    return os.path.join(_cert_dir(listener_id), filename)
125  octavia/amphorae/backends/agent/api_server/plug.py  Normal file
@@ -0,0 +1,125 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import os
import socket
import subprocess

import flask
import jinja2
import netifaces
from werkzeug import exceptions

from octavia.amphorae.backends.agent.api_server import util
from octavia.common import constants as consts

ETH_PORT_CONF = 'plug_vip_ethX.conf.j2'

ETH_X_VIP_CONF = 'plug_port_ethX.conf.j2'

LOG = logging.getLogger(__name__)

j2_env = jinja2.Environment(loader=jinja2.FileSystemLoader(
    os.path.dirname(os.path.realpath(__file__)) + consts.AGENT_API_TEMPLATES))
template_port = j2_env.get_template(ETH_X_VIP_CONF)
template_vip = j2_env.get_template(ETH_PORT_CONF)


def plug_vip(vip):
    # validate vip
    try:
        socket.inet_aton(vip)
    except socket.error:
        return flask.make_response(flask.jsonify(dict(
            message="Invalid VIP")), 400)

    interface = _interface_down()

    # assume for now only a fixed subnet size
    sections = vip.split('.')[:3]
    sections.append('255')
    broadcast = '.'.join(sections)

    # write interface file
    with open(util.get_network_interface_file(interface), 'w') as text_file:
        text = template_vip.render(
            interface=interface,
            vip=vip,
            broadcast=broadcast,
            # assume for now only a fixed subnet size
            netmask='255.255.255.0')
        text_file.write(text)

    # bring interfaces up
    _bring_if_down("{interface}".format(interface=interface))
    _bring_if_down("{interface}:0".format(interface=interface))
    _bring_if_up("{interface}".format(interface=interface), 'VIP')
    _bring_if_up("{interface}:0".format(interface=interface), 'VIP')

    return flask.make_response(flask.jsonify(dict(
        message="OK",
        details="VIP {vip} plugged on interface {interface}".format(
            vip=vip, interface=interface))), 202)


def plug_network():
    interface = _interface_down()

    # write interface file
    with open(util.get_network_interface_file(interface), 'w') as text_file:
        text = template_port.render(interface=interface)
        text_file.write(text)

    _bring_if_down("{interface}:0".format(interface=interface))
    _bring_if_up("{interface}:0".format(interface=interface), 'network')

    return flask.make_response(flask.jsonify(dict(
        message="OK",
        details="Plugged on interface {interface}".format(
            interface=interface))), 202)


def _interface_down():
    # Find the interface which is down
    down = [interface for interface in netifaces.interfaces() if
            netifaces.AF_INET not in netifaces.ifaddresses(interface)]
    if len(down) != 1:
        # There should only be ONE interface being plugged; if there is
        # none down or more than one we have a problem...
        raise exceptions.HTTPException(
            response=flask.make_response(flask.jsonify(dict(
                details="No suitable network interface found")), 404))
    return down[0]


def _bring_if_up(params, what):
    # bring interface up
    cmd = "ifup {params}".format(params=params)
    try:
        subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        LOG.debug("Failed to if up {0}".format(e))
        raise exceptions.HTTPException(
            response=flask.make_response(flask.jsonify(dict(
                message='Error plugging {0}'.format(what),
                details=e.output)), 500))


def _bring_if_down(params):
    cmd = "ifdown {params}".format(params=params)
    try:
        subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        pass
118  octavia/amphorae/backends/agent/api_server/server.py  Normal file
@@ -0,0 +1,118 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

import flask
from werkzeug import exceptions

from octavia.amphorae.backends.agent import api_server
from octavia.amphorae.backends.agent.api_server import amphora_info
from octavia.amphorae.backends.agent.api_server import listener
from octavia.amphorae.backends.agent.api_server import plug

LOG = logging.getLogger(__name__)

app = flask.Flask(__name__)


# make the error pages all json
def make_json_error(ex):
    code = ex.code if isinstance(ex, exceptions.HTTPException) else 500
    response = flask.jsonify({'error': str(ex), 'http_code': code})
    response.status_code = code
    return response


for code in exceptions.default_exceptions.iterkeys():
    app.error_handler_spec[None][code] = make_json_error


# Tested with curl -k -XPUT --data-binary @/tmp/test.txt
# https://127.0.0.1:8443/0.5/listeners/123/haproxy
@app.route('/' + api_server.VERSION + '/listeners/<listener_id>/haproxy',
           methods=['PUT'])
def upload_haproxy_config(listener_id):
    return listener.upload_haproxy_config(listener_id)


@app.route('/' + api_server.VERSION + '/listeners/<listener_id>/haproxy',
           methods=['GET'])
def get_haproxy_config(listener_id):
    return listener.get_haproxy_config(listener_id)


@app.route('/' + api_server.VERSION +
           '/listeners/<listener_id>/<action>',
           methods=['PUT'])
def start_stop_listener(listener_id, action):
    return listener.start_stop_listener(listener_id, action)


@app.route('/' + api_server.VERSION +
           '/listeners/<listener_id>', methods=['DELETE'])
def delete_listener(listener_id):
    return listener.delete_listener(listener_id)


@app.route('/' + api_server.VERSION + '/details',
           methods=['GET'])
def get_details():
    return amphora_info.compile_amphora_details()


@app.route('/' + api_server.VERSION + '/info',
           methods=['GET'])
def get_info():
    return amphora_info.compile_amphora_info()


@app.route('/' + api_server.VERSION + '/listeners',
           methods=['GET'])
def get_all_listeners_status():
    return listener.get_all_listeners_status()


@app.route('/' + api_server.VERSION + '/listeners/<listener_id>',
           methods=['GET'])
def get_listener_status(listener_id):
    return listener.get_listener_status(listener_id)


@app.route('/' + api_server.VERSION + '/listeners/<listener_id>/certificates'
           + '/<filename>', methods=['PUT'])
def upload_certificate(listener_id, filename):
    return listener.upload_certificate(listener_id, filename)


@app.route('/' + api_server.VERSION + '/listeners/<listener_id>/certificates'
           + '/<filename>', methods=['GET'])
def get_certificate_md5(listener_id, filename):
    return listener.get_certificate_md5(listener_id, filename)


@app.route('/' + api_server.VERSION + '/listeners/<listener_id>/certificates'
           + '/<filename>', methods=['DELETE'])
def delete_certificate(listener_id, filename):
    return listener.delete_certificate(listener_id, filename)


@app.route('/' + api_server.VERSION + '/plug/vip/<vip>', methods=['POST'])
def plug_vip(vip):
    return plug.plug_vip(vip)


@app.route('/' + api_server.VERSION + '/plug/network', methods=['POST'])
def plug_network():
    return plug.plug_network()
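The routes above map one-to-one onto the listener, amphora_info, and plug helpers. A hedged sketch of driving them from Python, assuming the `requests` package is available; the host, port, certificate paths, and listener UUID are placeholders, not values fixed by this change:

# Hypothetical client calls against the agent API defined above.
# The endpoint paths come from the routes; everything else is a placeholder.
import requests

BASE = "https://192.0.2.10:8443/0.5"
TLS = dict(cert=("client.pem", "client.key"), verify="ca_01.pem")
LISTENER = "123e4567-e89b-12d3-a456-426655440000"

with open("haproxy.cfg", "rb") as cfg:
    # upload + syntax-check the listener's haproxy config
    requests.put("{0}/listeners/{1}/haproxy".format(BASE, LISTENER), data=cfg, **TLS)
requests.put("{0}/listeners/{1}/start".format(BASE, LISTENER), **TLS)   # start haproxy
print(requests.get("{0}/listeners".format(BASE), **TLS).json())         # status of all listeners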
@@ -0,0 +1,54 @@
{#
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Inspired by https://gist.github.com/gfrey/8472007
#}

description "Properly handle haproxy"

start on startup

env PID_PATH={{ haproxy_pid }}
env BIN_PATH={{ haproxy_cmd }}
env CONF_PATH={{ haproxy_cfg }}

respawn
respawn limit {{ respawn_count }} {{ respawn_interval }}

pre-start script
    [ -r $CONF_PATH ]
end script

script
exec /bin/bash <<EOF
echo \$(date) Starting HAProxy
$BIN_PATH -f $CONF_PATH -D -p $PID_PATH

trap "$BIN_PATH -f $CONF_PATH -p $PID_PATH -sf \\\$(cat $PID_PATH)" SIGHUP
trap "kill -TERM \\\$(cat $PID_PATH) && rm $PID_PATH;echo \\\$(date) Exiting HAProxy; exit 0" SIGTERM SIGINT

while true; do # Iterate to keep job running.

    # Check if HAProxy has failed and re-spawn
    kill -0 \$(cat $PID_PATH)
    if [ \$? -ne 0 ]; then
        echo \$(date) HAProxy failed. Respawning
        exit 1
    fi

    sleep 1 # Don't sleep too long as signals will not be handled during sleep.
done
EOF
end script
@@ -0,0 +1,18 @@
{#
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#}
# Generated by Octavia agent
auto {{ interface }} {{ interface }}:0
iface {{ interface }} inet dhcp
@@ -0,0 +1,22 @@
{#
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#}
# Generated by Octavia agent
auto {{ interface }} {{ interface }}:0
iface {{ interface }} inet dhcp
iface {{ interface }}:0 inet static
address {{ vip }}
broadcast {{ broadcast }}
netmask {{ netmask }}
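For reference, the static-VIP template above can be rendered the same way plug_vip() does it. A minimal sketch; the loader path and all values are placeholders (the agent itself resolves the template directory through consts.AGENT_API_TEMPLATES):

# Render the VIP interface template roughly as plug_vip() does.
# The 'templates' directory and the network values are illustrative assumptions.
import jinja2

env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
tmpl = env.get_template('plug_vip_ethX.conf.j2')
print(tmpl.render(interface='eth1', vip='10.0.0.5',
                  broadcast='10.0.0.255', netmask='255.255.255.0'))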
67  octavia/amphorae/backends/agent/api_server/util.py  Normal file
@@ -0,0 +1,67 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import os

from oslo.config import cfg

CONF = cfg.CONF
CONF.import_group('haproxy_amphora', 'octavia.common.config')
UPSTART_DIR = '/etc/init'


def upstart_path(listener_id):
    return os.path.join(UPSTART_DIR, ('haproxy-{0}.conf'.format(listener_id)))


def haproxy_dir(listener_id):
    return os.path.join(CONF.haproxy_amphora.base_path, listener_id)


def pid_path(listener_id):
    return os.path.join(haproxy_dir(listener_id), 'haproxy.pid')


def config_path(listener_id):
    return os.path.join(haproxy_dir(listener_id), 'haproxy.cfg')


def get_haproxy_pid(listener_id):
    with open(pid_path(listener_id), 'r') as f:
        return f.readline().rstrip()


"""Get Listeners

:returns An array with the ids of all listeners, e.g. ['123', '456', ...]
or [] if no listeners exist
"""


def get_listeners():
    if os.path.exists(CONF.haproxy_amphora.base_path):
        return [f for f in os.listdir(CONF.haproxy_amphora.base_path)
                if os.path.exists(config_path(f))]
    return []


def is_listener_running(listener_id):
    return os.path.exists(pid_path(listener_id)) and os.path.exists(
        os.path.join('/proc', get_haproxy_pid(listener_id)))


def get_network_interface_file(interface):
    return os.path.join(CONF.haproxy_amphora.agent_server_network_dir,
                        interface + '.cfg')
0
octavia/amphorae/backends/utils/__init__.py
Normal file
0
octavia/amphorae/backends/utils/__init__.py
Normal file
128
octavia/amphorae/backends/utils/haproxy_query.py
Normal file
128
octavia/amphorae/backends/utils/haproxy_query.py
Normal file
@ -0,0 +1,128 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import socket

from octavia.common import constants as consts


class HAProxyQuery(object):
    """Class used for querying the HAProxy statistics socket.

    The CSV output is defined in the HAProxy documentation:

    http://cbonte.github.io/haproxy-dconv/configuration-1.4.html#9
    """

    def __init__(self, stats_socket):
        """:param stats_socket: Path to the HAProxy statistics socket file."""
        self.socket = stats_socket

    def _query(self, query):
        """Send the given query to the haproxy statistics socket.

        :returns: the output of a successful query as a string with trailing
                  newlines removed, or raise an Exception if the query fails.
        """

        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

        try:
            sock.connect(self.socket)
        except socket.error:
            raise Exception("HAProxy '{0}' query failed.".format(query))

        try:
            sock.send(query + '\n')
            data = ''
            while True:
                x = sock.recv(1024)
                if not x:
                    break
                data += x
            return data.rstrip()
        finally:
            sock.close()

    def show_info(self):
        """Get and parse output from 'show info' command."""
        results = self._query('show info')
        list_results = results.split('\n')
        return list_results

    def show_stat(self, proxy_iid=-1, object_type=-1, server_id=-1):
        """Get and parse output from 'show stat' command.

        :param proxy_iid: Proxy ID (column 27 in CSV output). -1 for all.
        :param object_type: Select the type of dumpable object. Values can
                            be ORed.
                            -1 - everything
                            1 - frontends
                            2 - backends
                            4 - servers
        :param server_id: Server ID (column 28 in CSV output), or -1 for
                          everything.
        :returns: stats (split into an array by newline)
        """

        results = self._query(
            'show stat {proxy_iid} {object_type} {server_id}'.format(
                proxy_iid=proxy_iid,
                object_type=object_type,
                server_id=server_id))
        list_results = results.split('\n')
        return list_results

    def get_pool_status(self):
        """Get status for each server and the pool as a whole.

        :returns: pool data structure
                  {<pool-name>: {
                      'uuid': <uuid>,
                      'status': 'UP'|'DOWN',
                      'members': [{<name>: 'UP'|'DOWN'}]}}
        """

        results = self.show_stat(object_type=6)  # servers + pool

        final_results = {}
        for line in results[1:]:
            elements = line.split(',')
            # 0 - pool name, 1 - server name, 17 - status

            # Anything that is not UP is reported as DOWN
            if elements[17] != consts.AMPHORA_UP:
                elements[17] = consts.AMPHORA_DOWN

            if elements[0] not in final_results:
                final_results[elements[0]] = dict(members=[])

            if elements[1] == 'BACKEND':
                final_results[elements[0]]['uuid'] = elements[0]
                final_results[elements[0]]['status'] = elements[17]
            else:
                final_results[elements[0]]['members'].append(
                    {elements[1]: elements[17]})
        return final_results
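A minimal usage sketch of the class above; the socket path is illustrative and would normally come from the stats_socket entry of a listener's haproxy.cfg:

# Hypothetical usage sketch, not part of the change set.
from octavia.amphorae.backends.utils import haproxy_query

q = haproxy_query.HAProxyQuery('/var/lib/octavia/123/haproxy.sock')
print(q.show_info())        # raw 'show info' output, one list entry per line
print(q.get_pool_status())  # {'<pool>': {'uuid': ..., 'status': 'UP'|'DOWN',
                            #             'members': [{'<server>': 'UP'|'DOWN'}]}}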
72
octavia/cmd/agent.py
Executable file
@ -0,0 +1,72 @@
#!/usr/bin/env python

# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# make sure PYTHONPATH includes the home directory if you didn't install

import logging
import ssl
import sys

from oslo_config import cfg
from werkzeug import serving

from octavia.amphorae.backends.agent.api_server import server
from octavia.common import service

LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_group('haproxy_amphora', 'octavia.common.config')


# Hack: use werkzeug's SSL context so we can require client certificates.
# See also http://stackoverflow.com/questions/23262768/
# two-way-ssl-authentication-for-flask
class OctaviaSSLContext(serving._SSLContext):
    def __init__(self, protocol):
        self._ca_certs = None
        super(OctaviaSSLContext, self).__init__(protocol)

    def load_cert_chain(self, certfile, keyfile=None, password=None, ca=None):
        self._ca_certs = ca
        super(OctaviaSSLContext, self).load_cert_chain(
            certfile, keyfile, password)

    def wrap_socket(self, sock, **kwargs):
        return super(OctaviaSSLContext, self).wrap_socket(
            sock,
            # Comment out for debugging if you want to connect without
            # a client cert
            cert_reqs=ssl.CERT_REQUIRED,
            ca_certs=self._ca_certs
        )


# start api server
def main():
    # comment out to improve logging
    service.prepare_service(sys.argv)

    # We will only enforce that the client cert is from the good authority
    # todo(german): Watch this space for security improvements
    ctx = OctaviaSSLContext(ssl.PROTOCOL_SSLv23)

    ctx.load_cert_chain(CONF.haproxy_amphora.agent_server_cert,
                        ca=CONF.haproxy_amphora.agent_server_ca)

    server.app.run(host=CONF.haproxy_amphora.bind_host,
                   port=CONF.haproxy_amphora.bind_port,
                   debug=CONF.debug,
                   ssl_context=ctx)
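For illustration, a client-side sketch of talking to the agent started by main(); the host and certificate file names are placeholders, and the requests library is not something this change adds:

# Hypothetical client sketch, not part of the change set.
import requests

from octavia.amphorae.backends.agent import api_server

r = requests.get(
    'https://192.0.2.10:8443/' + api_server.VERSION + '/info',
    cert=('client.pem', 'client.key'),  # client cert/key signed by agent_server_ca
    verify='ca.pem')                    # CA that signed the server certificate
print(r.json())  # e.g. {'api_version': ..., 'haproxy_version': ..., 'hostname': ...}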
@ -110,9 +110,32 @@ haproxy_amphora_opts = [
    cfg.IntOpt('connection_max_retries',
               default=10,
               help=_('Retry threshold for connecting to amphorae.')),
    cfg.FloatOpt('connection_retry_interval',
                 default=1,
                 help=_('Retry timeout between attempts.'))
    cfg.IntOpt('connection_retry_interval',
               default=5,
               help=_('Retry timeout between attempts in seconds.')),

    # REST server
    cfg.StrOpt('bind_host', default='0.0.0.0',
               help=_("The host IP to bind to")),
    cfg.IntOpt('bind_port', default=8443,
               help=_("The port to bind to")),
    cfg.StrOpt('haproxy_cmd', default='/usr/sbin/haproxy',
               help=_("The full path to haproxy")),
    cfg.IntOpt('respawn_count', default=2,
               help=_("The respawn count for haproxy's upstart script")),
    cfg.IntOpt('respawn_interval', default=2,
               help=_("The respawn interval for haproxy's upstart script")),
    cfg.StrOpt('haproxy_cert_dir', default='/tmp/',
               help=_("The directory to store haproxy cert files in")),
    cfg.StrOpt('agent_server_cert', default='/etc/octavia/certs/server.pem',
               help=_("The server certificate for the agent.py server "
                      "to use")),
    cfg.StrOpt('agent_server_ca', default='/etc/octavia/certs/client_ca.pem',
               help=_("The ca which signed the client certificates")),
    cfg.StrOpt('agent_server_network_dir',
               default='/etc/network/interfaces.d/',
               help=_("The directory where new network interfaces "
                      "are located"))
]

controller_worker_opts = [
@ -171,7 +194,6 @@ cfg.CONF.register_cli_opts(core_cli_opts)
cfg.CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token')
cfg.CONF.register_opts(keystone_authtoken_v3_opts,
                       group='keystone_authtoken_v3')

# Ensure that the control exchange is set correctly
messaging.set_transport_defaults(control_exchange='octavia')
_SQL_CONNECTION_DEFAULT = 'sqlite://'
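A sketch of how the modules in this change pick up the new [haproxy_amphora] options; the values in the comments are the defaults registered above:

# Hypothetical usage sketch, not part of the change set.
from oslo_config import cfg

CONF = cfg.CONF
CONF.import_group('haproxy_amphora', 'octavia.common.config')

bind = (CONF.haproxy_amphora.bind_host,
        CONF.haproxy_amphora.bind_port)               # ('0.0.0.0', 8443)
server_cert = CONF.haproxy_amphora.agent_server_cert  # /etc/octavia/certs/server.pem
client_ca = CONF.haproxy_amphora.agent_server_ca      # /etc/octavia/certs/client_ca.pem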
@ -122,3 +122,8 @@ NOVA_3 = '3'
NOVA_VERSIONS = (NOVA_1, NOVA_2, NOVA_3)

RPC_NAMESPACE_CONTROLLER_AGENT = 'controller'

TOPOLOGY_SINGLE = 'SINGLE'
TOPOLOGY_STATUS_OK = 'OK'

AGENT_API_TEMPLATES = '/templates'
0
octavia/tests/functional/amphorae/__init__.py
Normal file
@ -0,0 +1,560 @@
|
||||
# Copyright 2015 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import subprocess
|
||||
|
||||
import mock
|
||||
import netifaces
|
||||
|
||||
from octavia.amphorae.backends.agent import api_server
|
||||
from octavia.amphorae.backends.agent.api_server import server
|
||||
from octavia.amphorae.backends.agent.api_server import util
|
||||
from octavia.common import constants as consts
|
||||
import octavia.tests.unit.base as base
|
||||
|
||||
RANDOM_ERROR = 'random error'
|
||||
OK = dict(message='OK')
|
||||
|
||||
|
||||
class ServerTestCase(base.TestCase):
|
||||
def setUp(self):
|
||||
self.app = server.app.test_client()
|
||||
super(ServerTestCase, self).setUp()
|
||||
|
||||
@mock.patch('os.path.exists')
|
||||
@mock.patch('os.makedirs')
|
||||
@mock.patch('os.rename')
|
||||
@mock.patch('subprocess.check_output')
|
||||
@mock.patch('os.remove')
|
||||
def test_haproxy(self, mock_remove, mock_subprocess, mock_rename,
|
||||
mock_makedirs, mock_exists):
|
||||
mock_exists.return_value = True
|
||||
m = mock.mock_open()
|
||||
|
||||
# happy case upstart file exists
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
rv = self.app.put('/' + api_server.VERSION +
|
||||
'/listeners/123/haproxy', data='test')
|
||||
self.assertEqual(202, rv.status_code)
|
||||
m.assert_called_once_with(
|
||||
'/var/lib/octavia/123/haproxy.cfg.new', 'w')
|
||||
handle = m()
|
||||
handle.write.assert_called_once_with('test')
|
||||
mock_subprocess.assert_called_once_with(
|
||||
"haproxy -c -f /var/lib/octavia/123/haproxy.cfg.new".split(),
|
||||
stderr=-2)
|
||||
mock_rename.assert_called_once_with(
|
||||
'/var/lib/octavia/123/haproxy.cfg.new',
|
||||
'/var/lib/octavia/123/haproxy.cfg')
|
||||
|
||||
# exception writing
|
||||
m = mock.Mock()
|
||||
m.side_effect = Exception() # open crashes
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
rv = self.app.put('/' + api_server.VERSION +
|
||||
'/listeners/123/haproxy', data='test')
|
||||
self.assertEqual(500, rv.status_code)
|
||||
|
||||
# check if files get created
|
||||
mock_exists.return_value = False
|
||||
m = mock.mock_open()
|
||||
|
||||
# happy case upstart file exists
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
rv = self.app.put('/' + api_server.VERSION +
|
||||
'/listeners/123/haproxy', data='test')
|
||||
self.assertEqual(202, rv.status_code)
|
||||
m.assert_any_call('/var/lib/octavia/123/haproxy.cfg.new', 'w')
|
||||
m.assert_any_call(util.UPSTART_DIR + '/haproxy-123.conf', 'w')
|
||||
handle = m()
|
||||
handle.write.assert_any_call('test')
|
||||
# skip the template stuff
|
||||
mock_makedirs.assert_called_with('/var/lib/octavia/123')
|
||||
|
||||
# unhappy case haproxy check fails
|
||||
mock_exists.return_value = True
|
||||
mock_subprocess.side_effect = [subprocess.CalledProcessError(
|
||||
7, 'test', RANDOM_ERROR)]
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
rv = self.app.put('/' + api_server.VERSION +
|
||||
'/listeners/123/haproxy', data='test')
|
||||
self.assertEqual(400, rv.status_code)
|
||||
self.assertEqual(
|
||||
{'message': 'Invalid request', u'details': u'random error'},
|
||||
json.loads(rv.data))
|
||||
m.assert_called_with('/var/lib/octavia/123/haproxy.cfg.new', 'w')
|
||||
handle = m()
|
||||
handle.write.assert_called_with('test')
|
||||
mock_subprocess.assert_called_with(
|
||||
"haproxy -c -f /var/lib/octavia/123/haproxy.cfg.new".split(),
|
||||
stderr=-2)
|
||||
mock_remove.assert_called_once_with(
|
||||
'/var/lib/octavia/123/haproxy.cfg.new')
|
||||
|
||||
@mock.patch('os.path.exists')
|
||||
@mock.patch('subprocess.check_output')
|
||||
def test_start(self, mock_subprocess, mock_exists):
|
||||
rv = self.app.put('/' + api_server.VERSION + '/listeners/123/error')
|
||||
self.assertEqual(400, rv.status_code)
|
||||
self.assertEqual(
|
||||
{'message': 'Invalid Request',
|
||||
'details': 'Unknown action: error', },
|
||||
json.loads(rv.data))
|
||||
|
||||
mock_exists.return_value = False
|
||||
rv = self.app.put('/' + api_server.VERSION + '/listeners/123/start')
|
||||
self.assertEqual(404, rv.status_code)
|
||||
self.assertEqual(
|
||||
{'message': 'Listener Not Found',
|
||||
'details': 'No listener with UUID: 123'},
|
||||
json.loads(rv.data))
|
||||
mock_exists.assert_called_with('/var/lib/octavia/123/haproxy.cfg')
|
||||
|
||||
mock_exists.return_value = True
|
||||
rv = self.app.put('/' + api_server.VERSION + '/listeners/123/start')
|
||||
self.assertEqual(202, rv.status_code)
|
||||
self.assertEqual(
|
||||
{'message': 'OK',
|
||||
'details': 'Configuration file is valid\nhaproxy daemon for'
|
||||
+ ' 123 started'},
|
||||
json.loads(rv.data))
|
||||
mock_subprocess.assert_called_with(
|
||||
['/usr/sbin/service', 'haproxy-123', 'start'], stderr=-2)
|
||||
|
||||
mock_exists.return_value = True
|
||||
mock_subprocess.side_effect = subprocess.CalledProcessError(
|
||||
7, 'test', RANDOM_ERROR)
|
||||
rv = self.app.put('/' + api_server.VERSION + '/listeners/123/start')
|
||||
self.assertEqual(500, rv.status_code)
|
||||
self.assertEqual(
|
||||
{
|
||||
'message': 'Error starting haproxy',
|
||||
'details': RANDOM_ERROR,
|
||||
}, json.loads(rv.data))
|
||||
mock_subprocess.assert_called_with(
|
||||
['/usr/sbin/service', 'haproxy-123', 'start'], stderr=-2)
|
||||
|
||||
@mock.patch('socket.gethostname')
|
||||
@mock.patch('subprocess.check_output')
|
||||
    def test_info(self, mock_subprocess, mock_hostname):
|
||||
mock_hostname.side_effect = ['test-host']
|
||||
        mock_subprocess.side_effect = [
|
||||
"""Package: haproxy
|
||||
Status: install ok installed
|
||||
Priority: optional
|
||||
Section: net
|
||||
Installed-Size: 803
|
||||
Maintainer: Ubuntu Developers
|
||||
Architecture: amd64
|
||||
Version: 1.4.24-2
|
||||
"""]
|
||||
rv = self.app.get('/' + api_server.VERSION + '/info')
|
||||
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertEqual(dict(
|
||||
api_version='0.5',
|
||||
haproxy_version='1.4.24-2',
|
||||
hostname='test-host'),
|
||||
json.loads(rv.data))
|
||||
|
||||
@mock.patch('os.path.exists')
|
||||
@mock.patch('subprocess.check_output')
|
||||
@mock.patch('octavia.amphorae.backends.agent.api_server.util.' +
|
||||
'get_haproxy_pid')
|
||||
@mock.patch('shutil.rmtree')
|
||||
@mock.patch('os.remove')
|
||||
def test_delete_listener(self, mock_remove, mock_rmtree, mock_pid,
|
||||
mock_check_output, mock_exists):
|
||||
mock_exists.return_value = False
|
||||
rv = self.app.delete('/' + api_server.VERSION + '/listeners/123')
|
||||
self.assertEqual(404, rv.status_code)
|
||||
self.assertEqual(
|
||||
{'message': 'Listener Not Found',
|
||||
'details': 'No listener with UUID: 123'},
|
||||
json.loads(rv.data))
|
||||
mock_exists.assert_called_with('/var/lib/octavia/123/haproxy.cfg')
|
||||
|
||||
# service is stopped + no upstart script
|
||||
mock_exists.side_effect = [True, False, False]
|
||||
rv = self.app.delete('/' + api_server.VERSION + '/listeners/123')
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertEqual({u'message': u'OK'},
|
||||
json.loads(rv.data))
|
||||
mock_rmtree.assert_called_with('/var/lib/octavia/123')
|
||||
mock_exists.assert_called_with('/etc/init/haproxy-123.conf')
|
||||
        mock_exists.assert_any_call('/var/lib/octavia/123/haproxy.pid')
|
||||
|
||||
# service is stopped + upstart script
|
||||
mock_exists.side_effect = [True, False, True]
|
||||
rv = self.app.delete('/' + api_server.VERSION + '/listeners/123')
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertEqual({u'message': u'OK'},
|
||||
json.loads(rv.data))
|
||||
mock_remove.assert_called_once_with('/etc/init/haproxy-123.conf')
|
||||
|
||||
# service is running + upstart script
|
||||
mock_exists.side_effect = [True, True, True, True]
|
||||
mock_pid.return_value = '456'
|
||||
rv = self.app.delete('/' + api_server.VERSION + '/listeners/123')
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertEqual({u'message': u'OK'},
|
||||
json.loads(rv.data))
|
||||
mock_pid.assert_called_once_with('123')
|
||||
mock_check_output.assert_called_once_with(
|
||||
['/usr/sbin/service', 'haproxy-123', 'stop'], stderr=-2)
|
||||
|
||||
# service is running + stopping fails
|
||||
mock_exists.side_effect = [True, True, True]
|
||||
mock_check_output.side_effect = subprocess.CalledProcessError(
|
||||
7, 'test', RANDOM_ERROR)
|
||||
rv = self.app.delete('/' + api_server.VERSION + '/listeners/123')
|
||||
self.assertEqual(500, rv.status_code)
|
||||
self.assertEqual(
|
||||
{'details': 'random error', 'message': 'Error stopping haproxy'},
|
||||
json.loads(rv.data))
|
||||
# that's the last call before exception
|
||||
mock_exists.assert_called_with('/proc/456')
|
||||
|
||||
@mock.patch('os.path.exists')
|
||||
def test_get_haproxy(self, mock_exists):
|
||||
CONTENT = "bibble\nbibble"
|
||||
mock_exists.side_effect = [False]
|
||||
rv = self.app.get('/' + api_server.VERSION + '/listeners/123/haproxy')
|
||||
self.assertEqual(404, rv.status_code)
|
||||
|
||||
mock_exists.side_effect = [True]
|
||||
m = mock.mock_open(read_data=CONTENT)
|
||||
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
rv = self.app.get('/' + api_server.VERSION +
|
||||
'/listeners/123/haproxy')
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertEqual(CONTENT, rv.data)
|
||||
self.assertEqual('text/plain; charset=utf-8',
|
||||
rv.headers['Content-Type'])
|
||||
|
||||
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
|
||||
+ 'get_listeners')
|
||||
@mock.patch('octavia.amphorae.backends.agent.api_server.listener.'
|
||||
+ '_check_listener_status')
|
||||
@mock.patch('octavia.amphorae.backends.agent.api_server.listener.'
|
||||
+ '_parse_haproxy_file')
|
||||
def test_get_all_listeners(self, mock_parse, mock_status, mock_listener):
|
||||
# no listeners
|
||||
mock_listener.side_effect = [[]]
|
||||
rv = self.app.get('/' + api_server.VERSION + '/listeners')
|
||||
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertFalse(json.loads(rv.data))
|
||||
|
||||
# one listener ACTIVE
|
||||
mock_listener.side_effect = [['123']]
|
||||
mock_parse.side_effect = [{'mode': 'test'}]
|
||||
mock_status.side_effect = [consts.ACTIVE]
|
||||
rv = self.app.get('/' + api_server.VERSION + '/listeners')
|
||||
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertEqual(
|
||||
[{'status': consts.ACTIVE, 'type': 'test', 'uuid': '123'}],
|
||||
json.loads(rv.data))
|
||||
|
||||
        # two listeners, one ACTIVE, one ERROR
|
||||
mock_listener.side_effect = [['123', '456']]
|
||||
mock_parse.side_effect = [{'mode': 'test'}, {'mode': 'http'}]
|
||||
mock_status.side_effect = [consts.ACTIVE, consts.ERROR]
|
||||
rv = self.app.get('/' + api_server.VERSION + '/listeners')
|
||||
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertEqual(
|
||||
[{'status': consts.ACTIVE, 'type': 'test', 'uuid': '123'},
|
||||
{'status': consts.ERROR, 'type': 'http', 'uuid': '456'}],
|
||||
json.loads(rv.data))
|
||||
|
||||
@mock.patch('octavia.amphorae.backends.agent.api_server.listener.'
|
||||
+ '_check_listener_status')
|
||||
@mock.patch('octavia.amphorae.backends.agent.api_server.listener.'
|
||||
+ '_parse_haproxy_file')
|
||||
@mock.patch('octavia.amphorae.backends.utils.haproxy_query.HAProxyQuery')
|
||||
@mock.patch('os.path.exists')
|
||||
def test_get_listener(self, mock_exists, mock_query, mock_parse,
|
||||
mock_status):
|
||||
# Listener not found
|
||||
mock_exists.side_effect = [False]
|
||||
rv = self.app.get('/' + api_server.VERSION + '/listeners/123')
|
||||
self.assertEqual(404, rv.status_code)
|
||||
self.assertEqual(
|
||||
{'message': 'Listener Not Found',
|
||||
'details': 'No listener with UUID: 123'},
|
||||
json.loads(rv.data))
|
||||
|
||||
# Listener not ACTIVE
|
||||
mock_parse.side_effect = [dict(mode='test')]
|
||||
mock_status.side_effect = [consts.ERROR]
|
||||
mock_exists.side_effect = [True]
|
||||
rv = self.app.get('/' + api_server.VERSION + '/listeners/123')
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertEqual(dict(
|
||||
status=consts.ERROR,
|
||||
type='test',
|
||||
uuid='123'), json.loads(rv.data))
|
||||
|
||||
# Listener ACTIVE
|
||||
mock_parse.side_effect = [dict(mode='test', stats_socket='blah')]
|
||||
mock_status.side_effect = [consts.ACTIVE]
|
||||
mock_exists.side_effect = [True]
|
||||
mock_pool = mock.Mock()
|
||||
mock_query.side_effect = [mock_pool]
|
||||
mock_pool.get_pool_status.side_effect = [
|
||||
{'tcp-servers': {
|
||||
'status': 'DOWN',
|
||||
'uuid': 'tcp-servers',
|
||||
'members': [
|
||||
{'id-34833': 'DOWN'},
|
||||
{'id-34836': 'DOWN'}]}}]
|
||||
rv = self.app.get('/' + api_server.VERSION + '/listeners/123')
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertEqual(dict(
|
||||
status=consts.ACTIVE,
|
||||
type='test',
|
||||
uuid='123',
|
||||
pools=[dict(
|
||||
status=consts.AMPHORA_DOWN,
|
||||
uuid='tcp-servers',
|
||||
members=[
|
||||
{u'id-34833': u'DOWN'},
|
||||
{u'id-34836': u'DOWN'}])]),
|
||||
json.loads(rv.data))
|
||||
|
||||
@mock.patch('os.path.exists')
|
||||
@mock.patch('os.remove')
|
||||
def test_delete_cert(self, mock_remove, mock_exists):
|
||||
mock_exists.side_effect = [False]
|
||||
rv = self.app.delete('/' + api_server.VERSION +
|
||||
'/listeners/123/certificates/test.pem')
|
||||
self.assertEqual(404, rv.status_code)
|
||||
self.assertEqual(dict(
|
||||
details='No certificate with filename: test.pem',
|
||||
message='Certificate Not Found'),
|
||||
json.loads(rv.data))
|
||||
mock_exists.assert_called_once_with('/tmp/123/test.pem')
|
||||
|
||||
# wrong file name
|
||||
mock_exists.side_effect = [True]
|
||||
rv = self.app.put('/' + api_server.VERSION +
|
||||
'/listeners/123/certificates/test.bla',
|
||||
data='TestTest')
|
||||
self.assertEqual(400, rv.status_code)
|
||||
|
||||
mock_exists.side_effect = [True]
|
||||
rv = self.app.delete('/' + api_server.VERSION +
|
||||
'/listeners/123/certificates/test.pem')
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertEqual(OK, json.loads(rv.data))
|
||||
mock_remove.assert_called_once_with('/tmp/123/test.pem')
|
||||
|
||||
@mock.patch('os.path.exists')
|
||||
def test_get_certificate_md5(self, mock_exists):
|
||||
CONTENT = "TestTest"
|
||||
mock_exists.side_effect = [False]
|
||||
rv = self.app.get('/' + api_server.VERSION +
|
||||
'/listeners/123/certificates/test.pem')
|
||||
self.assertEqual(404, rv.status_code)
|
||||
|
||||
mock_exists.side_effect = [True, False]
|
||||
rv = self.app.get('/' + api_server.VERSION +
|
||||
'/listeners/123/certificates/test.pem')
|
||||
self.assertEqual(404, rv.status_code)
|
||||
self.assertEqual(dict(
|
||||
details='No certificate with filename: test.pem',
|
||||
message='Certificate Not Found'),
|
||||
json.loads(rv.data))
|
||||
mock_exists.assert_called_with('/tmp/123/test.pem')
|
||||
|
||||
# wrong file name
|
||||
mock_exists.side_effect = [True]
|
||||
rv = self.app.put('/' + api_server.VERSION +
|
||||
'/listeners/123/certificates/test.bla',
|
||||
data='TestTest')
|
||||
self.assertEqual(400, rv.status_code)
|
||||
|
||||
mock_exists.side_effect = [True, True]
|
||||
m = mock.mock_open(read_data=CONTENT)
|
||||
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
rv = self.app.get('/' + api_server.VERSION +
|
||||
'/listeners/123/certificates/test.pem')
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertEqual(dict(md5sum=hashlib.md5(CONTENT).hexdigest()),
|
||||
json.loads(rv.data))
|
||||
|
||||
@mock.patch('os.path.exists')
|
||||
@mock.patch('os.fchmod')
|
||||
@mock.patch('os.makedirs')
|
||||
def test_upload_certificate_md5(self, mock_makedir, mock_chmod,
|
||||
mock_exists):
|
||||
mock_exists.side_effect = [False]
|
||||
rv = self.app.put('/' + api_server.VERSION +
|
||||
'/listeners/123/certificates/test.pem',
|
||||
data='TestTest')
|
||||
self.assertEqual(404, rv.status_code)
|
||||
|
||||
# wrong file name
|
||||
mock_exists.side_effect = [True]
|
||||
rv = self.app.put('/' + api_server.VERSION +
|
||||
'/listeners/123/certificates/test.bla',
|
||||
data='TestTest')
|
||||
self.assertEqual(400, rv.status_code)
|
||||
|
||||
mock_exists.side_effect = [True, True, True]
|
||||
m = mock.mock_open()
|
||||
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
rv = self.app.put('/' + api_server.VERSION +
|
||||
'/listeners/123/certificates/test.pem',
|
||||
data='TestTest')
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertEqual(OK, json.loads(rv.data))
|
||||
handle = m()
|
||||
handle.write.assert_called_once_with('TestTest')
|
||||
mock_chmod.assert_called_once_with(handle.fileno(), 0o600)
|
||||
|
||||
mock_exists.side_effect = [True, False]
|
||||
m = mock.mock_open()
|
||||
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
rv = self.app.put('/' + api_server.VERSION +
|
||||
'/listeners/123/certificates/test.pem',
|
||||
data='TestTest')
|
||||
self.assertEqual(200, rv.status_code)
|
||||
self.assertEqual(OK, json.loads(rv.data))
|
||||
handle = m()
|
||||
mock_makedir.called_once_with('/var/lib/octavia/123')
|
||||
|
||||
@mock.patch('netifaces.interfaces')
|
||||
@mock.patch('netifaces.ifaddresses')
|
||||
@mock.patch('subprocess.check_output')
|
||||
def test_plug_network(self, mock_check_output, mock_ifaddress,
|
||||
mock_interfaces):
|
||||
# No interface at all
|
||||
mock_interfaces.side_effect = [[]]
|
||||
rv = self.app.post('/' + api_server.VERSION + "/plug/network")
|
||||
self.assertEqual(404, rv.status_code)
|
||||
self.assertEqual(dict(details="No suitable network interface found"),
|
||||
json.loads(rv.data))
|
||||
|
||||
# No interface down
|
||||
mock_interfaces.side_effect = [['blah']]
|
||||
mock_ifaddress.side_effect = [[netifaces.AF_INET]]
|
||||
rv = self.app.post('/' + api_server.VERSION + "/plug/network")
|
||||
self.assertEqual(404, rv.status_code)
|
||||
self.assertEqual(dict(details="No suitable network interface found"),
|
||||
json.loads(rv.data))
|
||||
mock_ifaddress.assert_called_once_with('blah')
|
||||
|
||||
# One Interface down, Happy Path
|
||||
mock_interfaces.side_effect = [['blah']]
|
||||
mock_ifaddress.side_effect = [['bla']]
|
||||
m = mock.mock_open()
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
rv = self.app.post('/' + api_server.VERSION + "/plug/network")
|
||||
self.assertEqual(202, rv.status_code)
|
||||
m.assert_called_once_with(
|
||||
'/etc/network/interfaces.d/blah.cfg', 'w')
|
||||
handle = m()
|
||||
handle.write.assert_called_once_with(
|
||||
'\n# Generated by Octavia agent\n'
|
||||
'auto blah blah:0\n'
|
||||
'iface blah inet dhcp')
|
||||
mock_check_output.assert_called_with(
|
||||
['ifup', 'blah:0'], stderr=-2)
|
||||
|
||||
# same as above but ifup fails
|
||||
mock_interfaces.side_effect = [['blah']]
|
||||
mock_ifaddress.side_effect = [['bla']]
|
||||
mock_check_output.side_effect = [subprocess.CalledProcessError(
|
||||
7, 'test', RANDOM_ERROR), subprocess.CalledProcessError(
|
||||
7, 'test', RANDOM_ERROR)]
|
||||
m = mock.mock_open()
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
rv = self.app.post('/' + api_server.VERSION + "/plug/network")
|
||||
self.assertEqual(500, rv.status_code)
|
||||
self.assertEqual(
|
||||
{'details': RANDOM_ERROR,
|
||||
'message': 'Error plugging network'},
|
||||
json.loads(rv.data))
|
||||
|
||||
@mock.patch('netifaces.interfaces')
|
||||
@mock.patch('netifaces.ifaddresses')
|
||||
@mock.patch('subprocess.check_output')
|
||||
def test_plug_VIP(self, mock_check_output, mock_ifaddress,
|
||||
mock_interfaces):
|
||||
        # malformed IP
|
||||
rv = self.app.post('/' + api_server.VERSION + '/plug/vip/error')
|
||||
self.assertEqual(400, rv.status_code)
|
||||
|
||||
# No interface at all
|
||||
mock_interfaces.side_effect = [[]]
|
||||
rv = self.app.post('/' + api_server.VERSION + "/plug/vip/203.0.113.2")
|
||||
self.assertEqual(404, rv.status_code)
|
||||
self.assertEqual(dict(details="No suitable network interface found"),
|
||||
json.loads(rv.data))
|
||||
|
||||
# Two interfaces down
|
||||
mock_interfaces.side_effect = [['blah', 'blah2']]
|
||||
mock_ifaddress.side_effect = [['blabla'], ['blabla']]
|
||||
rv = self.app.post('/' + api_server.VERSION + "/plug/vip/203.0.113.2")
|
||||
self.assertEqual(404, rv.status_code)
|
||||
self.assertEqual(dict(details="No suitable network interface found"),
|
||||
json.loads(rv.data))
|
||||
|
||||
# One Interface down, Happy Path
|
||||
mock_interfaces.side_effect = [['blah']]
|
||||
mock_ifaddress.side_effect = [['bla']]
|
||||
m = mock.mock_open()
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
rv = self.app.post('/' + api_server.VERSION +
|
||||
"/plug/vip/203.0.113.2")
|
||||
self.assertEqual(202, rv.status_code)
|
||||
m.assert_called_once_with(
|
||||
'/etc/network/interfaces.d/blah.cfg', 'w')
|
||||
handle = m()
|
||||
handle.write.assert_called_once_with(
|
||||
'\n# Generated by Octavia agent\n'
|
||||
'auto blah blah:0\n'
|
||||
'iface blah inet dhcp\n'
|
||||
'iface blah:0 inet static\n'
|
||||
'address 203.0.113.2\n'
|
||||
'broadcast 203.0.113.255\n'
|
||||
'netmask 255.255.255.0')
|
||||
mock_check_output.assert_called_with(
|
||||
['ifup', 'blah:0'], stderr=-2)
|
||||
|
||||
mock_interfaces.side_effect = [['blah']]
|
||||
mock_ifaddress.side_effect = [['blah']]
|
||||
mock_check_output.side_effect = [
|
||||
'unplug1',
|
||||
subprocess.CalledProcessError(
|
||||
7, 'test', RANDOM_ERROR), subprocess.CalledProcessError(
|
||||
7, 'test', RANDOM_ERROR)]
|
||||
m = mock.mock_open()
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
rv = self.app.post('/' + api_server.VERSION +
|
||||
"/plug/vip/203.0.113.2")
|
||||
self.assertEqual(500, rv.status_code)
|
||||
self.assertEqual(
|
||||
{'details': RANDOM_ERROR,
|
||||
'message': 'Error plugging VIP'},
|
||||
json.loads(rv.data))
|
@ -0,0 +1,124 @@
|
||||
# Copyright 2015 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from octavia.amphorae.backends.agent.api_server import listener
|
||||
from octavia.amphorae.drivers.haproxy.jinja import jinja_cfg
|
||||
from octavia.common import constants as consts
|
||||
import octavia.tests.unit.base as base
|
||||
from octavia.tests.unit.common.sample_configs import sample_configs
|
||||
|
||||
|
||||
class ListenerTestCase(base.TestCase):
|
||||
def setUp(self):
|
||||
super(ListenerTestCase, self).setUp()
|
||||
self.jinja_cfg = jinja_cfg.JinjaTemplater(
|
||||
base_amp_path='/var/lib/octavia',
|
||||
base_crt_dir='/listeners')
|
||||
|
||||
def test_parse_haproxy_config(self):
|
||||
# template_tls
|
||||
        tls_tuple = sample_configs.sample_tls_container_tuple(
|
||||
certificate='imaCert1', private_key='imaPrivateKey1',
|
||||
primary_cn='FakeCN')
|
||||
rendered_obj = self.jinja_cfg.render_loadbalancer_obj(
|
||||
sample_configs.sample_listener_tuple(proto='TERMINATED_HTTPS',
|
||||
tls=True, sni=True),
|
||||
            tls_tuple)
|
||||
|
||||
m = mock.mock_open(read_data=rendered_obj)
|
||||
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
res = listener._parse_haproxy_file('123')
|
||||
self.assertEqual('TERMINATED_HTTPS', res['mode'])
|
||||
self.assertEqual('/var/lib/octavia/sample_listener_id_1.sock',
|
||||
res['stats_socket'])
|
||||
self.assertEqual(
|
||||
'/var/lib/octavia/listeners/sample_listener_id_1/FakeCN.pem',
|
||||
res['ssl_crt'])
|
||||
|
||||
# render_template_tls_no_sni
|
||||
rendered_obj = self.jinja_cfg.render_loadbalancer_obj(
|
||||
sample_configs.sample_listener_tuple(
|
||||
proto='TERMINATED_HTTPS', tls=True),
|
||||
tls_cert=sample_configs.sample_tls_container_tuple(
|
||||
certificate='ImAalsdkfjCert',
|
||||
private_key='ImAsdlfksdjPrivateKey',
|
||||
primary_cn="FakeCN"))
|
||||
|
||||
m = mock.mock_open(read_data=rendered_obj)
|
||||
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
res = listener._parse_haproxy_file('123')
|
||||
self.assertEqual('TERMINATED_HTTPS', res['mode'])
|
||||
self.assertEqual('/var/lib/octavia/sample_listener_id_1.sock',
|
||||
res['stats_socket'])
|
||||
self.assertEqual(
|
||||
'/var/lib/octavia/listeners/sample_listener_id_1/FakeCN.pem',
|
||||
res['ssl_crt'])
|
||||
|
||||
# render_template_http
|
||||
rendered_obj = self.jinja_cfg.render_loadbalancer_obj(
|
||||
sample_configs.sample_listener_tuple())
|
||||
m = mock.mock_open(read_data=rendered_obj)
|
||||
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
res = listener._parse_haproxy_file('123')
|
||||
self.assertEqual('HTTP', res['mode'])
|
||||
self.assertEqual('/var/lib/octavia/sample_listener_id_1.sock',
|
||||
res['stats_socket'])
|
||||
self.assertIsNone(res['ssl_crt'])
|
||||
|
||||
# template_https
|
||||
rendered_obj = self.jinja_cfg.render_loadbalancer_obj(
|
||||
sample_configs.sample_listener_tuple(proto='HTTPS'))
|
||||
m = mock.mock_open(read_data=rendered_obj)
|
||||
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
res = listener._parse_haproxy_file('123')
|
||||
self.assertEqual('TCP', res['mode'])
|
||||
self.assertEqual('/var/lib/octavia/sample_listener_id_1.sock',
|
||||
res['stats_socket'])
|
||||
self.assertIsNone(res['ssl_crt'])
|
||||
|
||||
# Bogus format
|
||||
m = mock.mock_open(read_data='Bogus')
|
||||
|
||||
with mock.patch('__builtin__.open', m, create=True):
|
||||
try:
|
||||
res = listener._parse_haproxy_file('123')
|
||||
self.fail("No Exception?")
|
||||
except listener.ParsingError:
|
||||
pass
|
||||
|
||||
@mock.patch('os.path.exists')
|
||||
@mock.patch('octavia.amphorae.backends.agent.api_server'
|
||||
+ '.util.get_haproxy_pid')
|
||||
def test_check_listener_status(self, mock_pid, mock_exists):
|
||||
mock_pid.return_value = '1245'
|
||||
mock_exists.side_effect = [True, True]
|
||||
self.assertEqual(
|
||||
consts.ACTIVE,
|
||||
listener._check_listener_status('123'))
|
||||
|
||||
mock_exists.side_effect = [True, False]
|
||||
self.assertEqual(
|
||||
consts.ERROR,
|
||||
listener._check_listener_status('123'))
|
||||
|
||||
mock_exists.side_effect = [False]
|
||||
self.assertEqual(
|
||||
consts.OFFLINE,
|
||||
listener._check_listener_status('123'))
|
@ -0,0 +1,72 @@
|
||||
# Copyright 2015 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from octavia.amphorae.backends.utils import haproxy_query as query
|
||||
import octavia.tests.unit.base as base
|
||||
|
||||
STATS_SOCKET_SAMPLE = [
|
||||
"# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,"
|
||||
"econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,"
|
||||
"downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,"
|
||||
"rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp"
|
||||
"_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot"
|
||||
",cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,"
|
||||
"last_agt,qtime,ctime,rtime,ttime,",
|
||||
|
||||
"http-servers,id-34821,0,0,0,0,,0,0,0,,0,,0,0,0,0,DOWN,1,1,0,1,1,575,575"
|
||||
",,1,3,1,,0,,2,0,,0,L4TOUT,,30001,0,0,0,0,0,0,0,,,,0,0,,,,,-1,,,0,0,0,0,",
|
||||
|
||||
"http-servers,id-34824,0,0,0,0,,0,0,0,,0,,0,0,0,0,DOWN,1,1,0,1,1,567,567,"
|
||||
",1,3,2,,0,,2,0,,0,L4TOUT,,30001,0,0,0,0,0,0,0,,,,0,0,,,,,-1,,,0,0,0,0,",
|
||||
|
||||
"http-servers,BACKEND,0,0,0,0,200,0,0,0,0,0,,0,0,0,0,DOWN,0,0,0,,1,567,567"
|
||||
",,1,3,0,,0,,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,0,0,0,0,-1,,,0,0,0,0,",
|
||||
|
||||
"tcp-servers,id-34833,0,0,0,0,,0,0,0,,0,,0,0,0,0,DOWN,1,1,0,1,1,"
|
||||
"560,560,,1,5,1,,0,,2,0,,0,L4TOUT,,30000,,,,,,,0,,,,0,0,,,,,-1,,"
|
||||
",0,0,0,0,",
|
||||
|
||||
"tcp-servers,id-34836,0,0,0,0,,0,0,0,,0,,0,0,0,0,DOWN,1,1,0,1,1,552,552,,"
|
||||
"1,5,2,,0,,2,0,,0,L4TOUT,,30001,,,,,,,0,,,,0,0,,,,,-1,,,0,0,0,0,",
|
||||
|
||||
"tcp-servers,BACKEND,0,0,0,0,200,0,0,0,0,0,,0,0,0,0,DOWN,0,0,0,,1,552,552"
|
||||
",,1,5,0,,0,,1,0,,0,,,,,,,,,,,,,,0,0,0,0,0,0,-1,,,0,0,0,0,"]
|
||||
|
||||
|
||||
class QueryTestCase(base.TestCase):
|
||||
def setUp(self):
|
||||
self.q = query.HAProxyQuery('')
|
||||
super(QueryTestCase, self).setUp()
|
||||
|
||||
def test_get_pool_status(self):
|
||||
stat_mock = mock.Mock()
|
||||
self.q.show_stat = stat_mock
|
||||
stat_mock.return_value = STATS_SOCKET_SAMPLE
|
||||
self.assertEqual(
|
||||
{'tcp-servers':
|
||||
{'status': 'DOWN',
|
||||
'uuid': 'tcp-servers',
|
||||
'members': [
|
||||
{'id-34833': 'DOWN'},
|
||||
{'id-34836': 'DOWN'}]},
|
||||
'http-servers': {
|
||||
'status': 'DOWN',
|
||||
'uuid': 'http-servers',
|
||||
'members': [
|
||||
{'id-34821': 'DOWN'},
|
||||
{'id-34824': 'DOWN'}]}},
|
||||
self.q.get_pool_status()
|
||||
)
|
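Since get_pool_status() indexes straight into the CSV columns, a small check against the sample header above makes the mapping explicit (column numbers are 0-based):

# Hypothetical illustration, not part of the change set.
header = STATS_SOCKET_SAMPLE[0].split(',')
assert header[0] == '# pxname'  # pool (proxy) name
assert header[1] == 'svname'    # server name, or 'BACKEND' for the pool row
assert header[17] == 'status'   # 'UP'/'DOWN'; anything not UP is reported DOWN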
@ -40,3 +40,8 @@ Jinja2>=2.6 # BSD License (3 clause)
paramiko>=1.13.0
taskflow>=0.7.1
networkx>=1.8

#for the amphora api
Flask
netifaces
@ -39,6 +39,7 @@ console_scripts =
    octavia-worker = octavia.cmd.octavia_worker:main
    octavia-health-manager = octavia.cmd.health_manager:main
    octavia-housekeeping = octavia.cmd.housekeeping:main
    amphora-agent = octavia.cmd.agent:main
octavia.api.handlers =
    simulated_handler = octavia.api.v1.handlers.controller_simulator.handler:SimulatedControllerHandler
    queue_producer = octavia.api.v1.handlers.queue.producer:ProducerHandler