[gnuoy,r=james-page] Add HA support
commit 7a16590591
@@ -5,5 +5,12 @@ include:
     - fetch
     - contrib.storage.linux:
         - utils
+    - contrib.hahelpers:
+        - apache
+        - cluster
     - payload.execd
-    - contrib.openstack.alternatives
+    - contrib.openstack|inc=*
+    - contrib.network.ip
+    - contrib.openstack.ip
+    - contrib.storage.linux
+    - contrib.python.packages

20  config.yaml
@@ -67,3 +67,23 @@ options:
       .
       Enable this option to disable use of Apache and enable the embedded
       web container feature.
+  vip:
+    type: string
+    default:
+    description: |
+      Virtual IP(s) to use to front API services in HA configuration.
+      .
+      If multiple networks are being used, a VIP should be provided for each
+      network, separated by spaces.
+  ha-bindiface:
+    type: string
+    default: eth0
+    description: |
+      Default network interface on which the HA cluster will bind for
+      communication with the other members of the HA cluster.
+  ha-mcastport:
+    type: int
+    default: 5414
+    description: |
+      Default multicast port number that will be used to communicate between
+      HA Cluster nodes.

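Note: these three options are consumed together when the charm relates to
hacluster. A minimal sketch (not part of this commit) of how a hook reads
them via the get_hacluster_config helper added below in
hooks/charmhelpers/contrib/hahelpers/cluster.py; ha_relation_joined is a
hypothetical hook name:

    from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config

    def ha_relation_joined():
        # Returns {'vip': ..., 'ha-bindiface': ..., 'ha-mcastport': ...};
        # raises HAIncompleteConfig if any of the three settings is unset.
        conf = get_hacluster_config()
        print(conf['vip'], conf['ha-bindiface'], conf['ha-mcastport'])
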
11  files/ports.conf  Normal file
@@ -0,0 +1,11 @@
Listen 70

<IfModule ssl_module>
	Listen 443
</IfModule>

<IfModule mod_gnutls.c>
	Listen 443
</IfModule>

# vim: syntax=apache ts=4 sw=4 sts=4 sr noet

29  hooks/ceph_radosgw_context.py  Normal file
@@ -0,0 +1,29 @@
from charmhelpers.contrib.openstack import context
from charmhelpers.contrib.hahelpers.cluster import (
    determine_api_port,
    determine_apache_port,
)


class HAProxyContext(context.HAProxyContext):

    def __call__(self):
        ctxt = super(HAProxyContext, self).__call__()

        # Apache ports
        a_cephradosgw_api = determine_apache_port(80,
                                                  singlenode_mode=True)

        port_mapping = {
            'cephradosgw-server': [
                80, a_cephradosgw_api]
        }

        ctxt['cephradosgw_bind_port'] = determine_api_port(
            80,
            singlenode_mode=True,
        )

        # for haproxy.conf
        ctxt['service_ports'] = port_mapping
        return ctxt

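Note: with a single unit the port shuffling above leaves haproxy on the
public port 80, Apache ten below it (hence "Listen 70" in files/ports.conf),
and the radosgw a further ten below once SSL termination is active. A worked
sketch (standalone; mirrors the arithmetic in hahelpers/cluster.py with
singlenode_mode=True):

    public_port = 80
    apache_port = public_port - 10       # 70 -> matches "Listen 70" above
    api_port_no_https = public_port - 10  # radosgw bind port, plain HTTP
    api_port_https = public_port - 20     # radosgw bind port behind SSL
    print(apache_port, api_port_no_https, api_port_https)
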
22  hooks/charmhelpers/__init__.py  Normal file
@@ -0,0 +1,22 @@
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys

try:
    import six  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
    import six  # flake8: noqa

try:
    import yaml  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
    import yaml  # flake8: noqa

0  hooks/charmhelpers/contrib/hahelpers/__init__.py  Normal file
66  hooks/charmhelpers/contrib/hahelpers/apache.py  Normal file
@@ -0,0 +1,66 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
#  James Page <james.page@ubuntu.com>
#  Adam Gandelman <adamg@ubuntu.com>
#

import subprocess

from charmhelpers.core.hookenv import (
    config as config_get,
    relation_get,
    relation_ids,
    related_units as relation_list,
    log,
    INFO,
)


def get_cert(cn=None):
    # TODO: deal with multiple https endpoints via charm config
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if not (cert and key):
        log("Inspecting identity-service relations for SSL certificate.",
            level=INFO)
        cert = key = None
        if cn:
            ssl_cert_attr = 'ssl_cert_{}'.format(cn)
            ssl_key_attr = 'ssl_key_{}'.format(cn)
        else:
            ssl_cert_attr = 'ssl_cert'
            ssl_key_attr = 'ssl_key'
        for r_id in relation_ids('identity-service'):
            for unit in relation_list(r_id):
                if not cert:
                    cert = relation_get(ssl_cert_attr,
                                        rid=r_id, unit=unit)
                if not key:
                    key = relation_get(ssl_key_attr,
                                       rid=r_id, unit=unit)
    return (cert, key)


def get_ca_cert():
    ca_cert = config_get('ssl_ca')
    if ca_cert is None:
        log("Inspecting identity-service relations for CA SSL certificate.",
            level=INFO)
        for r_id in relation_ids('identity-service'):
            for unit in relation_list(r_id):
                if ca_cert is None:
                    ca_cert = relation_get('ca_cert',
                                           rid=r_id, unit=unit)
    return ca_cert


def install_ca_cert(ca_cert):
    if ca_cert:
        with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
                  'w') as crt:
            crt.write(ca_cert)
        subprocess.check_call(['update-ca-certificates', '--fresh'])

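Note: a hedged usage sketch (not in this commit) of how these helpers are
typically combined when configuring SSL in a hook; configure_ssl is a
hypothetical helper name:

    from charmhelpers.contrib.hahelpers.apache import (
        get_cert,
        get_ca_cert,
        install_ca_cert,
    )

    def configure_ssl():
        cert, key = get_cert()    # charm config first, then identity-service
        ca_cert = get_ca_cert()
        install_ca_cert(ca_cert)  # no-op if ca_cert is None or empty
        return cert, key
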
248  hooks/charmhelpers/contrib/hahelpers/cluster.py  Normal file
@@ -0,0 +1,248 @@
#
# Copyright 2012 Canonical Ltd.
#
# Authors:
#  James Page <james.page@ubuntu.com>
#  Adam Gandelman <adamg@ubuntu.com>
#

"""
Helpers for clustering and determining "cluster leadership" and other
clustering-related helpers.
"""

import subprocess
import os

from socket import gethostname as get_unit_hostname

import six

from charmhelpers.core.hookenv import (
    log,
    relation_ids,
    related_units as relation_list,
    relation_get,
    config as config_get,
    INFO,
    ERROR,
    WARNING,
    unit_get,
)
from charmhelpers.core.decorators import (
    retry_on_exception,
)


class HAIncompleteConfig(Exception):
    pass


class CRMResourceNotFound(Exception):
    pass


def is_elected_leader(resource):
    """
    Returns True if the charm executing this is the elected cluster leader.

    It relies on two mechanisms to determine leadership:
        1. If the charm is part of a corosync cluster, call corosync to
        determine leadership.
        2. If the charm is not part of a corosync cluster, the leader is
        determined as being "the alive unit with the lowest unit number". In
        other words, the oldest surviving unit.
    """
    if is_clustered():
        if not is_crm_leader(resource):
            log('Deferring action to CRM leader.', level=INFO)
            return False
    else:
        peers = peer_units()
        if peers and not oldest_peer(peers):
            log('Deferring action to oldest service unit.', level=INFO)
            return False
    return True


def is_clustered():
    for r_id in (relation_ids('ha') or []):
        for unit in (relation_list(r_id) or []):
            clustered = relation_get('clustered',
                                     rid=r_id,
                                     unit=unit)
            if clustered:
                return True
    return False


@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
def is_crm_leader(resource, retry=False):
    """
    Returns True if the charm calling this is the elected corosync leader,
    as returned by calling the external "crm" command.

    We allow this operation to be retried to avoid the possibility of getting a
    false negative. See LP #1396246 for more info.
    """
    cmd = ['crm', 'resource', 'show', resource]
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError:
        status = None

    if status and get_unit_hostname() in status:
        return True

    if status and "resource %s is NOT running" % (resource) in status:
        raise CRMResourceNotFound("CRM resource %s not found" % (resource))

    return False


def is_leader(resource):
    log("is_leader is deprecated. Please consider using is_crm_leader "
        "instead.", level=WARNING)
    return is_crm_leader(resource)


def peer_units(peer_relation="cluster"):
    peers = []
    for r_id in (relation_ids(peer_relation) or []):
        for unit in (relation_list(r_id) or []):
            peers.append(unit)
    return peers


def peer_ips(peer_relation='cluster', addr_key='private-address'):
    '''Return a dict of peers and their private-address'''
    peers = {}
    for r_id in relation_ids(peer_relation):
        for unit in relation_list(r_id):
            peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
    return peers


def oldest_peer(peers):
    """Determines who the oldest peer is by comparing unit numbers."""
    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    for peer in peers:
        remote_unit_no = int(peer.split('/')[1])
        if remote_unit_no < local_unit_no:
            return False
    return True


def eligible_leader(resource):
    log("eligible_leader is deprecated. Please consider using "
        "is_elected_leader instead.", level=WARNING)
    return is_elected_leader(resource)


def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS
    .
    returns: boolean
    '''
    if config_get('use-https') == "yes":
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
            rel_state = [
                relation_get('https_keystone', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            ]
            # NOTE: works around (LP: #1203241)
            if (None not in rel_state) and ('' not in rel_state):
                return True
    return False


def determine_api_port(public_port, singlenode_mode=False):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the API service
    '''
    i = 0
    if singlenode_mode:
        i += 1
    elif len(peer_units()) > 0 or is_clustered():
        i += 1
    if https():
        i += 1
    return public_port - (i * 10)


def determine_apache_port(public_port, singlenode_mode=False):
    '''
    Description: Determine correct apache listening port based on public IP +
    state of the cluster.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the HAProxy service
    '''
    i = 0
    if singlenode_mode:
        i += 1
    elif len(peer_units()) > 0 or is_clustered():
        i += 1
    return public_port - (i * 10)


def get_hacluster_config():
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip

    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
    conf = {}
    for setting in settings:
        conf[setting] = config_get(setting)
    missing = []
    [missing.append(s) for s, v in six.iteritems(conf) if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
    return conf


def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs    : OSTemplateRenderer: A config templating object to inspect
                  for a complete https context.

    :vip_setting: str: Setting in charm config that specifies
                  VIP address.
    '''
    scheme = 'http'
    if 'https' in configs.complete_contexts():
        scheme = 'https'
    if is_clustered():
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)

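Note: a short sketch (assumed usage inside a hook, not commit code) of
gating a one-off operation on cluster leadership with the helpers above;
'res_cephrg_vip' is a hypothetical corosync resource name:

    from charmhelpers.contrib.hahelpers.cluster import is_elected_leader

    if is_elected_leader('res_cephrg_vip'):
        print('this unit performs the one-off initialisation')
    else:
        print('deferring to the elected leader')
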
0  hooks/charmhelpers/contrib/network/__init__.py  Normal file
351  hooks/charmhelpers/contrib/network/ip.py  Normal file
@@ -0,0 +1,351 @@
import glob
import re
import subprocess

from functools import partial

from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
    log
)

try:
    import netifaces
except ImportError:
    apt_install('python-netifaces')
    import netifaces

try:
    import netaddr
except ImportError:
    apt_install('python-netaddr')
    import netaddr


def _validate_cidr(network):
    try:
        netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)


def no_ip_found_error_out(network):
    errmsg = ("No IP address found in network: %s" % network)
    raise ValueError(errmsg)


def get_address_in_network(network, fallback=None, fatal=False):
    """Get an IPv4 or IPv6 address within the network from the host.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param fallback (str): If no address is found, return fallback.
    :param fatal (boolean): If no address is found, fallback is not
        set and fatal is True then a ValueError is raised.
    """
    if network is None:
        if fallback is not None:
            return fallback

        if fatal:
            no_ip_found_error_out(network)
        else:
            return None

    _validate_cidr(network)
    network = netaddr.IPNetwork(network)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if network.version == 4 and netifaces.AF_INET in addresses:
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            if cidr in network:
                return str(cidr.ip)

        if network.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                if not addr['addr'].startswith('fe80'):
                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                        addr['netmask']))
                    if cidr in network:
                        return str(cidr.ip)

    if fallback is not None:
        return fallback

    if fatal:
        no_ip_found_error_out(network)

    return None


def is_ipv6(address):
    """Determine whether provided address is IPv6 or not."""
    try:
        address = netaddr.IPAddress(address)
    except netaddr.AddrFormatError:
        # probably a hostname - so not an address at all!
        return False

    return address.version == 6


def is_address_in_network(network, address):
    """
    Determine whether the provided address is within a network range.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param address: An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :returns boolean: Flag indicating whether address is in network.
    """
    try:
        network = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)

    try:
        address = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)

    if address in network:
        return True
    else:
        return False


def _get_for_address(address, key):
    """Retrieve an attribute of or the physical interface that
    the IP address provided could be bound to.

    :param address (str): An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :param key: 'iface' for the physical interface name or an attribute
        of the configured interface, for example 'netmask'.
    :returns str: Requested attribute or None if address is not bindable.
    """
    address = netaddr.IPAddress(address)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if address.version == 4 and netifaces.AF_INET in addresses:
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            cidr = network.cidr
            if address in cidr:
                if key == 'iface':
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]

        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                if not addr['addr'].startswith('fe80'):
                    network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                           addr['netmask']))
                    cidr = network.cidr
                    if address in cidr:
                        if key == 'iface':
                            return iface
                        elif key == 'netmask' and cidr:
                            return str(cidr).split('/')[1]
                        else:
                            return addr[key]

    return None


get_iface_for_address = partial(_get_for_address, key='iface')


get_netmask_for_address = partial(_get_for_address, key='netmask')


def format_ipv6_addr(address):
    """If address is IPv6, wrap it in '[]' otherwise return None.

    This is required by most configuration files when specifying IPv6
    addresses.
    """
    if is_ipv6(address):
        return "[%s]" % address

    return None


def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """Return the assigned IP address for a given interface, if any."""
    # Extract nic if passed /dev/ethX
    if '/' in iface:
        iface = iface.split('/')[-1]

    if not exc_list:
        exc_list = []

    try:
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception("Unknown inet type '%s'" % str(inet_type))

    interfaces = netifaces.interfaces()
    if inc_aliases:
        ifaces = []
        for _iface in interfaces:
            if iface == _iface or _iface.split(':')[0] == iface:
                ifaces.append(_iface)

        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)

        ifaces.sort()
    else:
        if iface not in interfaces:
            if fatal:
                raise Exception("Interface '%s' not found " % (iface))
            else:
                return []

        else:
            ifaces = [iface]

    addresses = []
    for netiface in ifaces:
        net_info = netifaces.ifaddresses(netiface)
        if inet_num in net_info:
            for entry in net_info[inet_num]:
                if 'addr' in entry and entry['addr'] not in exc_list:
                    addresses.append(entry['addr'])

    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))

    return sorted(addresses)


get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')


def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured."""
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        for inet_type in addresses:
            for _addr in addresses[inet_type]:
                _addr = _addr['addr']
                # link local
                ll_key = re.compile("(.+)%.*")
                raw = re.match(ll_key, _addr)
                if raw:
                    _addr = raw.group(1)

                if _addr == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, iface))
                    return iface

    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)


def sniff_iface(f):
    """Ensure decorated function is called with a value for iface.

    If no iface provided, inject net iface inferred from unit private address.
    """
    def iface_sniffer(*args, **kwargs):
        if not kwargs.get('iface', None):
            kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))

        return f(*args, **kwargs)

    return iface_sniffer


@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    Returns list of addresses found. If no address found, returns empty list.

    If iface is None, we infer the current primary interface by doing a reverse
    lookup on the unit private-address.

    We currently only support scope global IPv6 addresses i.e. non-temporary
    addresses. If no global IPv6 address is found, return the first one found
    in the ipv6 address list.
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)

    if addresses:
        global_addrs = []
        for addr in addresses:
            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
            m = re.match(key_scope_link_local, addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd).decode('UTF-8')
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            if not dynamic_only or \
                                    m.group(1).endswith(eui_64_mac):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' does not have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []


def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """Return a list of bridges on the system."""
    b_regex = "%s/*/bridge" % vnic_dir
    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]


def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """Return a list of nics comprising a given bridge on the system."""
    brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
    return [x.split('/')[-1] for x in glob.glob(brif_regex)]


def is_bridge_member(nic):
    """Check if a given nic is a member of a bridge."""
    for bridge in get_bridges():
        if nic in get_bridge_nics(bridge):
            return True

    return False

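Note: a hedged example of the address-selection helper above, picking the
unit's address on a configured network split and falling back to the Juju
private-address (the CIDR value is invented):

    from charmhelpers.contrib.network.ip import get_address_in_network
    from charmhelpers.core.hookenv import unit_get

    addr = get_address_in_network('10.5.0.0/16',
                                  fallback=unit_get('private-address'))
    print(addr)
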
92  hooks/charmhelpers/contrib/openstack/amulet/deployment.py  Normal file
@@ -0,0 +1,92 @@
import six
from charmhelpers.contrib.amulet.deployment import (
    AmuletDeployment
)


class OpenStackAmuletDeployment(AmuletDeployment):
    """OpenStack amulet deployment.

    This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, series=None, openstack=None, source=None, stable=True):
        """Initialize the deployment environment."""
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source
        self.stable = stable
        # Note(coreycb): this needs to be changed when new next branches come
        # out.
        self.current_next = "trusty"

    def _determine_branch_locations(self, other_services):
        """Determine the branch locations for the other services.

        Determine if the local branch being tested is derived from its
        stable or next (dev) branch, and based on this, use the corresponding
        stable or next branches for the other_services."""
        base_charms = ['mysql', 'mongodb', 'rabbitmq-server']

        if self.stable:
            for svc in other_services:
                temp = 'lp:charms/{}'
                svc['location'] = temp.format(svc['name'])
        else:
            for svc in other_services:
                if svc['name'] in base_charms:
                    temp = 'lp:charms/{}'
                    svc['location'] = temp.format(svc['name'])
                else:
                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
                    svc['location'] = temp.format(self.current_next,
                                                  svc['name'])
        return other_services

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin/source."""
        other_services = self._determine_branch_locations(other_services)

        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)

        services = other_services
        services.append(this_service)
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                      'ceph-osd', 'ceph-radosgw']

        if self.openstack:
            for svc in services:
                if svc['name'] not in use_source:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc['name'], config)

        if self.source:
            for svc in services:
                if svc['name'] in use_source:
                    config = {'source': self.source}
                    self.d.configure(svc['name'], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Get openstack release.

        Return an integer representing the enum value of the openstack
        release.
        """
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse) = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]

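Note: illustrative test-harness usage (assumed; the leading-underscore
helpers are driven directly by charm test suites, and the service names and
option below are examples only):

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment,
    )

    d = OpenStackAmuletDeployment(series='trusty', stable=True)
    this_service = {'name': 'ceph-radosgw'}
    other_services = [{'name': 'ceph', 'units': 3}, {'name': 'keystone'}]
    d._add_services(this_service, other_services)
    d._configure_services({'ceph-radosgw': {'use-embedded-webserver': True}})
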
278  hooks/charmhelpers/contrib/openstack/amulet/utils.py  Normal file
@@ -0,0 +1,278 @@
import logging
import os
import time
import urllib

import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client

import six

from charmhelpers.contrib.amulet.utils import (
    AmuletUtils
)

DEBUG = logging.DEBUG
ERROR = logging.ERROR


class OpenStackAmuletUtils(AmuletUtils):
    """OpenStack amulet utilities.

    This class inherits from AmuletUtils and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate endpoint data.

        Validate actual endpoint data vs expected endpoint data. The ports
        are used to find the matching endpoint.
        """
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            if (admin_port in ep.adminurl and
                    internal_port in ep.internalurl and
                    public_port in ep.publicurl):
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate service catalog endpoint data.

        Validate a list of actual service catalog endpoints vs a list of
        expected service catalog endpoints.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in six.iteritems(expected):
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate tenant data.

        Validate a list of actual tenant data vs list of expected tenant
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        return ret

    def validate_role_data(self, expected, actual):
        """Validate role data.

        Validate a list of actual role data vs a list of expected role
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        return ret

    def validate_user_data(self, expected, actual):
        """Validate user data.

        Validate a list of actual user data vs a list of expected user
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate flavor data.

        Validate a list of actual flavors vs a list of expected flavors.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists."""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        unit = keystone_sentry
        service_ip = unit.relation('shared-db',
                                   'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance."""
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        count = 1
        status = image.status
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None

        return image

    def delete_image(self, glance, image):
        """Delete the specified image."""
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        count = 1
        num_after = len(list(glance.images.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('image deletion timed out')
            return False

        return True

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance."""
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        if status != 'ACTIVE':
            self.log.error('instance creation timed out')
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance."""
        num_before = len(list(nova.servers.list()))
        nova.servers.delete(instance)

        count = 1
        num_after = len(list(nova.servers.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(nova.servers.list()))
            self.log.debug('number of instances: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('instance deletion timed out')
            return False

        return True

1038  hooks/charmhelpers/contrib/openstack/context.py  Normal file
(file diff suppressed because it is too large)
93  hooks/charmhelpers/contrib/openstack/ip.py  Normal file
@@ -0,0 +1,93 @@
from charmhelpers.core.hookenv import (
    config,
    unit_get,
)
from charmhelpers.contrib.network.ip import (
    get_address_in_network,
    is_address_in_network,
    is_ipv6,
    get_ipv6_addr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered

PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'

ADDRESS_MAP = {
    PUBLIC: {
        'config': 'os-public-network',
        'fallback': 'public-address'
    },
    INTERNAL: {
        'config': 'os-internal-network',
        'fallback': 'private-address'
    },
    ADMIN: {
        'config': 'os-admin-network',
        'fallback': 'private-address'
    }
}


def canonical_url(configs, endpoint_type=PUBLIC):
    """Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :param configs: OSTemplateRenderer config templating object to inspect
                    for a complete https context.
    :param endpoint_type: str endpoint type to resolve.
    :param returns: str base URL for services on the current service unit.
    """
    scheme = 'http'
    if 'https' in configs.complete_contexts():
        scheme = 'https'
    address = resolve_address(endpoint_type)
    if is_ipv6(address):
        address = "[{}]".format(address)
    return '%s://%s' % (scheme, address)


def resolve_address(endpoint_type=PUBLIC):
    """Return unit address depending on net config.

    If unit is clustered with vip(s) and has net splits defined, return vip on
    correct network. If clustered with no nets defined, return primary vip.

    If not clustered, return unit address ensuring address is on configured net
    split if one is configured.

    :param endpoint_type: Network endpoint type
    """
    resolved_address = None
    vips = config('vip')
    if vips:
        vips = vips.split()

    net_type = ADDRESS_MAP[endpoint_type]['config']
    net_addr = config(net_type)
    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
    clustered = is_clustered()
    if clustered:
        if not net_addr:
            # If no net-splits defined, we expect a single vip
            resolved_address = vips[0]
        else:
            for vip in vips:
                if is_address_in_network(net_addr, vip):
                    resolved_address = vip
                    break
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
        else:
            fallback_addr = unit_get(net_fallback)

        resolved_address = get_address_in_network(net_addr, fallback_addr)

    if resolved_address is None:
        raise ValueError("Unable to resolve a suitable IP address based on "
                         "charm state and configuration. (net_type=%s, "
                         "clustered=%s)" % (net_type, clustered))

    return resolved_address

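Note: a hedged, self-contained illustration of the per-network VIP selection
inside resolve_address() on a clustered unit (addresses invented):

    import netaddr

    vips = ['10.5.100.1', '192.168.21.1']   # charm config 'vip', split()
    net_addr = '10.5.0.0/16'                # e.g. 'os-public-network'
    resolved = next(vip for vip in vips
                    if netaddr.IPAddress(vip) in netaddr.IPNetwork(net_addr))
    print(resolved)                         # -> 10.5.100.1
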
223  hooks/charmhelpers/contrib/openstack/neutron.py  Normal file
@@ -0,0 +1,223 @@
# Various utilities for dealing with Neutron and the renaming from Quantum.

from subprocess import check_output

from charmhelpers.core.hookenv import (
    config,
    log,
    ERROR,
)

from charmhelpers.contrib.openstack.utils import os_release


def headers_package():
    """Ensures correct linux-headers for running kernel are installed,
    for building DKMS package"""
    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
    return 'linux-headers-%s' % kver

QUANTUM_CONF_DIR = '/etc/quantum'


def kernel_version():
    """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
    kver = kver.split('.')
    return (int(kver[0]), int(kver[1]))


def determine_dkms_package():
    """ Determine which DKMS package should be used based on kernel version """
    # NOTE: 3.13 kernels have support for GRE and VXLAN native
    if kernel_version() >= (3, 13):
        return []
    else:
        return ['openvswitch-datapath-dkms']


# legacy


def quantum_plugins():
    from charmhelpers.contrib.openstack import context
    return {
        'ovs': {
            'config': '/etc/quantum/plugins/openvswitch/'
                      'ovs_quantum_plugin.ini',
            'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
                      'OVSQuantumPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=QUANTUM_CONF_DIR)],
            'services': ['quantum-plugin-openvswitch-agent'],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['quantum-plugin-openvswitch-agent']],
            'server_packages': ['quantum-server',
                                'quantum-plugin-openvswitch'],
            'server_services': ['quantum-server']
        },
        'nvp': {
            'config': '/etc/quantum/plugins/nicira/nvp.ini',
            'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
                      'QuantumPlugin.NvpPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=QUANTUM_CONF_DIR)],
            'services': [],
            'packages': [],
            'server_packages': ['quantum-server',
                                'quantum-plugin-nicira'],
            'server_services': ['quantum-server']
        }
    }

NEUTRON_CONF_DIR = '/etc/neutron'


def neutron_plugins():
    from charmhelpers.contrib.openstack import context
    release = os_release('nova-common')
    plugins = {
        'ovs': {
            'config': '/etc/neutron/plugins/openvswitch/'
                      'ovs_neutron_plugin.ini',
            'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
                      'OVSNeutronPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': ['neutron-plugin-openvswitch-agent'],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['neutron-plugin-openvswitch-agent']],
            'server_packages': ['neutron-server',
                                'neutron-plugin-openvswitch'],
            'server_services': ['neutron-server']
        },
        'nvp': {
            'config': '/etc/neutron/plugins/nicira/nvp.ini',
            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
                      'NeutronPlugin.NvpPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server',
                                'neutron-plugin-nicira'],
            'server_services': ['neutron-server']
        },
        'nsx': {
            'config': '/etc/neutron/plugins/vmware/nsx.ini',
            'driver': 'vmware',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server',
                                'neutron-plugin-vmware'],
            'server_services': ['neutron-server']
        },
        'n1kv': {
            'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
            'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['neutron-plugin-cisco']],
            'server_packages': ['neutron-server',
                                'neutron-plugin-cisco'],
            'server_services': ['neutron-server']
        },
        'Calico': {
            'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
            'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': ['calico-felix',
                         'bird',
                         'neutron-dhcp-agent',
                         'nova-api-metadata'],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['calico-compute',
                          'bird',
                          'neutron-dhcp-agent',
                          'nova-api-metadata']],
            'server_packages': ['neutron-server', 'calico-control'],
            'server_services': ['neutron-server']
        }
    }
    if release >= 'icehouse':
        # NOTE: patch in ml2 plugin for icehouse onwards
        plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
        plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        plugins['ovs']['server_packages'] = ['neutron-server',
                                             'neutron-plugin-ml2']
        # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
        plugins['nvp'] = plugins['nsx']
    return plugins


def neutron_plugin_attribute(plugin, attr, net_manager=None):
    manager = net_manager or network_manager()
    if manager == 'quantum':
        plugins = quantum_plugins()
    elif manager == 'neutron':
        plugins = neutron_plugins()
    else:
        log("Network manager '%s' does not support plugins." % (manager),
            level=ERROR)
        raise Exception

    try:
        _plugin = plugins[plugin]
    except KeyError:
        log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
        raise Exception

    try:
        return _plugin[attr]
    except KeyError:
        return None


def network_manager():
    '''
    Deals with the renaming of Quantum to Neutron in H and any situations
    that require compatibility (eg, deploying H with network-manager=quantum,
    upgrading from G).
    '''
    release = os_release('nova-common')
    manager = config('network-manager').lower()

    if manager not in ['quantum', 'neutron']:
        return manager

    if release in ['essex']:
        # E does not support neutron
        log('Neutron networking not supported in Essex.', level=ERROR)
        raise Exception
    elif release in ['folsom', 'grizzly']:
        # neutron is named quantum in F and G
        return 'quantum'
    else:
        # ensure accurate naming for all releases post-H
        return 'neutron'

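Note: a hedged example of the lookup helper above, fetching the server
packages for the ovs plugin under the neutron manager:

    from charmhelpers.contrib.openstack.neutron import (
        neutron_plugin_attribute,
    )

    # Passing net_manager explicitly avoids the config('network-manager')
    # lookup inside network_manager().
    pkgs = neutron_plugin_attribute('ovs', 'server_packages',
                                    net_manager='neutron')
    print(pkgs)  # ['neutron-server', 'neutron-plugin-ml2'] on icehouse+
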
2  hooks/charmhelpers/contrib/openstack/templates/__init__.py  Normal file
@@ -0,0 +1,2 @@
# dummy __init__.py to fool syncer into thinking this is a syncable python
# module

15  hooks/charmhelpers/contrib/openstack/templates/ceph.conf  Normal file
@@ -0,0 +1,15 @@
###############################################################################
# [ WARNING ]
# cinder configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
[global]
{% if auth -%}
auth_supported = {{ auth }}
keyring = /etc/ceph/$cluster.$name.keyring
mon host = {{ mon_hosts }}
{% endif -%}
log to syslog = {{ use_syslog }}
err to syslog = {{ use_syslog }}
clog to syslog = {{ use_syslog }}

58
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
Normal file
58
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
Normal file
@ -0,0 +1,58 @@
global
    log {{ local_host }} local0
    log {{ local_host }} local1 notice
    maxconn 20000
    user haproxy
    group haproxy
    spread-checks 0

defaults
    log global
    mode tcp
    option tcplog
    option dontlognull
    retries 3
    timeout queue 1000
    timeout connect 1000
{% if haproxy_client_timeout -%}
    timeout client {{ haproxy_client_timeout }}
{% else -%}
    timeout client 30000
{% endif -%}

{% if haproxy_server_timeout -%}
    timeout server {{ haproxy_server_timeout }}
{% else -%}
    timeout server 30000
{% endif -%}

listen stats {{ stat_port }}
    mode http
    stats enable
    stats hide-version
    stats realm Haproxy\ Statistics
    stats uri /
    stats auth admin:password

{% if frontends -%}
{% for service, ports in service_ports.items() -%}
frontend tcp-in_{{ service }}
    bind *:{{ ports[0] }}
    {% if ipv6 -%}
    bind :::{{ ports[0] }}
    {% endif -%}
    {% for frontend in frontends -%}
    acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
    use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
    {% endfor -%}
    default_backend {{ service }}_{{ default_backend }}

{% for frontend in frontends -%}
backend {{ service }}_{{ frontend }}
    balance leastconn
    {% for unit, address in frontends[frontend]['backends'].items() -%}
    server {{ unit }} {{ address }}:{{ ports[1] }} check
    {% endfor %}
{% endfor -%}
{% endfor -%}
{% endif -%}
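A hedged illustration of the context the haproxy.cfg template above consumes;
every name, network and address below is invented for the example:

    ctxt = {
        'local_host': '127.0.0.1',
        'stat_port': ':8888',
        'haproxy_client_timeout': None,  # falls back to 30000 above
        'service_ports': {'example-api': [80, 70]},  # [frontend, backend]
        'default_backend': 'internal',
        'frontends': {
            'internal': {
                'network': '10.0.0.0/24',
                'backends': {'unit-example-api-0': '10.0.0.10'},
            },
        },
    }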
@ -0,0 +1,24 @@
{% if endpoints -%}
{% for ext_port in ext_ports -%}
Listen {{ ext_port }}
{% endfor -%}
{% for address, endpoint, ext, int in endpoints -%}
<VirtualHost {{ address }}:{{ ext }}>
    ServerName {{ endpoint }}
    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
    ProxyPass / http://localhost:{{ int }}/
    ProxyPassReverse / http://localhost:{{ int }}/
    ProxyPreserveHost on
</VirtualHost>
{% endfor -%}
<Proxy *>
    Order deny,allow
    Allow from all
</Proxy>
<Location />
    Order allow,deny
    Allow from all
</Location>
{% endif -%}
@ -0,0 +1,24 @@
{% if endpoints -%}
{% for ext_port in ext_ports -%}
Listen {{ ext_port }}
{% endfor -%}
{% for address, endpoint, ext, int in endpoints -%}
<VirtualHost {{ address }}:{{ ext }}>
    ServerName {{ endpoint }}
    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
    ProxyPass / http://localhost:{{ int }}/
    ProxyPassReverse / http://localhost:{{ int }}/
    ProxyPreserveHost on
</VirtualHost>
{% endfor -%}
<Proxy *>
    Order deny,allow
    Allow from all
</Proxy>
<Location />
    Order allow,deny
    Allow from all
</Location>
{% endif -%}
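The 'endpoints' value consumed by the two https-frontend templates above is a
list of 4-tuples. A hedged sketch of a context that would render one SSL
vhost (namespace, address and ports are invented):

    ctxt = {
        'namespace': 'example-service',
        'ext_ports': [443],
        # (address, endpoint/ServerName, external port, internal port)
        'endpoints': [('10.0.0.100', '10.0.0.100', 443, 70)],
    }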
279
hooks/charmhelpers/contrib/openstack/templating.py
Normal file
@ -0,0 +1,279 @@
import os

import six

from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
    log,
    ERROR,
    INFO
)
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES

try:
    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
except ImportError:
    # python-jinja2 may not be installed yet, or we're running unittests.
    FileSystemLoader = ChoiceLoader = Environment = exceptions = None


class OSConfigException(Exception):
    pass


def get_loader(templates_dir, os_release):
    """
    Create a jinja2.ChoiceLoader containing template dirs up to
    and including os_release. If a release's template directory
    is missing at templates_dir, it will be omitted from the loader.
    templates_dir is added to the bottom of the search list as a base
    loading dir.

    A charm may also ship a templates dir with this module
    and it will be appended to the bottom of the search list, eg::

        hooks/charmhelpers/contrib/openstack/templates

    :param templates_dir (str): Base template directory containing release
        sub-directories.
    :param os_release (str): OpenStack release codename to construct template
        loader.
    :returns: jinja2.ChoiceLoader constructed with a list of
        jinja2.FilesystemLoaders, ordered in descending
        order by OpenStack release.
    """
    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
                 for rel in six.itervalues(OPENSTACK_CODENAMES)]

    if not os.path.isdir(templates_dir):
        log('Templates directory not found @ %s.' % templates_dir,
            level=ERROR)
        raise OSConfigException

    # the bottom contains templates_dir and possibly a common templates dir
    # shipped with the helper.
    loaders = [FileSystemLoader(templates_dir)]
    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
    if os.path.isdir(helper_templates):
        loaders.append(FileSystemLoader(helper_templates))

    for rel, tmpl_dir in tmpl_dirs:
        if os.path.isdir(tmpl_dir):
            loaders.insert(0, FileSystemLoader(tmpl_dir))
        if rel == os_release:
            break
    log('Creating choice loader with dirs: %s' %
        [l.searchpath for l in loaders], level=INFO)
    return ChoiceLoader(loaders)


class OSConfigTemplate(object):
    """
    Associates a config file template with a list of context generators.
    Responsible for constructing a template context based on those generators.
    """
    def __init__(self, config_file, contexts):
        self.config_file = config_file

        if hasattr(contexts, '__call__'):
            self.contexts = [contexts]
        else:
            self.contexts = contexts

        self._complete_contexts = []

    def context(self):
        ctxt = {}
        for context in self.contexts:
            _ctxt = context()
            if _ctxt:
                ctxt.update(_ctxt)
                # track interfaces for every complete context.
                [self._complete_contexts.append(interface)
                 for interface in context.interfaces
                 if interface not in self._complete_contexts]
        return ctxt

    def complete_contexts(self):
        '''
        Return a list of interfaces that have satisfied contexts.
        '''
        if self._complete_contexts:
            return self._complete_contexts
        self.context()
        return self._complete_contexts


class OSConfigRenderer(object):
    """
    This class provides a common templating system to be used by OpenStack
    charms. It is intended to help charms share common code and templates,
    and ease the burden of managing config templates across multiple OpenStack
    releases.

    Basic usage::

        # import some common context generators from charmhelpers
        from charmhelpers.contrib.openstack import context

        # Create a renderer object for a specific OS release.
        configs = OSConfigRenderer(templates_dir='/tmp/templates',
                                   openstack_release='folsom')
        # register some config files with context generators.
        configs.register(config_file='/etc/nova/nova.conf',
                         contexts=[context.SharedDBContext(),
                                   context.AMQPContext()])
        configs.register(config_file='/etc/nova/api-paste.ini',
                         contexts=[context.IdentityServiceContext()])
        configs.register(config_file='/etc/haproxy/haproxy.conf',
                         contexts=[context.HAProxyContext()])
        # write out a single config
        configs.write('/etc/nova/nova.conf')
        # write out all registered configs
        configs.write_all()

    **OpenStack Releases and template loading**

    When the object is instantiated, it is associated with a specific OS
    release. This dictates how the template loader will be constructed.

    The constructed loader attempts to load the template from several places
    in the following order:
    - from the most recent OS release-specific template dir (if one exists)
    - the base templates_dir
    - a template directory shipped in the charm with this helper file.

    For the example above, '/tmp/templates' contains the following structure::

        /tmp/templates/nova.conf
        /tmp/templates/api-paste.ini
        /tmp/templates/grizzly/api-paste.ini
        /tmp/templates/havana/api-paste.ini

    Since it was registered with the grizzly release, it first searches
    the grizzly directory for nova.conf, then the templates dir.

    When writing api-paste.ini, it will find the template in the grizzly
    directory.

    If the object were created with folsom, it would fall back to the
    base templates dir for its api-paste.ini template.

    This system should help manage changes in config files through
    openstack releases, allowing charms to fall back to the most recently
    updated config template for a given release.

    The haproxy.conf, since it is not shipped in the templates dir, will
    be loaded from the module directory's template directory, eg
    $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
    us to ship common templates (haproxy, apache) with the helpers.

    **Context generators**

    Context generators are used to generate template contexts during hook
    execution. Doing so may require inspecting service relations, charm
    config, etc. When registered, a config file is associated with a list
    of generators. When a template is rendered and written, all context
    generators are called in a chain to generate the context dictionary
    passed to the jinja2 template. See context.py for more info.
    """
    def __init__(self, templates_dir, openstack_release):
        if not os.path.isdir(templates_dir):
            log('Could not locate templates dir %s' % templates_dir,
                level=ERROR)
            raise OSConfigException

        self.templates_dir = templates_dir
        self.openstack_release = openstack_release
        self.templates = {}
        self._tmpl_env = None

        if None in [Environment, ChoiceLoader, FileSystemLoader]:
            # if this code is running, the object is created pre-install hook.
            # jinja2 shouldn't get touched until the module is reloaded on next
            # hook execution, with proper jinja2 bits successfully imported.
            apt_install('python-jinja2')

    def register(self, config_file, contexts):
        """
        Register a config file with a list of context generators to be called
        during rendering.
        """
        self.templates[config_file] = OSConfigTemplate(config_file=config_file,
                                                       contexts=contexts)
        log('Registered config file: %s' % config_file, level=INFO)

    def _get_tmpl_env(self):
        if not self._tmpl_env:
            loader = get_loader(self.templates_dir, self.openstack_release)
            self._tmpl_env = Environment(loader=loader)

    def _get_template(self, template):
        self._get_tmpl_env()
        template = self._tmpl_env.get_template(template)
        log('Loaded template from %s' % template.filename, level=INFO)
        return template

    def render(self, config_file):
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException
        ctxt = self.templates[config_file].context()

        _tmpl = os.path.basename(config_file)
        try:
            template = self._get_template(_tmpl)
        except exceptions.TemplateNotFound:
            # if no template is found with basename, try looking for it
            # using a munged full path, eg:
            # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
            _tmpl = '_'.join(config_file.split('/')[1:])
            try:
                template = self._get_template(_tmpl)
            except exceptions.TemplateNotFound as e:
                log('Could not load template from %s by %s or %s.' %
                    (self.templates_dir, os.path.basename(config_file), _tmpl),
                    level=ERROR)
                raise e

        log('Rendering from template: %s' % _tmpl, level=INFO)
        return template.render(ctxt)

    def write(self, config_file):
        """
        Write a single config file, raises if config file is not registered.
        """
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException

        _out = self.render(config_file)

        with open(config_file, 'wb') as out:
            out.write(_out)

        log('Wrote template %s.' % config_file, level=INFO)

    def write_all(self):
        """
        Write out all registered config files.
        """
        [self.write(k) for k in six.iterkeys(self.templates)]

    def set_release(self, openstack_release):
        """
        Resets the template environment and generates a new template loader
        based on the new openstack release.
        """
        self._tmpl_env = None
        self.openstack_release = openstack_release
        self._get_tmpl_env()

    def complete_contexts(self):
        '''
        Returns a list of context interfaces that yield a complete context.
        '''
        interfaces = []
        [interfaces.extend(i.complete_contexts())
         for i in six.itervalues(self.templates)]
        return interfaces
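A hedged sketch of driving the renderer through an upgrade; the paths and
release names are illustrative:

    from charmhelpers.contrib.openstack import context

    configs = OSConfigRenderer(templates_dir='templates/',
                               openstack_release='havana')
    configs.register('/etc/nova/nova.conf', [context.SharedDBContext()])
    configs.write_all()

    # After upgrading packages, re-point the loader at the new release's
    # template tree and re-render everything.
    configs.set_release(openstack_release='icehouse')
    configs.write_all()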
625
hooks/charmhelpers/contrib/openstack/utils.py
Normal file
@ -0,0 +1,625 @@
#!/usr/bin/python

# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
from functools import wraps

import subprocess
import json
import os
import socket
import sys

import six
import yaml

from charmhelpers.core.hookenv import (
    config,
    log as juju_log,
    charm_dir,
    INFO,
    relation_ids,
    relation_set
)

from charmhelpers.contrib.storage.linux.lvm import (
    deactivate_lvm_volume_group,
    is_lvm_physical_volume,
    remove_lvm_physical_volume,
)

from charmhelpers.contrib.network.ip import (
    get_ipv6_addr
)

from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install, apt_cache, install_remote
from charmhelpers.contrib.python.packages import pip_install
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device

CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'

DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
                   'restricted main multiverse universe')


UBUNTU_OPENSTACK_RELEASE = OrderedDict([
    ('oneiric', 'diablo'),
    ('precise', 'essex'),
    ('quantal', 'folsom'),
    ('raring', 'grizzly'),
    ('saucy', 'havana'),
    ('trusty', 'icehouse'),
    ('utopic', 'juno'),
    ('vivid', 'kilo'),
])


OPENSTACK_CODENAMES = OrderedDict([
    ('2011.2', 'diablo'),
    ('2012.1', 'essex'),
    ('2012.2', 'folsom'),
    ('2013.1', 'grizzly'),
    ('2013.2', 'havana'),
    ('2014.1', 'icehouse'),
    ('2014.2', 'juno'),
    ('2015.1', 'kilo'),
])

# The ugly duckling
SWIFT_CODENAMES = OrderedDict([
    ('1.4.3', 'diablo'),
    ('1.4.8', 'essex'),
    ('1.7.4', 'folsom'),
    ('1.8.0', 'grizzly'),
    ('1.7.7', 'grizzly'),
    ('1.7.6', 'grizzly'),
    ('1.10.0', 'havana'),
    ('1.9.1', 'havana'),
    ('1.9.0', 'havana'),
    ('1.13.1', 'icehouse'),
    ('1.13.0', 'icehouse'),
    ('1.12.0', 'icehouse'),
    ('1.11.0', 'icehouse'),
    ('2.0.0', 'juno'),
    ('2.1.0', 'juno'),
    ('2.2.0', 'juno'),
    ('2.2.1', 'kilo'),
])

DEFAULT_LOOPBACK_SIZE = '5G'


def error_out(msg):
    juju_log("FATAL ERROR: %s" % msg, level='ERROR')
    sys.exit(1)


def get_os_codename_install_source(src):
    '''Derive OpenStack release codename from a given installation source.'''
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
    rel = ''
    if src is None:
        return rel
    if src in ['distro', 'distro-proposed']:
        try:
            rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
        except KeyError:
            e = 'Could not derive openstack release for '\
                'this Ubuntu release: %s' % ubuntu_rel
            error_out(e)
        return rel

    if src.startswith('cloud:'):
        ca_rel = src.split(':')[1]
        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
        return ca_rel

    # Best guess match based on deb string provided
    if src.startswith('deb') or src.startswith('ppa'):
        for k, v in six.iteritems(OPENSTACK_CODENAMES):
            if v in src:
                return v
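Hedged examples of what the parsing above yields on a trusty host (values are
illustrative):

    get_os_codename_install_source('distro')             # -> 'icehouse'
    get_os_codename_install_source('cloud:trusty-juno')  # -> 'juno'
    get_os_codename_install_source('ppa:example/kilo')   # -> 'kilo' (substring match)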
def get_os_version_install_source(src):
    codename = get_os_codename_install_source(src)
    return get_os_version_codename(codename)


def get_os_codename_version(vers):
    '''Determine OpenStack codename from version number.'''
    try:
        return OPENSTACK_CODENAMES[vers]
    except KeyError:
        e = 'Could not determine OpenStack codename for version %s' % vers
        error_out(e)


def get_os_version_codename(codename):
    '''Determine OpenStack version number from codename.'''
    for k, v in six.iteritems(OPENSTACK_CODENAMES):
        if v == codename:
            return k
    e = 'Could not derive OpenStack version for '\
        'codename: %s' % codename
    error_out(e)


def get_os_codename_package(package, fatal=True):
    '''Derive OpenStack release codename from an installed package.'''
    import apt_pkg as apt

    cache = apt_cache()

    try:
        pkg = cache[package]
    except:
        if not fatal:
            return None
        # the package is unknown to the current apt cache.
        e = 'Could not determine version of package with no installation '\
            'candidate: %s' % package
        error_out(e)

    if not pkg.current_ver:
        if not fatal:
            return None
        # package is known, but no version is currently installed.
        e = 'Could not determine version of uninstalled package: %s' % package
        error_out(e)

    vers = apt.upstream_version(pkg.current_ver.ver_str)

    try:
        if 'swift' in pkg.name:
            swift_vers = vers[:5]
            if swift_vers not in SWIFT_CODENAMES:
                # Deal with 1.10.0 upward
                swift_vers = vers[:6]
            return SWIFT_CODENAMES[swift_vers]
        else:
            vers = vers[:6]
            return OPENSTACK_CODENAMES[vers]
    except KeyError:
        e = 'Could not determine OpenStack codename for version %s' % vers
        error_out(e)


def get_os_version_package(pkg, fatal=True):
    '''Derive OpenStack version number from an installed package.'''
    codename = get_os_codename_package(pkg, fatal=fatal)

    if not codename:
        return None

    if 'swift' in pkg:
        vers_map = SWIFT_CODENAMES
    else:
        vers_map = OPENSTACK_CODENAMES

    for version, cname in six.iteritems(vers_map):
        if cname == codename:
            return version
    # e = "Could not determine OpenStack version for package: %s" % pkg
    # error_out(e)


os_rel = None


def os_release(package, base='essex'):
    '''
    Returns OpenStack release codename from a cached global.
    If the codename can not be determined from either an installed package or
    the installation source, the earliest release supported by the charm should
    be returned.
    '''
    global os_rel
    if os_rel:
        return os_rel
    os_rel = (get_os_codename_package(package, fatal=False) or
              get_os_codename_install_source(config('openstack-origin')) or
              base)
    return os_rel


def import_key(keyid):
    cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
          "--recv-keys %s" % keyid
    try:
        subprocess.check_call(cmd.split(' '))
    except subprocess.CalledProcessError:
        error_out("Error importing repo key %s" % keyid)


def configure_installation_source(rel):
    '''Configure apt installation source.'''
    if rel == 'distro':
        return
    elif rel == 'distro-proposed':
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(DISTRO_PROPOSED % ubuntu_rel)
    elif rel[:4] == "ppa:":
        src = rel
        subprocess.check_call(["add-apt-repository", "-y", src])
    elif rel[:3] == "deb":
        l = len(rel.split('|'))
        if l == 2:
            src, key = rel.split('|')
            juju_log("Importing PPA key from keyserver for %s" % src)
            import_key(key)
        elif l == 1:
            src = rel
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(src)
    elif rel[:6] == 'cloud:':
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        rel = rel.split(':')[1]
        u_rel = rel.split('-')[0]
        ca_rel = rel.split('-')[1]

        if u_rel != ubuntu_rel:
            e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
                'version (%s)' % (ca_rel, ubuntu_rel)
            error_out(e)

        if 'staging' in ca_rel:
            # staging is just a regular PPA.
            os_rel = ca_rel.split('/')[0]
            ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
            cmd = 'add-apt-repository -y %s' % ppa
            subprocess.check_call(cmd.split(' '))
            return

        # map charm config options to actual archive pockets.
        pockets = {
            'folsom': 'precise-updates/folsom',
            'folsom/updates': 'precise-updates/folsom',
            'folsom/proposed': 'precise-proposed/folsom',
            'grizzly': 'precise-updates/grizzly',
            'grizzly/updates': 'precise-updates/grizzly',
            'grizzly/proposed': 'precise-proposed/grizzly',
            'havana': 'precise-updates/havana',
            'havana/updates': 'precise-updates/havana',
            'havana/proposed': 'precise-proposed/havana',
            'icehouse': 'precise-updates/icehouse',
            'icehouse/updates': 'precise-updates/icehouse',
            'icehouse/proposed': 'precise-proposed/icehouse',
            'juno': 'trusty-updates/juno',
            'juno/updates': 'trusty-updates/juno',
            'juno/proposed': 'trusty-proposed/juno',
            'kilo': 'trusty-updates/kilo',
            'kilo/updates': 'trusty-updates/kilo',
            'kilo/proposed': 'trusty-proposed/kilo',
        }

        try:
            pocket = pockets[ca_rel]
        except KeyError:
            e = 'Invalid Cloud Archive release specified: %s' % rel
            error_out(e)

        src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
        apt_install('ubuntu-cloud-keyring', fatal=True)

        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
            f.write(src)
    else:
        error_out("Invalid openstack-release specified: %s" % rel)


def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """
    Write an rc file in the charm-delivered directory containing
    exported environment variables provided by env_vars. Any charm scripts run
    outside the juju hook environment can source this scriptrc to obtain
    updated config information necessary to perform health checks or
    service changes.
    """
    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
    if not os.path.exists(os.path.dirname(juju_rc_path)):
        os.mkdir(os.path.dirname(juju_rc_path))
    with open(juju_rc_path, 'wb') as rc_script:
        rc_script.write(
            "#!/bin/bash\n")
        [rc_script.write('export %s=%s\n' % (u, p))
         for u, p in six.iteritems(env_vars) if u != "script_path"]


def openstack_upgrade_available(package):
    """
    Determines if an OpenStack upgrade is available from installation
    source, based on version of installed package.

    :param package: str: Name of installed package.

    :returns: bool: True if configured installation source offers
                    a newer version of package.
    """

    import apt_pkg as apt
    src = config('openstack-origin')
    cur_vers = get_os_version_package(package)
    available_vers = get_os_version_install_source(src)
    apt.init()
    return apt.version_compare(available_vers, cur_vers) == 1
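A hedged sketch of how a config-changed hook typically consumes this check;
do_openstack_upgrade is a hypothetical per-charm helper and 'nova-common' an
illustrative package name:

    if openstack_upgrade_available('nova-common'):
        # e.g. configure_installation_source(), apt upgrade, re-render configs
        do_openstack_upgrade(configs)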
def ensure_block_device(block_device):
    '''
    Confirm block_device, create as loopback if necessary.

    :param block_device: str: Full path of block device to ensure.

    :returns: str: Full path of ensured block device.
    '''
    _none = ['None', 'none', None]
    if (block_device in _none):
        error_out('prepare_storage(): Missing required input: block_device=%s.'
                  % block_device)

    if block_device.startswith('/dev/'):
        bdev = block_device
    elif block_device.startswith('/'):
        _bd = block_device.split('|')
        if len(_bd) == 2:
            bdev, size = _bd
        else:
            bdev = block_device
            size = DEFAULT_LOOPBACK_SIZE
        bdev = ensure_loopback_device(bdev, size)
    else:
        bdev = '/dev/%s' % block_device

    if not is_block_device(bdev):
        error_out('Failed to locate valid block device at %s' % bdev)

    return bdev
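Hedged examples of the three accepted input forms (paths are illustrative):

    ensure_block_device('/dev/vdb')            # real device, used as-is
    ensure_block_device('sdb')                 # short name -> '/dev/sdb'
    ensure_block_device('/srv/store.img|10G')  # loopback backed by a 10G file
                                               # (size defaults to 5G if omitted)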
def clean_storage(block_device):
    '''
    Ensures a block device is clean. That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    for mp, d in mounts():
        if d == block_device:
            juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
                     (d, mp), level=INFO)
            umount(mp, persist=True)

    if is_lvm_physical_volume(block_device):
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
    else:
        zap_disk(block_device)


def is_ip(address):
    """
    Returns True if address is a valid IP address.
    """
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(address)
        return True
    except socket.error:
        return False


def ns_query(address):
    try:
        import dns.resolver
    except ImportError:
        apt_install('python-dnspython')
        import dns.resolver

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, six.string_types):
        rtype = 'A'
    else:
        return None

    answers = dns.resolver.query(address, rtype)
    if answers:
        return str(answers[0])
    return None


def get_host_ip(hostname):
    """
    Resolves the IP for a given hostname, or returns
    the input if it is already an IP.
    """
    if is_ip(hostname):
        return hostname

    return ns_query(hostname)


def get_hostname(address, fqdn=True):
    """
    Resolves hostname for given IP, or returns the input
    if it is already a hostname.
    """
    if is_ip(address):
        try:
            import dns.reversename
        except ImportError:
            apt_install('python-dnspython')
            import dns.reversename

        rev = dns.reversename.from_address(address)
        result = ns_query(rev)
        if not result:
            return None
    else:
        result = address

    if fqdn:
        # strip trailing .
        if result.endswith('.'):
            return result[:-1]
        else:
            return result
    else:
        return result.split('.')[0]


def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
    mm_map = {}
    if os.path.isfile(mm_file):
        with open(mm_file, 'r') as f:
            mm_map = json.load(f)
    return mm_map


def sync_db_with_multi_ipv6_addresses(database, database_user,
                                      relation_prefix=None):
    hosts = get_ipv6_addr(dynamic_only=False)

    kwargs = {'database': database,
              'username': database_user,
              'hostname': json.dumps(hosts)}

    if relation_prefix:
        for key in list(kwargs.keys()):
            kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
            del kwargs[key]

    for rid in relation_ids('shared-db'):
        relation_set(relation_id=rid, **kwargs)


def os_requires_version(ostack_release, pkg):
    """
    Decorator for hook to specify minimum supported release
    """
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args):
            if os_release(pkg) < ostack_release:
                raise Exception("This hook is not supported on releases"
                                " before %s" % ostack_release)
            f(*args)
        return wrapped_f
    return wrap
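A hedged usage sketch for the decorator above; the hook name is invented.
The guard relies on the release codenames sorting alphabetically
('essex' < 'folsom' < ... < 'kilo'), which holds for OPENSTACK_CODENAMES:

    @os_requires_version('icehouse', 'nova-common')
    def cluster_changed():
        pass  # only runs once the installed release is >= icehouse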
def git_install_requested():
    """Returns true if openstack-origin-git is specified."""
    return config('openstack-origin-git') != "None"


requirements_dir = None


def git_clone_and_install(file_name, core_project):
    """Clone/install all OpenStack repos specified in yaml config file."""
    global requirements_dir

    if file_name == "None":
        return

    yaml_file = os.path.join(charm_dir(), file_name)

    # clone/install the requirements project first
    installed = _git_clone_and_install_subset(yaml_file,
                                              whitelist=['requirements'])
    if 'requirements' not in installed:
        error_out('requirements git repository must be specified')

    # clone/install all other projects except requirements and the core project
    blacklist = ['requirements', core_project]
    _git_clone_and_install_subset(yaml_file, blacklist=blacklist,
                                  update_requirements=True)

    # clone/install the core project
    whitelist = [core_project]
    installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
                                              update_requirements=True)
    if core_project not in installed:
        error_out('{} git repository must be specified'.format(core_project))


def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
                                  update_requirements=False):
    """Clone/install subset of OpenStack repos specified in yaml config file."""
    global requirements_dir
    installed = []

    with open(yaml_file, 'r') as fd:
        projects = yaml.load(fd)
        for proj, val in projects.items():
            # The project subset is chosen based on the following 3 rules:
            # 1) If project is in blacklist, we don't clone/install it, period.
            # 2) If whitelist is empty, we clone/install everything else.
            # 3) If whitelist is not empty, we clone/install everything in the
            #    whitelist.
            if proj in blacklist:
                continue
            if whitelist and proj not in whitelist:
                continue
            repo = val['repository']
            branch = val['branch']
            repo_dir = _git_clone_and_install_single(repo, branch,
                                                     update_requirements)
            if proj == 'requirements':
                requirements_dir = repo_dir
            installed.append(proj)
    return installed


def _git_clone_and_install_single(repo, branch, update_requirements=False):
    """Clone and install a single git repository."""
    dest_parent_dir = "/mnt/openstack-git/"
    dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))

    if not os.path.exists(dest_parent_dir):
        juju_log('Host dir not mounted at {}. '
                 'Creating directory there instead.'.format(dest_parent_dir))
        os.mkdir(dest_parent_dir)

    if not os.path.exists(dest_dir):
        juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
        repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
    else:
        repo_dir = dest_dir

    if update_requirements:
        if not requirements_dir:
            error_out('requirements repo must be cloned before '
                      'updating from global requirements.')
        _git_update_requirements(repo_dir, requirements_dir)

    juju_log('Installing git repo from dir: {}'.format(repo_dir))
    pip_install(repo_dir)

    return repo_dir


def _git_update_requirements(package_dir, reqs_dir):
    """Update from global requirements.

    Update an OpenStack git directory's requirements.txt and
    test-requirements.txt from global-requirements.txt."""
    orig_dir = os.getcwd()
    os.chdir(reqs_dir)
    cmd = "python update.py {}".format(package_dir)
    try:
        subprocess.check_call(cmd.split(' '))
    except subprocess.CalledProcessError:
        package = os.path.basename(package_dir)
        error_out("Error updating {} from global-requirements.txt".format(package))
    os.chdir(orig_dir)
0
hooks/charmhelpers/contrib/python/__init__.py
Normal file
77
hooks/charmhelpers/contrib/python/packages.py
Normal file
@ -0,0 +1,77 @@
#!/usr/bin/env python
# coding: utf-8

__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"

from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import log

try:
    from pip import main as pip_execute
except ImportError:
    apt_update()
    apt_install('python-pip')
    from pip import main as pip_execute


def parse_options(given, available):
    """Given a set of options, check if available"""
    for key, value in sorted(given.items()):
        if key in available:
            yield "--{0}={1}".format(key, value)


def pip_install_requirements(requirements, **options):
    """Install a requirements file."""
    command = ["install"]

    available_options = ('proxy', 'src', 'log', )
    for option in parse_options(options, available_options):
        command.append(option)

    # pass the flag and its value as separate argv elements so pip's
    # option parser sees the correct filename.
    command.extend(["-r", requirements])
    log("Installing from file: {} with options: {}".format(requirements,
                                                           command))
    pip_execute(command)


def pip_install(package, fatal=False, **options):
    """Install a python package"""
    command = ["install"]

    available_options = ('proxy', 'src', 'log', "index-url", )
    for option in parse_options(options, available_options):
        command.append(option)

    if isinstance(package, list):
        command.extend(package)
    else:
        command.append(package)

    log("Installing {} package with options: {}".format(package,
                                                        command))
    pip_execute(command)


def pip_uninstall(package, **options):
    """Uninstall a python package"""
    command = ["uninstall", "-q", "-y"]

    available_options = ('proxy', 'log', )
    for option in parse_options(options, available_options):
        command.append(option)

    if isinstance(package, list):
        command.extend(package)
    else:
        command.append(package)

    log("Uninstalling {} package with options: {}".format(package,
                                                          command))
    pip_execute(command)


def pip_list():
    """Returns the list of currently installed python packages
    """
    return pip_execute(["list"])
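A hedged example of calling these helpers; package names and the proxy URL
are illustrative:

    # Single package via an HTTP proxy (expands to '--proxy=...').
    pip_install('six', proxy='http://squid.internal:3128')

    # Several packages with a custom index; hyphenated options must be
    # passed via a dict.
    pip_install(['pbr', 'pyyaml'],
                **{'index-url': 'https://pypi.example.com/simple'})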
428
hooks/charmhelpers/contrib/storage/linux/ceph.py
Normal file
@ -0,0 +1,428 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2012 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# This file is sourced from lp:openstack-charm-helpers
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# James Page <james.page@ubuntu.com>
|
||||||
|
# Adam Gandelman <adamg@ubuntu.com>
|
||||||
|
#
|
||||||
|
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
|
||||||
|
from subprocess import (
|
||||||
|
check_call,
|
||||||
|
check_output,
|
||||||
|
CalledProcessError,
|
||||||
|
)
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
relation_get,
|
||||||
|
relation_ids,
|
||||||
|
related_units,
|
||||||
|
log,
|
||||||
|
DEBUG,
|
||||||
|
INFO,
|
||||||
|
WARNING,
|
||||||
|
ERROR,
|
||||||
|
)
|
||||||
|
from charmhelpers.core.host import (
|
||||||
|
mount,
|
||||||
|
mounts,
|
||||||
|
service_start,
|
||||||
|
service_stop,
|
||||||
|
service_running,
|
||||||
|
umount,
|
||||||
|
)
|
||||||
|
from charmhelpers.fetch import (
|
||||||
|
apt_install,
|
||||||
|
)
|
||||||
|
|
||||||
|
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
|
||||||
|
KEYFILE = '/etc/ceph/ceph.client.{}.key'
|
||||||
|
|
||||||
|
CEPH_CONF = """[global]
|
||||||
|
auth supported = {auth}
|
||||||
|
keyring = {keyring}
|
||||||
|
mon host = {mon_hosts}
|
||||||
|
log to syslog = {use_syslog}
|
||||||
|
err to syslog = {use_syslog}
|
||||||
|
clog to syslog = {use_syslog}
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def install():
|
||||||
|
"""Basic Ceph client installation."""
|
||||||
|
ceph_dir = "/etc/ceph"
|
||||||
|
if not os.path.exists(ceph_dir):
|
||||||
|
os.mkdir(ceph_dir)
|
||||||
|
|
||||||
|
apt_install('ceph-common', fatal=True)
|
||||||
|
|
||||||
|
|
||||||
|
def rbd_exists(service, pool, rbd_img):
|
||||||
|
"""Check to see if a RADOS block device exists."""
|
||||||
|
try:
|
||||||
|
out = check_output(['rbd', 'list', '--id',
|
||||||
|
service, '--pool', pool]).decode('UTF-8')
|
||||||
|
except CalledProcessError:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return rbd_img in out
|
||||||
|
|
||||||
|
|
||||||
|
def create_rbd_image(service, pool, image, sizemb):
|
||||||
|
"""Create a new RADOS block device."""
|
||||||
|
cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
|
||||||
|
'--pool', pool]
|
||||||
|
check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def pool_exists(service, name):
|
||||||
|
"""Check to see if a RADOS pool already exists."""
|
||||||
|
try:
|
||||||
|
out = check_output(['rados', '--id', service,
|
||||||
|
'lspools']).decode('UTF-8')
|
||||||
|
except CalledProcessError:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return name in out
|
||||||
|
|
||||||
|
|
||||||
|
def get_osds(service):
|
||||||
|
"""Return a list of all Ceph Object Storage Daemons currently in the
|
||||||
|
cluster.
|
||||||
|
"""
|
||||||
|
version = ceph_version()
|
||||||
|
if version and version >= '0.56':
|
||||||
|
return json.loads(check_output(['ceph', '--id', service,
|
||||||
|
'osd', 'ls',
|
||||||
|
'--format=json']).decode('UTF-8'))
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def create_pool(service, name, replicas=3):
|
||||||
|
"""Create a new RADOS pool."""
|
||||||
|
if pool_exists(service, name):
|
||||||
|
log("Ceph pool {} already exists, skipping creation".format(name),
|
||||||
|
level=WARNING)
|
||||||
|
return
|
||||||
|
|
||||||
|
# Calculate the number of placement groups based
|
||||||
|
# on upstream recommended best practices.
|
||||||
|
osds = get_osds(service)
|
||||||
|
if osds:
|
||||||
|
pgnum = (len(osds) * 100 // replicas)
|
||||||
|
else:
|
||||||
|
# NOTE(james-page): Default to 200 for older ceph versions
|
||||||
|
# which don't support OSD query from cli
|
||||||
|
pgnum = 200
|
||||||
|
|
||||||
|
cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
|
||||||
|
check_call(cmd)
|
||||||
|
|
||||||
|
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
|
||||||
|
str(replicas)]
|
||||||
|
check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def delete_pool(service, name):
|
||||||
|
"""Delete a RADOS pool from ceph."""
|
||||||
|
cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
|
||||||
|
'--yes-i-really-really-mean-it']
|
||||||
|
check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def _keyfile_path(service):
|
||||||
|
return KEYFILE.format(service)
|
||||||
|
|
||||||
|
|
||||||
|
def _keyring_path(service):
|
||||||
|
return KEYRING.format(service)
|
||||||
|
|
||||||
|
|
||||||
|
def create_keyring(service, key):
|
||||||
|
"""Create a new Ceph keyring containing key."""
|
||||||
|
keyring = _keyring_path(service)
|
||||||
|
if os.path.exists(keyring):
|
||||||
|
log('Ceph keyring exists at %s.' % keyring, level=WARNING)
|
||||||
|
return
|
||||||
|
|
||||||
|
cmd = ['ceph-authtool', keyring, '--create-keyring',
|
||||||
|
'--name=client.{}'.format(service), '--add-key={}'.format(key)]
|
||||||
|
check_call(cmd)
|
||||||
|
log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
|
||||||
|
|
||||||
|
|
||||||
|
def delete_keyring(service):
|
||||||
|
"""Delete an existing Ceph keyring."""
|
||||||
|
keyring = _keyring_path(service)
|
||||||
|
if not os.path.exists(keyring):
|
||||||
|
log('Keyring does not exist at %s' % keyring, level=WARNING)
|
||||||
|
return
|
||||||
|
|
||||||
|
os.remove(keyring)
|
||||||
|
log('Deleted ring at %s.' % keyring, level=INFO)
|
||||||
|
|
||||||
|
|
||||||
|
def create_key_file(service, key):
|
||||||
|
"""Create a file containing key."""
|
||||||
|
keyfile = _keyfile_path(service)
|
||||||
|
if os.path.exists(keyfile):
|
||||||
|
log('Keyfile exists at %s.' % keyfile, level=WARNING)
|
||||||
|
return
|
||||||
|
|
||||||
|
with open(keyfile, 'w') as fd:
|
||||||
|
fd.write(key)
|
||||||
|
|
||||||
|
log('Created new keyfile at %s.' % keyfile, level=INFO)
|
||||||
|
|
||||||
|
|
||||||
|
def get_ceph_nodes():
|
||||||
|
"""Query named relation 'ceph' to determine current nodes."""
|
||||||
|
hosts = []
|
||||||
|
for r_id in relation_ids('ceph'):
|
||||||
|
for unit in related_units(r_id):
|
||||||
|
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
|
||||||
|
|
||||||
|
return hosts
|
||||||
|
|
||||||
|
|
||||||
|
def configure(service, key, auth, use_syslog):
|
||||||
|
"""Perform basic configuration of Ceph."""
|
||||||
|
create_keyring(service, key)
|
||||||
|
create_key_file(service, key)
|
||||||
|
hosts = get_ceph_nodes()
|
||||||
|
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
|
||||||
|
ceph_conf.write(CEPH_CONF.format(auth=auth,
|
||||||
|
keyring=_keyring_path(service),
|
||||||
|
mon_hosts=",".join(map(str, hosts)),
|
||||||
|
use_syslog=use_syslog))
|
||||||
|
modprobe('rbd')
|
||||||
|
|
||||||
|
|
||||||
|
def image_mapped(name):
|
||||||
|
"""Determine whether a RADOS block device is mapped locally."""
|
||||||
|
try:
|
||||||
|
out = check_output(['rbd', 'showmapped']).decode('UTF-8')
|
||||||
|
except CalledProcessError:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return name in out
|
||||||
|
|
||||||
|
|
||||||
|
def map_block_storage(service, pool, image):
|
||||||
|
"""Map a RADOS block device for local use."""
|
||||||
|
cmd = [
|
||||||
|
'rbd',
|
||||||
|
'map',
|
||||||
|
'{}/{}'.format(pool, image),
|
||||||
|
'--user',
|
||||||
|
service,
|
||||||
|
'--secret',
|
||||||
|
_keyfile_path(service),
|
||||||
|
]
|
||||||
|
check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def filesystem_mounted(fs):
|
||||||
|
"""Determine whether a filesytems is already mounted."""
|
||||||
|
return fs in [f for f, m in mounts()]
|
||||||
|
|
||||||
|
|
||||||
|
def make_filesystem(blk_device, fstype='ext4', timeout=10):
|
||||||
|
"""Make a new filesystem on the specified block device."""
|
||||||
|
count = 0
|
||||||
|
e_noent = os.errno.ENOENT
|
||||||
|
while not os.path.exists(blk_device):
|
||||||
|
if count >= timeout:
|
||||||
|
log('Gave up waiting on block device %s' % blk_device,
|
||||||
|
level=ERROR)
|
||||||
|
raise IOError(e_noent, os.strerror(e_noent), blk_device)
|
||||||
|
|
||||||
|
log('Waiting for block device %s to appear' % blk_device,
|
||||||
|
level=DEBUG)
|
||||||
|
count += 1
|
||||||
|
time.sleep(1)
|
||||||
|
else:
|
||||||
|
log('Formatting block device %s as filesystem %s.' %
|
||||||
|
(blk_device, fstype), level=INFO)
|
||||||
|
check_call(['mkfs', '-t', fstype, blk_device])
|
||||||
|
|
||||||
|
|
||||||
|
def place_data_on_block_device(blk_device, data_src_dst):
|
||||||
|
"""Migrate data in data_src_dst to blk_device and then remount."""
|
||||||
|
# mount block device into /mnt
|
||||||
|
mount(blk_device, '/mnt')
|
||||||
|
# copy data to /mnt
|
||||||
|
copy_files(data_src_dst, '/mnt')
|
||||||
|
# umount block device
|
||||||
|
umount('/mnt')
|
||||||
|
# Grab user/group ID's from original source
|
||||||
|
_dir = os.stat(data_src_dst)
|
||||||
|
uid = _dir.st_uid
|
||||||
|
gid = _dir.st_gid
|
||||||
|
# re-mount where the data should originally be
|
||||||
|
# TODO: persist is currently a NO-OP in core.host
|
||||||
|
mount(blk_device, data_src_dst, persist=True)
|
||||||
|
# ensure original ownership of new mount.
|
||||||
|
os.chown(data_src_dst, uid, gid)
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: re-use
|
||||||
|
def modprobe(module):
|
||||||
|
"""Load a kernel module and configure for auto-load on reboot."""
|
||||||
|
log('Loading kernel module', level=INFO)
|
||||||
|
cmd = ['modprobe', module]
|
||||||
|
check_call(cmd)
|
||||||
|
with open('/etc/modules', 'r+') as modules:
|
||||||
|
if module not in modules.read():
|
||||||
|
modules.write(module)
|
||||||
|
|
||||||
|
|
||||||
|
def copy_files(src, dst, symlinks=False, ignore=None):
|
||||||
|
"""Copy files from src to dst."""
|
||||||
|
for item in os.listdir(src):
|
||||||
|
s = os.path.join(src, item)
|
||||||
|
d = os.path.join(dst, item)
|
||||||
|
if os.path.isdir(s):
|
||||||
|
shutil.copytree(s, d, symlinks, ignore)
|
||||||
|
else:
|
||||||
|
shutil.copy2(s, d)
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
|
||||||
|
blk_device, fstype, system_services=[],
|
||||||
|
replicas=3):
|
||||||
|
"""NOTE: This function must only be called from a single service unit for
|
||||||
|
the same rbd_img otherwise data loss will occur.
|
||||||
|
|
||||||
|
Ensures given pool and RBD image exists, is mapped to a block device,
|
||||||
|
and the device is formatted and mounted at the given mount_point.
|
||||||
|
|
||||||
|
If formatting a device for the first time, data existing at mount_point
|
||||||
|
will be migrated to the RBD device before being re-mounted.
|
||||||
|
|
||||||
|
All services listed in system_services will be stopped prior to data
|
||||||
|
migration and restarted when complete.
|
||||||
|
"""
|
||||||
|
# Ensure pool, RBD image, RBD mappings are in place.
|
||||||
|
if not pool_exists(service, pool):
|
||||||
|
log('Creating new pool {}.'.format(pool), level=INFO)
|
||||||
|
create_pool(service, pool, replicas=replicas)
|
||||||
|
|
||||||
|
if not rbd_exists(service, pool, rbd_img):
|
||||||
|
log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
|
||||||
|
create_rbd_image(service, pool, rbd_img, sizemb)
|
||||||
|
|
||||||
|
if not image_mapped(rbd_img):
|
||||||
|
log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
|
||||||
            level=INFO)
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy! Need to at least add an fstab entry
    # otherwise this hook will blow away existing data if its executed
    # after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

    for svc in system_services:
        if service_running(svc):
            log('Stopping service {} prior to migrating data.'
                .format(svc), level=DEBUG)
            service_stop(svc)

    place_data_on_block_device(blk_device, mount_point)

    for svc in system_services:
        log('Starting service {} after migrating data.'
            .format(svc), level=DEBUG)
        service_start(svc)


def ensure_ceph_keyring(service, user=None, group=None):
    """Ensures a ceph keyring is created for a named service and optionally
    ensures user and group ownership.

    Returns False if no ceph key is available in relation state.
    """
    key = None
    for rid in relation_ids('ceph'):
        for unit in related_units(rid):
            key = relation_get('key', rid=rid, unit=unit)
            if key:
                break

    if not key:
        return False

    create_keyring(service=service, key=key)
    keyring = _keyring_path(service)
    if user and group:
        check_call(['chown', '%s.%s' % (user, group), keyring])

    return True


def ceph_version():
    """Retrieve the local version of ceph."""
    if os.path.exists('/usr/bin/ceph'):
        cmd = ['ceph', '-v']
        output = check_output(cmd).decode('US-ASCII')
        output = output.split()
        if len(output) > 3:
            return output[2]
        else:
            return None
    else:
        return None


class CephBrokerRq(object):
    """Ceph broker request.

    Multiple operations can be added to a request and sent to the Ceph broker
    to be executed.

    Request is json-encoded for sending over the wire.

    The API is versioned and defaults to version 1.
    """
    def __init__(self, api_version=1):
        self.api_version = api_version
        self.ops = []

    def add_op_create_pool(self, name, replica_count=3):
        self.ops.append({'op': 'create-pool', 'name': name,
                         'replicas': replica_count})

    @property
    def request(self):
        return json.dumps({'api-version': self.api_version, 'ops': self.ops})


class CephBrokerRsp(object):
    """Ceph broker response.

    Response is json-decoded and contents provided as methods/properties.

    The API is versioned and defaults to version 1.
    """
    def __init__(self, encoded_rsp):
        self.api_version = None
        self.rsp = json.loads(encoded_rsp)

    @property
    def exit_code(self):
        return self.rsp.get('exit-code')

    @property
    def exit_msg(self):
        return self.rsp.get('stderr')
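
A minimal usage sketch for the broker classes above (not part of the commit; the pool name is illustrative):

# Build a request for one replicated pool; rq.request is the JSON payload
# a charm would publish on its ceph relation.
rq = CephBrokerRq()
rq.add_op_create_pool(name='rgw-data', replica_count=3)
print(rq.request)  # {"api-version": 1, "ops": [...]}

# Decode a broker reply the same way; exit-code 0 means success.
rsp = CephBrokerRsp('{"exit-code": 0}')
assert rsp.exit_code == 0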
62 hooks/charmhelpers/contrib/storage/linux/loopback.py Normal file
@@ -0,0 +1,62 @@
import os
import re
from subprocess import (
    check_call,
    check_output,
)

import six


##################################################
# loopback device helpers.
##################################################
def loopback_devices():
    '''
    Parse through 'losetup -a' output to determine currently mapped
    loopback devices. Output is expected to look like:

        /dev/loop0: [0807]:961814 (/tmp/my.img)

    :returns: dict: a dict mapping {loopback_dev: backing_file}
    '''
    loopbacks = {}
    cmd = ['losetup', '-a']
    devs = [d.strip().split(' ') for d in
            check_output(cmd).splitlines() if d != '']
    for dev, _, f in devs:
        loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
    return loopbacks


def create_loopback(file_path):
    '''
    Create a loopback device for a given backing file.

    :returns: str: Full path to new loopback device (eg, /dev/loop0)
    '''
    file_path = os.path.abspath(file_path)
    check_call(['losetup', '--find', file_path])
    for d, f in six.iteritems(loopback_devices()):
        if f == file_path:
            return d


def ensure_loopback_device(path, size):
    '''
    Ensure a loopback device exists for a given backing file path and size.
    If a loopback device is not already mapped to the file, a new one will
    be created.

    TODO: Confirm size of found loopback device.

    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
    '''
    for d, f in six.iteritems(loopback_devices()):
        if f == path:
            return d

    if not os.path.exists(path):
        cmd = ['truncate', '--size', size, path]
        check_call(cmd)

    return create_loopback(path)
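
A minimal usage sketch (not part of the commit; requires root plus the losetup and truncate binaries, and the backing path is illustrative):

# Back a 10M loop device with a sparse file, reusing any existing mapping.
dev = ensure_loopback_device('/tmp/backing.img', '10M')
print(dev)  # e.g. /dev/loop0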
89 hooks/charmhelpers/contrib/storage/linux/lvm.py Normal file
@@ -0,0 +1,89 @@
from subprocess import (
    CalledProcessError,
    check_call,
    check_output,
    Popen,
    PIPE,
)


##################################################
# LVM helpers.
##################################################
def deactivate_lvm_volume_group(block_device):
    '''
    Deactivate any volume group associated with an LVM physical volume.

    :param block_device: str: Full path to LVM physical volume
    '''
    vg = list_lvm_volume_group(block_device)
    if vg:
        cmd = ['vgchange', '-an', vg]
        check_call(cmd)


def is_lvm_physical_volume(block_device):
    '''
    Determine whether a block device is initialized as an LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: boolean: True if block device is a PV, False if not.
    '''
    try:
        check_output(['pvdisplay', block_device])
        return True
    except CalledProcessError:
        return False


def remove_lvm_physical_volume(block_device):
    '''
    Remove LVM PV signatures from a given block device.

    :param block_device: str: Full path of block device to scrub.
    '''
    p = Popen(['pvremove', '-ff', block_device],
              stdin=PIPE)
    p.communicate(input='y\n')


def list_lvm_volume_group(block_device):
    '''
    List LVM volume group associated with a given block device.

    Assumes block device is a valid LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: str: Name of volume group associated with block device or None
    '''
    vg = None
    pvd = check_output(['pvdisplay', block_device]).splitlines()
    for l in pvd:
        l = l.decode('UTF-8')
        if l.strip().startswith('VG Name'):
            vg = ' '.join(l.strip().split()[2:])
    return vg


def create_lvm_physical_volume(block_device):
    '''
    Initialize a block device as an LVM physical volume.

    :param block_device: str: Full path of block device to initialize.
    '''
    check_call(['pvcreate', block_device])


def create_lvm_volume_group(volume_group, block_device):
    '''
    Create an LVM volume group backed by a given block device.

    Assumes block device has already been initialized as an LVM PV.

    :param volume_group: str: Name of volume group to create.
    :param block_device: str: Full path of PV-initialized block device.
    '''
    check_call(['vgcreate', volume_group, block_device])
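
A minimal usage sketch tying the helpers above together (not part of the commit; requires root and the LVM2 tools, and the device/group names are illustrative):

# Initialise a device as a PV, then build a volume group on it.
if not is_lvm_physical_volume('/dev/loop0'):
    create_lvm_physical_volume('/dev/loop0')
create_lvm_volume_group('vg-test', '/dev/loop0')
assert list_lvm_volume_group('/dev/loop0') == 'vg-test'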
@@ -30,7 +30,8 @@ def zap_disk(block_device):
     # sometimes sgdisk exits non-zero; this is OK, dd will clean up
     call(['sgdisk', '--zap-all', '--mbrtogpt',
           '--clear', block_device])
-    dev_end = check_output(['blockdev', '--getsz', block_device])
+    dev_end = check_output(['blockdev', '--getsz',
+                            block_device]).decode('UTF-8')
     gpt_end = int(dev_end.split()[0]) - 100
     check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                 'bs=1M', 'count=1'])
@@ -47,7 +48,7 @@ def is_device_mounted(device):
     it doesn't.
     '''
     is_partition = bool(re.search(r".*[0-9]+\b", device))
-    out = check_output(['mount'])
+    out = check_output(['mount']).decode('UTF-8')
     if is_partition:
         return bool(re.search(device + r"\b", out))
     return bool(re.search(device + r"[0-9]+\b", out))
41 hooks/charmhelpers/core/decorators.py Normal file
@@ -0,0 +1,41 @@
#
# Copyright 2014 Canonical Ltd.
#
# Authors:
#  Edward Hope-Morley <opentastic@gmail.com>
#

import time

from charmhelpers.core.hookenv import (
    log,
    INFO,
)


def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    """If the decorated function raises exception exc_type, allow num_retries
    retry attempts before raising the exception.
    """
    def _retry_on_exception_inner_1(f):
        def _retry_on_exception_inner_2(*args, **kwargs):
            retries = num_retries
            multiplier = 1
            while True:
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if not retries:
                        raise

                delay = base_delay * multiplier
                multiplier += 1
                log("Retrying '%s' %d more times (delay=%s)" %
                    (f.__name__, retries, delay), level=INFO)
                retries -= 1
                if delay:
                    time.sleep(delay)

        return _retry_on_exception_inner_2

    return _retry_on_exception_inner_1
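
A minimal usage sketch for the decorator (not part of the commit; the decorated function is illustrative):

# Retry a flaky call three times, with delays of 1s, 2s and 3s between
# attempts for base_delay=1, before letting the final IOError propagate.
@retry_on_exception(3, base_delay=1, exc_type=IOError)
def read_keyring():
    with open('/etc/ceph/keyring.rados.gateway') as f:
        return f.read()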
@@ -3,10 +3,11 @@
 
 __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
 
+import io
 import os
 
 
-class Fstab(file):
+class Fstab(io.FileIO):
     """This class extends file in order to implement a file reader/writer
     for file `/etc/fstab`
     """
@@ -24,8 +25,8 @@ class Fstab(file):
             options = "defaults"
 
         self.options = options
-        self.d = d
-        self.p = p
+        self.d = int(d)
+        self.p = int(p)
 
     def __eq__(self, o):
         return str(self) == str(o)
@@ -45,7 +46,7 @@ class Fstab(file):
             self._path = path
         else:
             self._path = self.DEFAULT_PATH
-        file.__init__(self, self._path, 'r+')
+        super(Fstab, self).__init__(self._path, 'rb+')
 
     def _hydrate_entry(self, line):
         # NOTE: use split with no arguments to split on any
@@ -58,8 +59,9 @@ class Fstab(file):
     def entries(self):
         self.seek(0)
         for line in self.readlines():
+            line = line.decode('us-ascii')
             try:
-                if not line.startswith("#"):
+                if line.strip() and not line.startswith("#"):
                     yield self._hydrate_entry(line)
             except ValueError:
                 pass
@@ -75,14 +77,14 @@ class Fstab(file):
         if self.get_entry_by_attr('device', entry.device):
             return False
 
-        self.write(str(entry) + '\n')
+        self.write((str(entry) + '\n').encode('us-ascii'))
         self.truncate()
         return entry
 
     def remove_entry(self, entry):
         self.seek(0)
 
-        lines = self.readlines()
+        lines = [l.decode('us-ascii') for l in self.readlines()]
 
         found = False
         for index, line in enumerate(lines):
@@ -97,7 +99,7 @@ class Fstab(file):
             lines.remove(line)
 
         self.seek(0)
-        self.write(''.join(lines))
+        self.write(''.join(lines).encode('us-ascii'))
         self.truncate()
         return True
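
A minimal usage sketch for the ported class (not part of the commit; requires permission to open /etc/fstab read-write, and the printed attributes follow the Entry class defined in this file):

# io.FileIO reads return bytes; entries decodes them internally, so callers
# still iterate plain Entry objects.
fstab = Fstab()
for entry in fstab.entries:
    print(entry.device, entry.mountpoint, entry.filesystem)
fstab.close()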
@@ -9,9 +9,14 @@ import json
 import yaml
 import subprocess
 import sys
-import UserDict
 from subprocess import CalledProcessError
 
+import six
+if not six.PY3:
+    from UserDict import UserDict
+else:
+    from collections import UserDict
+
 CRITICAL = "CRITICAL"
 ERROR = "ERROR"
 WARNING = "WARNING"
@@ -63,16 +68,18 @@ def log(message, level=None):
     command = ['juju-log']
     if level:
         command += ['-l', level]
+    if not isinstance(message, six.string_types):
+        message = repr(message)
     command += [message]
     subprocess.call(command)
 
 
-class Serializable(UserDict.IterableUserDict):
+class Serializable(UserDict):
     """Wrapper, an object that can be serialized to yaml or json"""
 
     def __init__(self, obj):
         # wrap the object
-        UserDict.IterableUserDict.__init__(self)
+        UserDict.__init__(self)
         self.data = obj
 
     def __getattr__(self, attr):
@@ -218,7 +225,7 @@ class Config(dict):
         prev_keys = []
         if self._prev_dict is not None:
             prev_keys = self._prev_dict.keys()
-        return list(set(prev_keys + dict.keys(self)))
+        return list(set(prev_keys + list(dict.keys(self))))
 
     def load_previous(self, path=None):
         """Load previous copy of config from disk.
@@ -269,7 +276,7 @@ class Config(dict):
 
         """
         if self._prev_dict:
-            for k, v in self._prev_dict.iteritems():
+            for k, v in six.iteritems(self._prev_dict):
                 if k not in self:
                     self[k] = v
         with open(self.path, 'w') as f:
@@ -284,7 +291,8 @@ def config(scope=None):
         config_cmd_line.append(scope)
     config_cmd_line.append('--format=json')
     try:
-        config_data = json.loads(subprocess.check_output(config_cmd_line))
+        config_data = json.loads(
+            subprocess.check_output(config_cmd_line).decode('UTF-8'))
         if scope is not None:
             return config_data
         return Config(config_data)
@@ -303,10 +311,10 @@ def relation_get(attribute=None, unit=None, rid=None):
     if unit:
         _args.append(unit)
     try:
-        return json.loads(subprocess.check_output(_args))
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
     except ValueError:
         return None
-    except CalledProcessError, e:
+    except CalledProcessError as e:
         if e.returncode == 2:
             return None
         raise
@@ -318,7 +326,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs):
     relation_cmd_line = ['relation-set']
     if relation_id is not None:
         relation_cmd_line.extend(('-r', relation_id))
-    for k, v in (relation_settings.items() + kwargs.items()):
+    for k, v in (list(relation_settings.items()) + list(kwargs.items())):
         if v is None:
             relation_cmd_line.append('{}='.format(k))
         else:
@@ -335,7 +343,8 @@ def relation_ids(reltype=None):
     relid_cmd_line = ['relation-ids', '--format=json']
     if reltype is not None:
         relid_cmd_line.append(reltype)
-        return json.loads(subprocess.check_output(relid_cmd_line)) or []
+        return json.loads(
+            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
     return []
 
 
@@ -346,7 +355,8 @@ def related_units(relid=None):
     units_cmd_line = ['relation-list', '--format=json']
     if relid is not None:
         units_cmd_line.extend(('-r', relid))
-    return json.loads(subprocess.check_output(units_cmd_line)) or []
+    return json.loads(
+        subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
 
 
 @cached
@@ -385,21 +395,31 @@ def relations_of_type(reltype=None):
     return relation_data
 
 
+@cached
+def metadata():
+    """Get the current charm metadata.yaml contents as a python object"""
+    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
+        return yaml.safe_load(md)
+
+
 @cached
 def relation_types():
     """Get a list of relation types supported by this charm"""
-    charmdir = os.environ.get('CHARM_DIR', '')
-    mdf = open(os.path.join(charmdir, 'metadata.yaml'))
-    md = yaml.safe_load(mdf)
     rel_types = []
+    md = metadata()
     for key in ('provides', 'requires', 'peers'):
         section = md.get(key)
         if section:
             rel_types.extend(section.keys())
-    mdf.close()
     return rel_types
 
 
+@cached
+def charm_name():
+    """Get the name of the current charm as is specified on metadata.yaml"""
+    return metadata().get('name')
+
+
 @cached
 def relations():
     """Get a nested dictionary of relation data for all related units"""
@@ -455,7 +475,7 @@ def unit_get(attribute):
     """Get the unit ID for the remote unit"""
     _args = ['unit-get', '--format=json', attribute]
     try:
-        return json.loads(subprocess.check_output(_args))
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
     except ValueError:
         return None
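
The recurring change above is the Python 3 bytes/str boundary: check_output() returns bytes, so every juju tool wrapper now decodes before json.loads(). A sketch of the same pattern for any new wrapper (not part of the commit; only works inside a hook environment where unit-get exists):

import json
import subprocess

raw = subprocess.check_output(['unit-get', '--format=json',
                               'private-address'])
value = json.loads(raw.decode('UTF-8'))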
@@ -14,11 +14,12 @@ import string
 import subprocess
 import hashlib
 from contextlib import contextmanager
 
 from collections import OrderedDict
 
-from hookenv import log
-from fstab import Fstab
+import six
+
+from .hookenv import log
+from .fstab import Fstab
 
 
 def service_start(service_name):
@@ -54,7 +55,9 @@ def service(action, service_name):
 def service_running(service):
     """Determine whether a system service is running"""
     try:
-        output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
+        output = subprocess.check_output(
+            ['service', service, 'status'],
+            stderr=subprocess.STDOUT).decode('UTF-8')
     except subprocess.CalledProcessError:
         return False
     else:
@@ -67,7 +70,9 @@ def service_running(service):
 def service_available(service_name):
     """Determine whether a system service is available"""
     try:
-        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
+        subprocess.check_output(
+            ['service', service_name, 'status'],
+            stderr=subprocess.STDOUT).decode('UTF-8')
     except subprocess.CalledProcessError as e:
         return 'unrecognized service' not in e.output
     else:
@@ -96,6 +101,26 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
     return user_info
 
 
+def add_group(group_name, system_group=False):
+    """Add a group to the system"""
+    try:
+        group_info = grp.getgrnam(group_name)
+        log('group {0} already exists!'.format(group_name))
+    except KeyError:
+        log('creating group {0}'.format(group_name))
+        cmd = ['addgroup']
+        if system_group:
+            cmd.append('--system')
+        else:
+            cmd.extend([
+                '--group',
+            ])
+        cmd.append(group_name)
+        subprocess.check_call(cmd)
+        group_info = grp.getgrnam(group_name)
+    return group_info
+
+
 def add_user_to_group(username, group):
     """Add a user to a group"""
     cmd = [
@@ -115,7 +140,7 @@ def rsync(from_path, to_path, flags='-r', options=None):
     cmd.append(from_path)
     cmd.append(to_path)
     log(" ".join(cmd))
-    return subprocess.check_output(cmd).strip()
+    return subprocess.check_output(cmd).decode('UTF-8').strip()
 
 
 def symlink(source, destination):
@@ -130,23 +155,26 @@ def symlink(source, destination):
     subprocess.check_call(cmd)
 
 
-def mkdir(path, owner='root', group='root', perms=0555, force=False):
+def mkdir(path, owner='root', group='root', perms=0o555, force=False):
     """Create a directory"""
     log("Making dir {} {}:{} {:o}".format(path, owner, group,
                                           perms))
     uid = pwd.getpwnam(owner).pw_uid
     gid = grp.getgrnam(group).gr_gid
     realpath = os.path.abspath(path)
-    if os.path.exists(realpath):
-        if force and not os.path.isdir(realpath):
+    path_exists = os.path.exists(realpath)
+    if path_exists and force:
+        if not os.path.isdir(realpath):
             log("Removing non-directory file {} prior to mkdir()".format(path))
             os.unlink(realpath)
-    else:
+            os.makedirs(realpath, perms)
+        os.chown(realpath, uid, gid)
+    elif not path_exists:
         os.makedirs(realpath, perms)
-    os.chown(realpath, uid, gid)
+        os.chown(realpath, uid, gid)
 
 
-def write_file(path, content, owner='root', group='root', perms=0444):
+def write_file(path, content, owner='root', group='root', perms=0o444):
     """Create or overwrite a file with the contents of a string"""
     log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
     uid = pwd.getpwnam(owner).pw_uid
@@ -177,7 +205,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
     cmd_args.extend([device, mountpoint])
     try:
         subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
         return False
 
@@ -191,7 +219,7 @@ def umount(mountpoint, persist=False):
     cmd_args = ['umount', mountpoint]
     try:
         subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         log('Error unmounting {}\n{}'.format(mountpoint, e.output))
         return False
 
@@ -218,8 +246,8 @@ def file_hash(path, hash_type='md5'):
     """
     if os.path.exists(path):
         h = getattr(hashlib, hash_type)()
-        with open(path, 'r') as source:
-            h.update(source.read())  # IGNORE:E1101 - it does have update
+        with open(path, 'rb') as source:
+            h.update(source.read())
         return h.hexdigest()
     else:
         return None
@@ -297,7 +325,7 @@ def pwgen(length=None):
     if length is None:
         length = random.choice(range(35, 45))
     alphanumeric_chars = [
-        l for l in (string.letters + string.digits)
+        l for l in (string.ascii_letters + string.digits)
         if l not in 'l0QD1vAEIOUaeiou']
     random_chars = [
         random.choice(alphanumeric_chars) for _ in range(length)]
@@ -306,14 +334,14 @@ def pwgen(length=None):
 
 def list_nics(nic_type):
     '''Return a list of nics of given type(s)'''
-    if isinstance(nic_type, basestring):
+    if isinstance(nic_type, six.string_types):
         int_types = [nic_type]
     else:
         int_types = nic_type
     interfaces = []
     for int_type in int_types:
         cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
-        ip_output = subprocess.check_output(cmd).split('\n')
+        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
         ip_output = (line for line in ip_output if line)
         for line in ip_output:
             if line.split()[1].startswith(int_type):
@@ -335,7 +363,7 @@ def set_nic_mtu(nic, mtu):
 
 def get_nic_mtu(nic):
     cmd = ['ip', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).split('\n')
+    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
     mtu = ""
     for line in ip_output:
         words = line.split()
@@ -346,7 +374,7 @@ def get_nic_mtu(nic):
 
 def get_nic_hwaddr(nic):
     cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd)
+    ip_output = subprocess.check_output(cmd).decode('UTF-8')
     hwaddr = ""
     words = ip_output.split()
     if 'link/ether' in words:
@@ -363,8 +391,8 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
 
     '''
     import apt_pkg
-    from charmhelpers.fetch import apt_cache
     if not pkgcache:
+        from charmhelpers.fetch import apt_cache
        pkgcache = apt_cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
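
A minimal usage sketch for the new add_group() helper (not part of the commit; the group and user names are illustrative):

# Create a system group and attach an existing user to it; the helpers
# shell out to addgroup and gpasswd respectively.
add_group('radosgw', system_group=True)
add_user_to_group('www-data', 'radosgw')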
@@ -1,2 +1,2 @@
-from .base import *
-from .helpers import *
+from .base import *  # NOQA
+from .helpers import *  # NOQA
@@ -196,7 +196,7 @@ class StoredContext(dict):
         if not os.path.isabs(file_name):
             file_name = os.path.join(hookenv.charm_dir(), file_name)
         with open(file_name, 'w') as file_stream:
-            os.fchmod(file_stream.fileno(), 0600)
+            os.fchmod(file_stream.fileno(), 0o600)
             yaml.dump(config_data, file_stream)
 
     def read_context(self, file_name):
@@ -211,15 +211,19 @@ class StoredContext(dict):
 
 class TemplateCallback(ManagerCallback):
     """
-    Callback class that will render a Jinja2 template, for use as a ready action.
+    Callback class that will render a Jinja2 template, for use as a ready
+    action.
+
+    :param str source: The template source file, relative to
+        `$CHARM_DIR/templates`
 
-    :param str source: The template source file, relative to `$CHARM_DIR/templates`
     :param str target: The target to write the rendered template to
     :param str owner: The owner of the rendered file
     :param str group: The group of the rendered file
     :param int perms: The permissions of the rendered file
     """
-    def __init__(self, source, target, owner='root', group='root', perms=0444):
+    def __init__(self, source, target,
+                 owner='root', group='root', perms=0o444):
         self.source = source
         self.target = target
         self.owner = owner
@@ -4,7 +4,8 @@ from charmhelpers.core import host
 from charmhelpers.core import hookenv
 
 
-def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
+def render(source, target, context, owner='root', group='root',
+           perms=0o444, templates_dir=None):
     """
     Render a template.
 
@@ -47,5 +48,5 @@ def render(source, target, context, owner='root', group='root', perms=0444, temp
                 level=hookenv.ERROR)
         raise e
     content = template.render(context)
-    host.mkdir(os.path.dirname(target))
+    host.mkdir(os.path.dirname(target), owner, group)
     host.write_file(target, content, owner, group, perms)
from charmhelpers.core.host import (
|
from charmhelpers.core.host import (
|
||||||
lsb_release
|
lsb_release
|
||||||
)
|
)
|
||||||
from urlparse import (
|
|
||||||
urlparse,
|
|
||||||
urlunparse,
|
|
||||||
)
|
|
||||||
import subprocess
|
import subprocess
|
||||||
from charmhelpers.core.hookenv import (
|
from charmhelpers.core.hookenv import (
|
||||||
config,
|
config,
|
||||||
@ -16,6 +12,12 @@ from charmhelpers.core.hookenv import (
|
|||||||
)
|
)
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
import six
|
||||||
|
if six.PY3:
|
||||||
|
from urllib.parse import urlparse, urlunparse
|
||||||
|
else:
|
||||||
|
from urlparse import urlparse, urlunparse
|
||||||
|
|
||||||
|
|
||||||
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
|
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
|
||||||
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
|
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
|
||||||
@ -62,9 +64,16 @@ CLOUD_ARCHIVE_POCKETS = {
|
|||||||
'trusty-juno/updates': 'trusty-updates/juno',
|
'trusty-juno/updates': 'trusty-updates/juno',
|
||||||
'trusty-updates/juno': 'trusty-updates/juno',
|
'trusty-updates/juno': 'trusty-updates/juno',
|
||||||
'juno/proposed': 'trusty-proposed/juno',
|
'juno/proposed': 'trusty-proposed/juno',
|
||||||
'juno/proposed': 'trusty-proposed/juno',
|
|
||||||
'trusty-juno/proposed': 'trusty-proposed/juno',
|
'trusty-juno/proposed': 'trusty-proposed/juno',
|
||||||
'trusty-proposed/juno': 'trusty-proposed/juno',
|
'trusty-proposed/juno': 'trusty-proposed/juno',
|
||||||
|
# Kilo
|
||||||
|
'kilo': 'trusty-updates/kilo',
|
||||||
|
'trusty-kilo': 'trusty-updates/kilo',
|
||||||
|
'trusty-kilo/updates': 'trusty-updates/kilo',
|
||||||
|
'trusty-updates/kilo': 'trusty-updates/kilo',
|
||||||
|
'kilo/proposed': 'trusty-proposed/kilo',
|
||||||
|
'trusty-kilo/proposed': 'trusty-proposed/kilo',
|
||||||
|
'trusty-proposed/kilo': 'trusty-proposed/kilo',
|
||||||
}
|
}
|
||||||
|
|
||||||
# The order of this list is very important. Handlers should be listed in from
|
# The order of this list is very important. Handlers should be listed in from
|
||||||
@ -149,7 +158,7 @@ def apt_install(packages, options=None, fatal=False):
|
|||||||
cmd = ['apt-get', '--assume-yes']
|
cmd = ['apt-get', '--assume-yes']
|
||||||
cmd.extend(options)
|
cmd.extend(options)
|
||||||
cmd.append('install')
|
cmd.append('install')
|
||||||
if isinstance(packages, basestring):
|
if isinstance(packages, six.string_types):
|
||||||
cmd.append(packages)
|
cmd.append(packages)
|
||||||
else:
|
else:
|
||||||
cmd.extend(packages)
|
cmd.extend(packages)
|
||||||
@ -182,7 +191,7 @@ def apt_update(fatal=False):
|
|||||||
def apt_purge(packages, fatal=False):
|
def apt_purge(packages, fatal=False):
|
||||||
"""Purge one or more packages"""
|
"""Purge one or more packages"""
|
||||||
cmd = ['apt-get', '--assume-yes', 'purge']
|
cmd = ['apt-get', '--assume-yes', 'purge']
|
||||||
if isinstance(packages, basestring):
|
if isinstance(packages, six.string_types):
|
||||||
cmd.append(packages)
|
cmd.append(packages)
|
||||||
else:
|
else:
|
||||||
cmd.extend(packages)
|
cmd.extend(packages)
|
||||||
@ -193,7 +202,7 @@ def apt_purge(packages, fatal=False):
|
|||||||
def apt_hold(packages, fatal=False):
|
def apt_hold(packages, fatal=False):
|
||||||
"""Hold one or more packages"""
|
"""Hold one or more packages"""
|
||||||
cmd = ['apt-mark', 'hold']
|
cmd = ['apt-mark', 'hold']
|
||||||
if isinstance(packages, basestring):
|
if isinstance(packages, six.string_types):
|
||||||
cmd.append(packages)
|
cmd.append(packages)
|
||||||
else:
|
else:
|
||||||
cmd.extend(packages)
|
cmd.extend(packages)
|
||||||
@ -256,11 +265,11 @@ def add_source(source, key=None):
|
|||||||
elif source == 'distro':
|
elif source == 'distro':
|
||||||
pass
|
pass
|
||||||
else:
|
else:
|
||||||
raise SourceConfigError("Unknown source: {!r}".format(source))
|
log("Unknown source: {!r}".format(source))
|
||||||
|
|
||||||
if key:
|
if key:
|
||||||
if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
|
if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
|
||||||
with NamedTemporaryFile() as key_file:
|
with NamedTemporaryFile('w+') as key_file:
|
||||||
key_file.write(key)
|
key_file.write(key)
|
||||||
key_file.flush()
|
key_file.flush()
|
||||||
key_file.seek(0)
|
key_file.seek(0)
|
||||||
@ -297,14 +306,14 @@ def configure_sources(update=False,
|
|||||||
sources = safe_load((config(sources_var) or '').strip()) or []
|
sources = safe_load((config(sources_var) or '').strip()) or []
|
||||||
keys = safe_load((config(keys_var) or '').strip()) or None
|
keys = safe_load((config(keys_var) or '').strip()) or None
|
||||||
|
|
||||||
if isinstance(sources, basestring):
|
if isinstance(sources, six.string_types):
|
||||||
sources = [sources]
|
sources = [sources]
|
||||||
|
|
||||||
if keys is None:
|
if keys is None:
|
||||||
for source in sources:
|
for source in sources:
|
||||||
add_source(source, None)
|
add_source(source, None)
|
||||||
else:
|
else:
|
||||||
if isinstance(keys, basestring):
|
if isinstance(keys, six.string_types):
|
||||||
keys = [keys]
|
keys = [keys]
|
||||||
|
|
||||||
if len(sources) != len(keys):
|
if len(sources) != len(keys):
|
||||||
@ -401,7 +410,7 @@ def _run_apt_command(cmd, fatal=False):
|
|||||||
while result is None or result == APT_NO_LOCK:
|
while result is None or result == APT_NO_LOCK:
|
||||||
try:
|
try:
|
||||||
result = subprocess.check_call(cmd, env=env)
|
result = subprocess.check_call(cmd, env=env)
|
||||||
except subprocess.CalledProcessError, e:
|
except subprocess.CalledProcessError as e:
|
||||||
retry_count = retry_count + 1
|
retry_count = retry_count + 1
|
||||||
if retry_count > APT_NO_LOCK_RETRY_COUNT:
|
if retry_count > APT_NO_LOCK_RETRY_COUNT:
|
||||||
raise
|
raise
|
||||||
|
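
A minimal usage sketch for the new Kilo pockets (not part of the commit; assumes a trusty unit where the cloud-archive keyring is installable):

# The standard shorthand now resolves through the Kilo entries added above.
add_source('cloud:trusty-kilo')
apt_update(fatal=True)
apt_install(['radosgw', 'haproxy'], fatal=True)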
@@ -1,8 +1,23 @@
 import os
-import urllib2
-from urllib import urlretrieve
-import urlparse
 import hashlib
+import re
+
+import six
+if six.PY3:
+    from urllib.request import (
+        build_opener, install_opener, urlopen, urlretrieve,
+        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+    )
+    from urllib.parse import urlparse, urlunparse, parse_qs
+    from urllib.error import URLError
+else:
+    from urllib import urlretrieve
+    from urllib2 import (
+        build_opener, install_opener, urlopen,
+        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+        URLError
+    )
+    from urlparse import urlparse, urlunparse, parse_qs
 
 from charmhelpers.fetch import (
     BaseFetchHandler,
@@ -15,6 +30,24 @@ from charmhelpers.payload.archive import (
 from charmhelpers.core.host import mkdir, check_hash
 
 
+def splituser(host):
+    '''urllib.splituser(), but six's support of this seems broken'''
+    _userprog = re.compile('^(.*)@(.*)$')
+    match = _userprog.match(host)
+    if match:
+        return match.group(1, 2)
+    return None, host
+
+
+def splitpasswd(user):
+    '''urllib.splitpasswd(), but six's support of this is missing'''
+    _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
+    match = _passwdprog.match(user)
+    if match:
+        return match.group(1, 2)
+    return user, None
+
+
 class ArchiveUrlFetchHandler(BaseFetchHandler):
     """
     Handler to download archive files from arbitrary URLs.
@@ -42,20 +75,20 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
         """
         # propagate all exceptions
         # URLError, OSError, etc
-        proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
+        proto, netloc, path, params, query, fragment = urlparse(source)
         if proto in ('http', 'https'):
-            auth, barehost = urllib2.splituser(netloc)
+            auth, barehost = splituser(netloc)
             if auth is not None:
-                source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
-                username, password = urllib2.splitpasswd(auth)
-                passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
+                source = urlunparse((proto, barehost, path, params, query, fragment))
+                username, password = splitpasswd(auth)
+                passman = HTTPPasswordMgrWithDefaultRealm()
                 # Realm is set to None in add_password to force the username and password
                 # to be used whatever the realm
                 passman.add_password(None, source, username, password)
-                authhandler = urllib2.HTTPBasicAuthHandler(passman)
-                opener = urllib2.build_opener(authhandler)
-                urllib2.install_opener(opener)
-        response = urllib2.urlopen(source)
+                authhandler = HTTPBasicAuthHandler(passman)
+                opener = build_opener(authhandler)
+                install_opener(opener)
+        response = urlopen(source)
         try:
             with open(dest, 'w') as dest_file:
                 dest_file.write(response.read())
@@ -91,17 +124,21 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
         url_parts = self.parse_url(source)
         dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
         if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0755)
+            mkdir(dest_dir, perms=0o755)
         dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
         try:
            self.download(source, dld_file)
-        except urllib2.URLError as e:
+        except URLError as e:
            raise UnhandledSource(e.reason)
         except OSError as e:
            raise UnhandledSource(e.strerror)
-        options = urlparse.parse_qs(url_parts.fragment)
+        options = parse_qs(url_parts.fragment)
         for key, value in options.items():
-            if key in hashlib.algorithms:
+            if not six.PY3:
+                algorithms = hashlib.algorithms
+            else:
+                algorithms = hashlib.algorithms_available
+            if key in algorithms:
                 check_hash(dld_file, value, key)
         if checksum:
             check_hash(dld_file, checksum, hash_type)
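
A minimal sketch of the replacement URL helpers (not part of the commit; the credentials are illustrative):

auth, host = splituser('admin:secret@example.com')
assert (auth, host) == ('admin:secret', 'example.com')
assert splitpasswd(auth) == ('admin', 'secret')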
@@ -5,6 +5,10 @@ from charmhelpers.fetch import (
 )
 from charmhelpers.core.host import mkdir
 
+import six
+if six.PY3:
+    raise ImportError('bzrlib does not support Python3')
+
 try:
     from bzrlib.branch import Branch
 except ImportError:
@@ -42,7 +46,7 @@ class BzrUrlFetchHandler(BaseFetchHandler):
         dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                 branch_name)
         if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0755)
+            mkdir(dest_dir, perms=0o755)
         try:
             self.branch(source, dest_dir)
         except OSError as e:
@@ -5,6 +5,10 @@ from charmhelpers.fetch import (
 )
 from charmhelpers.core.host import mkdir
 
+import six
+if six.PY3:
+    raise ImportError('GitPython does not support Python 3')
+
 try:
     from git import Repo
 except ImportError:
@@ -17,7 +21,7 @@ class GitUrlFetchHandler(BaseFetchHandler):
     """Handler for git branches via generic and github URLs"""
     def can_handle(self, source):
         url_parts = self.parse_url(source)
-        #TODO (mattyw) no support for ssh git@ yet
+        # TODO (mattyw) no support for ssh git@ yet
         if url_parts.scheme not in ('http', 'https', 'git'):
             return False
         else:
@@ -30,13 +34,16 @@ class GitUrlFetchHandler(BaseFetchHandler):
         repo = Repo.clone_from(source, dest)
         repo.git.checkout(branch)
 
-    def install(self, source, branch="master"):
+    def install(self, source, branch="master", dest=None):
         url_parts = self.parse_url(source)
         branch_name = url_parts.path.strip("/").split("/")[-1]
-        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
-                                branch_name)
+        if dest:
+            dest_dir = os.path.join(dest, branch_name)
+        else:
+            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+                                    branch_name)
         if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0755)
+            mkdir(dest_dir, perms=0o755)
         try:
             self.clone(source, dest_dir, branch)
         except OSError as e:
1 hooks/cluster-relation-changed Symbolic link
@@ -0,0 +1 @@
+hooks.py
1 hooks/cluster-relation-joined Symbolic link
@@ -0,0 +1 @@
+hooks.py
1 hooks/ha-relation-changed Symbolic link
@@ -0,0 +1 @@
+hooks.py
1 hooks/ha-relation-joined Symbolic link
@@ -0,0 +1 @@
+hooks.py
hooks/hooks.py
126
hooks/hooks.py
@ -13,7 +13,6 @@ import sys
|
|||||||
import glob
|
import glob
|
||||||
import os
|
import os
|
||||||
import ceph
|
import ceph
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
from charmhelpers.core.hookenv import (
|
||||||
relation_get,
|
relation_get,
|
||||||
relation_ids,
|
relation_ids,
|
||||||
@ -22,7 +21,7 @@ from charmhelpers.core.hookenv import (
|
|||||||
unit_get,
|
unit_get,
|
||||||
open_port,
|
open_port,
|
||||||
relation_set,
|
relation_set,
|
||||||
log,
|
log, ERROR,
|
||||||
Hooks, UnregisteredHookError,
|
Hooks, UnregisteredHookError,
|
||||||
)
|
)
|
||||||
from charmhelpers.fetch import (
|
from charmhelpers.fetch import (
|
||||||
@ -39,14 +38,27 @@ from utils import (
|
|||||||
render_template,
|
render_template,
|
||||||
get_host_ip,
|
get_host_ip,
|
||||||
enable_pocket,
|
enable_pocket,
|
||||||
is_apache_24
|
is_apache_24,
|
||||||
|
CEPHRG_HA_RES,
|
||||||
|
register_configs,
|
||||||
)
|
)
|
||||||
|
|
||||||
from charmhelpers.payload.execd import execd_preinstall
|
from charmhelpers.payload.execd import execd_preinstall
|
||||||
from charmhelpers.core.host import cmp_pkgrevno
|
from charmhelpers.core.host import cmp_pkgrevno
|
||||||
from socket import gethostname as get_unit_hostname
|
from socket import gethostname as get_unit_hostname
|
||||||
|
|
||||||
|
from charmhelpers.contrib.network.ip import (
|
||||||
|
get_iface_for_address,
|
||||||
|
get_netmask_for_address,
|
||||||
|
is_ipv6,
|
||||||
|
)
|
||||||
|
from charmhelpers.contrib.openstack.ip import (
|
||||||
|
resolve_address,
|
||||||
|
PUBLIC, INTERNAL, ADMIN,
|
||||||
|
)
|
||||||
|
|
||||||
hooks = Hooks()
|
hooks = Hooks()
|
||||||
|
CONFIGS = register_configs()
|
||||||
|
|
||||||
|
|
||||||
def install_www_scripts():
|
def install_www_scripts():
|
||||||
@ -75,6 +87,7 @@ def install_ceph_optimised_packages():
|
|||||||
PACKAGES = [
|
PACKAGES = [
|
||||||
'radosgw',
|
'radosgw',
|
||||||
'ntp',
|
'ntp',
|
||||||
|
'haproxy',
|
||||||
]
|
]
|
||||||
|
|
||||||
APACHE_PACKAGES = [
|
APACHE_PACKAGES = [
|
||||||
@ -159,18 +172,27 @@ def apache_reload():
|
|||||||
subprocess.call(['service', 'apache2', 'reload'])
|
subprocess.call(['service', 'apache2', 'reload'])
|
||||||
|
|
||||||
|
|
||||||
|
def apache_ports():
|
||||||
|
shutil.copy('files/ports.conf', '/etc/apache2/ports.conf')
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('upgrade-charm',
|
@hooks.hook('upgrade-charm',
|
||||||
'config-changed')
|
'config-changed')
|
||||||
@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']})
|
@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw'],
|
||||||
|
'/etc/haproxy/haproxy.cfg': ['haproxy']})
|
||||||
def config_changed():
|
def config_changed():
|
||||||
install_packages()
|
install_packages()
|
||||||
emit_cephconf()
|
emit_cephconf()
|
||||||
|
CONFIGS.write_all()
|
||||||
if not config('use-embedded-webserver'):
|
if not config('use-embedded-webserver'):
|
||||||
emit_apacheconf()
|
emit_apacheconf()
|
||||||
install_www_scripts()
|
install_www_scripts()
|
||||||
apache_sites()
|
apache_sites()
|
||||||
apache_modules()
|
apache_modules()
|
||||||
|
apache_ports()
|
||||||
apache_reload()
|
apache_reload()
|
||||||
|
for r_id in relation_ids('identity-service'):
|
||||||
|
identity_joined(relid=r_id)
|
||||||
|
|
||||||
|
|
||||||
def get_mon_hosts():
|
def get_mon_hosts():
|
||||||
@ -223,6 +245,7 @@ def get_keystone_conf():
|
|||||||
|
|
||||||
@hooks.hook('mon-relation-departed',
|
@hooks.hook('mon-relation-departed',
|
||||||
'mon-relation-changed')
|
'mon-relation-changed')
|
||||||
|
@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']})
|
||||||
def mon_relation():
|
def mon_relation():
|
||||||
emit_cephconf()
|
emit_cephconf()
|
||||||
key = relation_get('radosgw_key')
|
key = relation_get('radosgw_key')
|
||||||
@ -252,29 +275,116 @@ def restart():
|
|||||||
open_port(port=80)
|
open_port(port=80)
|
||||||
|
|
||||||
|
|
||||||
|
# XXX Define local canonical_url until charm has been updated to use the
|
||||||
|
# standard context architecture.
|
||||||
|
def canonical_url(configs, endpoint_type=PUBLIC):
|
||||||
|
scheme = 'http'
|
||||||
|
address = resolve_address(endpoint_type)
|
||||||
|
if is_ipv6(address):
|
||||||
|
address = "[{}]".format(address)
|
||||||
|
return '%s://%s' % (scheme, address)
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('identity-service-relation-joined')
|
@hooks.hook('identity-service-relation-joined')
|
||||||
def identity_joined(relid=None):
|
def identity_joined(relid=None):
|
||||||
if cmp_pkgrevno('radosgw', '0.55') < 0:
|
if cmp_pkgrevno('radosgw', '0.55') < 0:
|
||||||
log('Integration with keystone requires ceph >= 0.55')
|
log('Integration with keystone requires ceph >= 0.55')
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
hostname = unit_get('private-address')
|
port = 80
|
||||||
admin_url = 'http://{}:80/swift'.format(hostname)
|
admin_url = '%s:%i/swift' % (canonical_url(ADMIN), port)
|
||||||
internal_url = public_url = '{}/v1'.format(admin_url)
|
internal_url = '%s:%s/swift/v1' % \
|
||||||
|
(canonical_url(INTERNAL), port)
|
||||||
|
public_url = '%s:%s/swift/v1' % \
|
||||||
|
(canonical_url(PUBLIC), port)
|
||||||
relation_set(service='swift',
|
relation_set(service='swift',
|
||||||
region=config('region'),
|
region=config('region'),
|
||||||
public_url=public_url, internal_url=internal_url,
|
public_url=public_url, internal_url=internal_url,
|
||||||
admin_url=admin_url,
|
admin_url=admin_url,
|
||||||
requested_roles=config('operator-roles'),
|
requested_roles=config('operator-roles'),
|
||||||
rid=relid)
|
relation_id=relid)
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('identity-service-relation-changed')
|
@hooks.hook('identity-service-relation-changed')
|
||||||
|
@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']})
|
||||||
def identity_changed():
|
def identity_changed():
|
||||||
emit_cephconf()
|
emit_cephconf()
|
||||||
restart()
|
restart()
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('cluster-relation-changed',
|
||||||
|
'cluster-relation-joined')
|
||||||
|
@restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']})
|
||||||
|
def cluster_changed():
|
||||||
|
CONFIGS.write_all()
|
||||||
|
for r_id in relation_ids('identity-service'):
|
||||||
|
identity_joined(relid=r_id)
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('ha-relation-joined')
|
||||||
|
def ha_relation_joined():
|
||||||
|
# Obtain the config values necessary for the cluster config. These
|
||||||
|
# include multicast port and interface to bind to.
|
||||||
|
corosync_bindiface = config('ha-bindiface')
|
||||||
|
corosync_mcastport = config('ha-mcastport')
|
||||||
|
vip = config('vip')
|
||||||
|
if not vip:
|
||||||
|
log('Unable to configure hacluster as vip not provided',
|
||||||
|
level=ERROR)
|
||||||
|
sys.exit(1)
|
||||||
|
# Obtain resources
|
||||||
|
# SWIFT_HA_RES = 'grp_swift_vips'
|
||||||
|
resources = {
|
||||||
|
'res_cephrg_haproxy': 'lsb:haproxy'
|
||||||
|
}
|
||||||
|
resource_params = {
|
||||||
|
'res_cephrg_haproxy': 'op monitor interval="5s"'
|
||||||
|
}
|
||||||
|
|
||||||
|
vip_group = []
|
||||||
|
for vip in vip.split():
|
||||||
|
iface = get_iface_for_address(vip)
|
||||||
|
if iface is not None:
|
||||||
|
vip_key = 'res_cephrg_{}_vip'.format(iface)
|
||||||
|
resources[vip_key] = 'ocf:heartbeat:IPaddr2'
|
||||||
|
resource_params[vip_key] = (
|
||||||
|
'params ip="{vip}" cidr_netmask="{netmask}"'
|
||||||
|
' nic="{iface}"'.format(vip=vip,
|
||||||
|
iface=iface,
|
||||||
|
netmask=get_netmask_for_address(vip))
|
||||||
|
)
|
||||||
|
vip_group.append(vip_key)
|
||||||
|
|
||||||
|
if len(vip_group) >= 1:
|
||||||
|
relation_set(groups={CEPHRG_HA_RES: ' '.join(vip_group)})
|
||||||
|
|
||||||
|
init_services = {
|
||||||
|
'res_cephrg_haproxy': 'haproxy'
|
||||||
|
}
|
||||||
|
clones = {
|
||||||
|
'cl_cephrg_haproxy': 'res_cephrg_haproxy'
|
||||||
|
}
|
||||||
|
|
||||||
|
relation_set(init_services=init_services,
|
||||||
|
corosync_bindiface=corosync_bindiface,
|
||||||
|
corosync_mcastport=corosync_mcastport,
|
||||||
|
resources=resources,
|
||||||
|
resource_params=resource_params,
|
||||||
|
clones=clones)
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('ha-relation-changed')
|
||||||
|
def ha_relation_changed():
|
||||||
|
clustered = relation_get('clustered')
|
||||||
|
if clustered:
|
||||||
|
log('Cluster configured, notifying other services and'
|
||||||
|
'updating keystone endpoint configuration')
|
||||||
|
# Tell all related services to start using
|
||||||
|
# the VIP instead
|
||||||
|
for r_id in relation_ids('identity-service'):
|
||||||
|
identity_joined(relid=r_id)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
try:
|
try:
|
||||||
hooks.execute(sys.argv)
|
hooks.execute(sys.argv)
|
||||||
|
@ -10,11 +10,26 @@
|
|||||||
import socket
|
import socket
|
||||||
import re
|
import re
|
||||||
import os
|
import os
|
||||||
|
from copy import deepcopy
|
||||||
|
from collections import OrderedDict
|
||||||
from charmhelpers.core.hookenv import unit_get
|
from charmhelpers.core.hookenv import unit_get
|
||||||
from charmhelpers.fetch import apt_install
|
from charmhelpers.fetch import apt_install
|
||||||
|
from charmhelpers.contrib.openstack import context, templating
|
||||||
|
|
||||||
|
import ceph_radosgw_context
|
||||||
|
|
||||||
|
CEPHRG_HA_RES = 'grp_cephrg_vips'
|
||||||
TEMPLATES_DIR = 'templates'
|
TEMPLATES_DIR = 'templates'
|
||||||
|
TEMPLATES = 'templates/'
|
||||||
|
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
|
||||||
|
|
||||||
|
BASE_RESOURCE_MAP = OrderedDict([
|
||||||
|
(HAPROXY_CONF, {
|
||||||
|
'contexts': [context.HAProxyContext(singlenode_mode=True),
|
||||||
|
ceph_radosgw_context.HAProxyContext()],
|
||||||
|
'services': ['haproxy'],
|
||||||
|
}),
|
||||||
|
])
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import jinja2
|
import jinja2
|
||||||
@@ -29,6 +44,25 @@ except ImportError:
     import dns.resolver
 
 
+def resource_map():
+    '''
+    Dynamically generate a map of resources that will be managed for a single
+    hook execution.
+    '''
+    resource_map = deepcopy(BASE_RESOURCE_MAP)
+    return resource_map
+
+
+# Hardcoded to icehouse to enable use of charmhelper templating/context tools
+# Ideally these functions would support non-OpenStack services
+def register_configs(release='icehouse'):
+    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
+                                          openstack_release=release)
+    for cfg, rscs in resource_map().iteritems():
+        configs.register(cfg, rscs['contexts'])
+    return configs
+
+
 def render_template(template_name, context, template_dir=TEMPLATES_DIR):
     templates = jinja2.Environment(
         loader=jinja2.FileSystemLoader(template_dir)
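A sketch of how the renderer is consumed from hooks.py; CONFIGS is the module-level name implied by the CONFIGS.write_all() call in cluster_changed above (assumed wiring, not shown in this diff):

    from utils import register_configs

    CONFIGS = register_configs()

    # Re-render every registered target -- here just haproxy.cfg --
    # pulling fresh data from each context in the resource map.
    CONFIGS.write_all()

    # Or re-render a single target:
    CONFIGS.write('/etc/haproxy/haproxy.cfg')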
@@ -14,6 +14,12 @@ requires:
     interface: ceph-radosgw
   identity-service:
     interface: keystone
+  ha:
+    interface: hacluster
+    scope: container
 provides:
   gateway:
     interface: http
+peers:
+  cluster:
+    interface: swift-ha
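The new cluster peer relation is what lets HAProxyContext discover the other radosgw units to balance across (the swift-ha interface name appears to be carried over from the swift charm; interface names only need to match between endpoints). An illustrative way to enumerate peers from a hook:

    from charmhelpers.core.hookenv import related_units, relation_ids


    def peer_unit_names():
        # Collect every unit joined to the 'cluster' peer relation.
        units = []
        for r_id in relation_ids('cluster'):
            units.extend(related_units(r_id))
        return units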
@@ -17,7 +17,7 @@ keyring = /etc/ceph/keyring.rados.gateway
 rgw socket path = /tmp/radosgw.sock
 log file = /var/log/ceph/radosgw.log
 {% if embedded_webserver %}
-rgw frontends = civetweb port=80
+rgw frontends = civetweb port=70
 {% else %}
 # Turn off 100-continue optimization as stock mod_fastcgi
 # does not support it
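With the embedded civetweb server enabled, radosgw itself now binds to port 70, leaving port 80 free for haproxy in front of it. Assuming render_template returns the rendered string (its body is shown above), the change can be eyeballed with:

    from utils import render_template

    # Expect 'rgw frontends = civetweb port=70' in the output; template
    # variables left unset simply render empty here.
    print(render_template('ceph.conf', {'embedded_webserver': True}))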
@@ -2,7 +2,7 @@
 FastCgiExternalServer /var/www/s3gw.fcgi -socket /tmp/radosgw.sock
 </IfModule>
 
-<VirtualHost *:80>
+<VirtualHost *:70>
     ServerName {{ hostname }}
     ServerAdmin ceph@ubuntu.com
     DocumentRoot /var/www
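The Apache vhost moves for the same reason as civetweb: haproxy owns the public port 80, and the local web server drops down a slot of 10, even on a single node (which is what singlenode_mode requests). A simplified sketch of the arithmetic; the real helpers are determine_api_port and determine_apache_port in charmhelpers.contrib.hahelpers.cluster, which also shift a further slot when HTTPS termination is in play:

    def determine_apache_port(public_port, singlenode_mode=False):
        # haproxy takes the public port; the backend web server shifts
        # down one slot of 10 whenever it sits behind haproxy.
        shift = 1 if singlenode_mode else 0
        return public_port - (shift * 10)


    assert determine_apache_port(80, singlenode_mode=True) == 70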
@@ -0,0 +1,22 @@
+# Bootstrap charm-helpers, installing its dependencies if necessary using
+# only standard libraries.
+import subprocess
+import sys
+
+try:
+    import six  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+    import six  # flake8: noqa
+
+try:
+    import yaml  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+    import yaml  # flake8: noqa
@@ -1,6 +1,6 @@
 import amulet
-
 import os
+import six
 
 
 class AmuletDeployment(object):
@@ -52,12 +52,12 @@ class AmuletDeployment(object):
 
     def _add_relations(self, relations):
         """Add all of the relations for the services."""
-        for k, v in relations.iteritems():
+        for k, v in six.iteritems(relations):
             self.d.relate(k, v)
 
     def _configure_services(self, configs):
         """Configure all of the services."""
-        for service, config in configs.iteritems():
+        for service, config in six.iteritems(configs):
             self.d.configure(service, config)
 
     def _deploy(self):
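This and the remaining hunks are a mechanical Python 3 compatibility pass over the test helpers: dict.iteritems() no longer exists on Python 3, and six.iteritems() picks the right spelling for the running interpreter:

    import six

    configs = {'keystone': {'admin-password': 'openstack'}}

    # Equivalent to configs.iteritems() on Python 2 and
    # configs.items() on Python 3.
    for service, config in six.iteritems(configs):
        print(service, config)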
@@ -5,6 +5,8 @@ import re
 import sys
 import time
 
+import six
+
 
 class AmuletUtils(object):
     """Amulet utilities.
@@ -58,7 +60,7 @@ class AmuletUtils(object):
         Verify the specified services are running on the corresponding
         service units.
         """
-        for k, v in commands.iteritems():
+        for k, v in six.iteritems(commands):
             for cmd in v:
                 output, code = k.run(cmd)
                 if code != 0:
@@ -100,11 +102,11 @@ class AmuletUtils(object):
         longs, or can be a function that evaluates a variable and returns a
         bool.
         """
-        for k, v in expected.iteritems():
+        for k, v in six.iteritems(expected):
             if k in actual:
-                if (isinstance(v, basestring) or
+                if (isinstance(v, six.string_types) or
                         isinstance(v, bool) or
-                        isinstance(v, (int, long))):
+                        isinstance(v, six.integer_types)):
                     if v != actual[k]:
                         return "{}:{}".format(k, actual[k])
                 elif not v(actual[k]):
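Likewise basestring and long are Python-2-only names; six supplies type tuples that isinstance() accepts on either interpreter:

    import six

    # (str, unicode) on Python 2; (str,) on Python 3.
    assert isinstance("10.5.100.1", six.string_types)
    # (int, long) on Python 2; (int,) on Python 3.
    assert isinstance(5414, six.integer_types)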
@@ -1,3 +1,4 @@
+import six
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )
@@ -69,7 +70,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
 
     def _configure_services(self, configs):
         """Configure all of the services."""
-        for service, config in configs.iteritems():
+        for service, config in six.iteritems(configs):
             self.d.configure(service, config)
 
     def _get_openstack_release(self):
@@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client
 import keystoneclient.v2_0 as keystone_client
 import novaclient.v1_1.client as nova_client
 
+import six
+
 from charmhelpers.contrib.amulet.utils import (
     AmuletUtils
 )
@@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils):
         expected service catalog endpoints.
         """
         self.log.debug('actual: {}'.format(repr(actual)))
-        for k, v in expected.iteritems():
+        for k, v in six.iteritems(expected):
             if k in actual:
                 ret = self._validate_dict_data(expected[k][0], actual[k][0])
                 if ret:
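For context, the method touched here validates keystone's service catalog; expected maps each service name to a list of endpoint dicts, compared entry by entry against the live catalog. A hypothetical shape of that data (values are illustrative):

    import six

    expected = {'object-store': [{'adminURL': 'http://10.5.100.1:80/swift',
                                  'region': 'RegionOne'}]}
    actual = {'object-store': [{'adminURL': 'http://10.5.100.1:80/swift',
                                'region': 'RegionOne'}]}

    for k, v in six.iteritems(expected):
        assert k in actual and v[0] == actual[k][0]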