From 502992535f40d38e57519e3380c802785aa3b747 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Feb 2016 22:17:25 +0000 Subject: [PATCH] Sync charm helpers for Mitaka awareness --- hooks/charmhelpers/contrib/network/ip.py | 55 ++- .../contrib/openstack/amulet/deployment.py | 13 +- .../charmhelpers/contrib/openstack/context.py | 52 ++- .../contrib/openstack/files/check_haproxy.sh | 12 +- .../charmhelpers/contrib/openstack/neutron.py | 24 +- .../contrib/openstack/templates/haproxy.cfg | 5 +- .../templates/section-keystone-authtoken | 11 + hooks/charmhelpers/contrib/openstack/utils.py | 287 +++++++++--- hooks/charmhelpers/contrib/python/packages.py | 46 +- .../contrib/storage/linux/ceph.py | 416 ++++++++++++++++-- hooks/charmhelpers/core/hookenv.py | 48 +- hooks/charmhelpers/core/host.py | 138 ++++-- hooks/charmhelpers/core/services/helpers.py | 16 +- hooks/charmhelpers/core/templating.py | 20 +- hooks/charmhelpers/fetch/__init__.py | 10 +- hooks/charmhelpers/fetch/archiveurl.py | 2 +- hooks/charmhelpers/fetch/bzrurl.py | 54 +-- hooks/charmhelpers/fetch/giturl.py | 41 +- .../contrib/openstack/amulet/deployment.py | 13 +- 19 files changed, 992 insertions(+), 271 deletions(-) diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 7f3b66b1..4efe7993 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -53,7 +53,7 @@ def _validate_cidr(network): def no_ip_found_error_out(network): - errmsg = ("No IP address found in network: %s" % network) + errmsg = ("No IP address found in network(s): %s" % network) raise ValueError(errmsg) @@ -61,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False): """Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, - '192.168.1.0/24'. + '192.168.1.0/24'. Supports multiple networks as a space-delimited list. :param fallback (str): If no address is found, return fallback. :param fatal (boolean): If no address is found, fallback is not set and fatal is True then exit(1). 
@@ -75,24 +75,26 @@ def get_address_in_network(network, fallback=None, fatal=False):
         else:
             return None
 
-    _validate_cidr(network)
-    network = netaddr.IPNetwork(network)
-    for iface in netifaces.interfaces():
-        addresses = netifaces.ifaddresses(iface)
-        if network.version == 4 and netifaces.AF_INET in addresses:
-            addr = addresses[netifaces.AF_INET][0]['addr']
-            netmask = addresses[netifaces.AF_INET][0]['netmask']
-            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
-            if cidr in network:
-                return str(cidr.ip)
+    networks = network.split() or [network]
+    for network in networks:
+        _validate_cidr(network)
+        network = netaddr.IPNetwork(network)
+        for iface in netifaces.interfaces():
+            addresses = netifaces.ifaddresses(iface)
+            if network.version == 4 and netifaces.AF_INET in addresses:
+                addr = addresses[netifaces.AF_INET][0]['addr']
+                netmask = addresses[netifaces.AF_INET][0]['netmask']
+                cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+                if cidr in network:
+                    return str(cidr.ip)
 
-        if network.version == 6 and netifaces.AF_INET6 in addresses:
-            for addr in addresses[netifaces.AF_INET6]:
-                if not addr['addr'].startswith('fe80'):
-                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
-                                                        addr['netmask']))
-                    if cidr in network:
-                        return str(cidr.ip)
+            if network.version == 6 and netifaces.AF_INET6 in addresses:
+                for addr in addresses[netifaces.AF_INET6]:
+                    if not addr['addr'].startswith('fe80'):
+                        cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
+                                                            addr['netmask']))
+                        if cidr in network:
+                            return str(cidr.ip)
 
     if fallback is not None:
         return fallback
@@ -454,3 +456,18 @@ def get_hostname(address, fqdn=True):
             return result
     else:
         return result.split('.')[0]
+
+
+def port_has_listener(address, port):
+    """
+    Returns True if the address:port is open and being listened to,
+    else False.
+
+    @param address: an IP address or hostname
+    @param port: integer port
+
+    Note calls 'nc' via a subprocess shell
+    """
+    cmd = ['nc', '-z', address, str(port)]
+    result = subprocess.call(cmd)
+    return not(bool(result))
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
index 0506491b..d2ede320 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -121,11 +121,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
 
         # Charms which should use the source config option
         use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
-                      'ceph-osd', 'ceph-radosgw']
+                      'ceph-osd', 'ceph-radosgw', 'ceph-mon']
 
         # Charms which can not use openstack-origin, ie.
many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', - 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', + 'cinder-backup'] if self.openstack: for svc in services: @@ -225,7 +226,8 @@ class OpenStackAmuletDeployment(AmuletDeployment): self.precise_havana, self.precise_icehouse, self.trusty_icehouse, self.trusty_juno, self.utopic_juno, self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty) = range(12) + self.wily_liberty, self.trusty_mitaka, + self.xenial_mitaka) = range(14) releases = { ('precise', None): self.precise_essex, @@ -237,9 +239,11 @@ class OpenStackAmuletDeployment(AmuletDeployment): ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, ('utopic', None): self.utopic_juno, ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty} + ('wily', None): self.wily_liberty, + ('xenial', None): self.xenial_mitaka} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -256,6 +260,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): ('utopic', 'juno'), ('vivid', 'kilo'), ('wily', 'liberty'), + ('xenial', 'mitaka'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 61073cd3..a8c6ab0c 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -57,6 +57,7 @@ from charmhelpers.core.host import ( get_nic_hwaddr, mkdir, write_file, + pwgen, ) from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, @@ -87,6 +88,14 @@ from charmhelpers.contrib.network.ip import ( is_bridge_member, ) from charmhelpers.contrib.openstack.utils import get_host_ip +from charmhelpers.core.unitdata import kv + +try: + import psutil +except ImportError: + apt_install('python-psutil', fatal=True) + import psutil + CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' ADDRESS_TYPES = ['admin', 'internal', 'public'] @@ -401,6 +410,7 @@ class IdentityServiceContext(OSContextGenerator): auth_host = format_ipv6_addr(auth_host) or auth_host svc_protocol = rdata.get('service_protocol') or 'http' auth_protocol = rdata.get('auth_protocol') or 'http' + api_version = rdata.get('api_version') or '2.0' ctxt.update({'service_port': rdata.get('service_port'), 'service_host': serv_host, 'auth_host': auth_host, @@ -409,7 +419,8 @@ class IdentityServiceContext(OSContextGenerator): 'admin_user': rdata.get('service_username'), 'admin_password': rdata.get('service_password'), 'service_protocol': svc_protocol, - 'auth_protocol': auth_protocol}) + 'auth_protocol': auth_protocol, + 'api_version': api_version}) if self.context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse @@ -636,11 +647,18 @@ class HAProxyContext(OSContextGenerator): ctxt['ipv6'] = True ctxt['local_host'] = 'ip6-localhost' ctxt['haproxy_host'] = '::' - ctxt['stat_port'] = ':::8888' else: ctxt['local_host'] = '127.0.0.1' ctxt['haproxy_host'] = '0.0.0.0' - ctxt['stat_port'] = ':8888' + + ctxt['stat_port'] = '8888' + + db = kv() + ctxt['stat_password'] = db.get('stat-password') + if not ctxt['stat_password']: + ctxt['stat_password'] = db.set('stat-password', + pwgen(32)) + db.flush() for frontend in 
cluster_hosts:
             if (len(cluster_hosts[frontend]['backends']) > 1 or
@@ -1094,6 +1112,20 @@ class OSConfigFlagContext(OSContextGenerator):
                 config_flags_parser(config_flags)}
 
 
+class LibvirtConfigFlagsContext(OSContextGenerator):
+    """
+    This context provides support for extending
+    the libvirt section through user-defined flags.
+    """
+    def __call__(self):
+        ctxt = {}
+        libvirt_flags = config('libvirt-flags')
+        if libvirt_flags:
+            ctxt['libvirt_flags'] = config_flags_parser(
+                libvirt_flags)
+        return ctxt
+
+
 class SubordinateConfigContext(OSContextGenerator):
 
     """
@@ -1234,13 +1266,11 @@ class WorkerConfigContext(OSContextGenerator):
 
     @property
     def num_cpus(self):
-        try:
-            from psutil import NUM_CPUS
-        except ImportError:
-            apt_install('python-psutil', fatal=True)
-            from psutil import NUM_CPUS
-
-        return NUM_CPUS
+        # NOTE: use cpu_count if present (16.04 support)
+        if hasattr(psutil, 'cpu_count'):
+            return psutil.cpu_count()
+        else:
+            return psutil.NUM_CPUS
 
     def __call__(self):
         multiplier = config('worker-multiplier') or 0
@@ -1443,6 +1473,8 @@ class NetworkServiceContext(OSContextGenerator):
                         rdata.get('service_protocol') or 'http',
                     'auth_protocol':
                         rdata.get('auth_protocol') or 'http',
+                    'api_version':
+                        rdata.get('api_version') or '2.0',
                 }
                 if self.context_complete(ctxt):
                     return ctxt
diff --git a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
index eb8527f5..0df07176 100755
--- a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
+++ b/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
@@ -9,15 +9,17 @@ CRITICAL=0
 NOTACTIVE=''
 LOGFILE=/var/log/nagios/check_haproxy.log
 
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
+AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR==1{print $4}')
 
-for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
+typeset -i N_INSTANCES=0
+for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
 do
-    output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
+    N_INSTANCES=N_INSTANCES+1
+    output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK')
     if [ $?
!= 0 ]; then date >> $LOGFILE echo $output >> $LOGFILE - /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1 + /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1 CRITICAL=1 NOTACTIVE="${NOTACTIVE} $appserver" fi @@ -28,5 +30,5 @@ if [ $CRITICAL = 1 ]; then exit 2 fi -echo "OK: All haproxy instances looking good" +echo "OK: All haproxy instances ($N_INSTANCES) looking good" exit 0 diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index d17c847e..dbc489ab 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -50,7 +50,7 @@ def determine_dkms_package(): if kernel_version() >= (3, 13): return [] else: - return ['openvswitch-datapath-dkms'] + return [headers_package(), 'openvswitch-datapath-dkms'] # legacy @@ -70,7 +70,7 @@ def quantum_plugins(): relation_prefix='neutron', ssl_dir=QUANTUM_CONF_DIR)], 'services': ['quantum-plugin-openvswitch-agent'], - 'packages': [[headers_package()] + determine_dkms_package(), + 'packages': [determine_dkms_package(), ['quantum-plugin-openvswitch-agent']], 'server_packages': ['quantum-server', 'quantum-plugin-openvswitch'], @@ -111,7 +111,7 @@ def neutron_plugins(): relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], 'services': ['neutron-plugin-openvswitch-agent'], - 'packages': [[headers_package()] + determine_dkms_package(), + 'packages': [determine_dkms_package(), ['neutron-plugin-openvswitch-agent']], 'server_packages': ['neutron-server', 'neutron-plugin-openvswitch'], @@ -155,7 +155,7 @@ def neutron_plugins(): relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], 'services': [], - 'packages': [[headers_package()] + determine_dkms_package(), + 'packages': [determine_dkms_package(), ['neutron-plugin-cisco']], 'server_packages': ['neutron-server', 'neutron-plugin-cisco'], @@ -174,7 +174,7 @@ def neutron_plugins(): 'neutron-dhcp-agent', 'nova-api-metadata', 'etcd'], - 'packages': [[headers_package()] + determine_dkms_package(), + 'packages': [determine_dkms_package(), ['calico-compute', 'bird', 'neutron-dhcp-agent', @@ -219,7 +219,7 @@ def neutron_plugins(): relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], 'services': [], - 'packages': [[headers_package()] + determine_dkms_package()], + 'packages': [determine_dkms_package()], 'server_packages': ['neutron-server', 'python-neutron-plugin-midonet'], 'server_services': ['neutron-server'] @@ -233,6 +233,18 @@ def neutron_plugins(): 'neutron-plugin-ml2'] # NOTE: patch in vmware renames nvp->nsx for icehouse onwards plugins['nvp'] = plugins['nsx'] + if release >= 'kilo': + plugins['midonet']['driver'] = ( + 'neutron.plugins.midonet.plugin.MidonetPluginV2') + if release >= 'liberty': + midonet_origin = config('midonet-origin') + if midonet_origin is not None and midonet_origin[4:5] == '1': + plugins['midonet']['driver'] = ( + 'midonet.neutron.plugin_v1.MidonetPluginV2') + plugins['midonet']['server_packages'].remove( + 'python-neutron-plugin-midonet') + plugins['midonet']['server_packages'].append( + 'python-networking-midonet') return plugins diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 8721d8a1..32b62767 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -33,13 +33,14 @@ defaults 
timeout server 30000 {%- endif %} -listen stats {{ stat_port }} +listen stats + bind {{ local_host }}:{{ stat_port }} mode http stats enable stats hide-version stats realm Haproxy\ Statistics stats uri / - stats auth admin:password + stats auth admin:{{ stat_password }} {% if frontends -%} {% for service, ports in service_ports.items() -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken index 2a37edd5..0b6da25c 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken +++ b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken @@ -1,4 +1,14 @@ {% if auth_host -%} +{% if api_version == '3' -%} +[keystone_authtoken] +auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +project_domain_name = default +user_domain_name = default +auth_plugin = password +{% else -%} [keystone_authtoken] identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }} auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} @@ -7,3 +17,4 @@ admin_user = {{ admin_user }} admin_password = {{ admin_password }} signing_dir = {{ signing_dir }} {% endif -%} +{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index fc479a30..80dd2e0d 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -23,8 +23,10 @@ import json import os import sys import re +import itertools import six +import tempfile import traceback import uuid import yaml @@ -41,6 +43,7 @@ from charmhelpers.core.hookenv import ( config, log as juju_log, charm_dir, + DEBUG, INFO, related_units, relation_ids, @@ -58,6 +61,7 @@ from charmhelpers.contrib.storage.linux.lvm import ( from charmhelpers.contrib.network.ip import ( get_ipv6_addr, is_ipv6, + port_has_listener, ) from charmhelpers.contrib.python.packages import ( @@ -65,7 +69,7 @@ from charmhelpers.contrib.python.packages import ( pip_install, ) -from charmhelpers.core.host import lsb_release, mounts, umount +from charmhelpers.core.host import lsb_release, mounts, umount, service_running from charmhelpers.fetch import apt_install, apt_cache, install_remote from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device @@ -86,6 +90,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('utopic', 'juno'), ('vivid', 'kilo'), ('wily', 'liberty'), + ('xenial', 'mitaka'), ]) @@ -99,61 +104,70 @@ OPENSTACK_CODENAMES = OrderedDict([ ('2014.2', 'juno'), ('2015.1', 'kilo'), ('2015.2', 'liberty'), + ('2016.1', 'mitaka'), ]) -# The ugly duckling +# The ugly duckling - must list releases oldest to newest SWIFT_CODENAMES = OrderedDict([ - ('1.4.3', 'diablo'), - ('1.4.8', 'essex'), - ('1.7.4', 'folsom'), - ('1.8.0', 'grizzly'), - ('1.7.7', 'grizzly'), - ('1.7.6', 'grizzly'), - ('1.10.0', 'havana'), - ('1.9.1', 'havana'), - ('1.9.0', 'havana'), - ('1.13.1', 'icehouse'), - ('1.13.0', 'icehouse'), - ('1.12.0', 'icehouse'), - ('1.11.0', 'icehouse'), - ('2.0.0', 'juno'), - ('2.1.0', 'juno'), - ('2.2.0', 'juno'), - ('2.2.1', 'kilo'), - ('2.2.2', 'kilo'), - ('2.3.0', 'liberty'), - ('2.4.0', 'liberty'), - ('2.5.0', 'liberty'), + ('diablo', + ['1.4.3']), + ('essex', 
+ ['1.4.8']), + ('folsom', + ['1.7.4']), + ('grizzly', + ['1.7.6', '1.7.7', '1.8.0']), + ('havana', + ['1.9.0', '1.9.1', '1.10.0']), + ('icehouse', + ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), + ('juno', + ['2.0.0', '2.1.0', '2.2.0']), + ('kilo', + ['2.2.1', '2.2.2']), + ('liberty', + ['2.3.0', '2.4.0', '2.5.0']), + ('mitaka', + ['2.5.0']), ]) # >= Liberty version->codename mapping PACKAGE_CODENAMES = { 'nova-common': OrderedDict([ - ('12.0.0', 'liberty'), + ('12.0', 'liberty'), + ('13.0', 'mitaka'), ]), 'neutron-common': OrderedDict([ - ('7.0.0', 'liberty'), + ('7.0', 'liberty'), + ('8.0', 'mitaka'), ]), 'cinder-common': OrderedDict([ - ('7.0.0', 'liberty'), + ('7.0', 'liberty'), + ('8.0', 'mitaka'), ]), 'keystone': OrderedDict([ - ('8.0.0', 'liberty'), + ('8.0', 'liberty'), + ('9.0', 'mitaka'), ]), 'horizon-common': OrderedDict([ - ('8.0.0', 'liberty'), + ('8.0', 'liberty'), + ('9.0', 'mitaka'), ]), 'ceilometer-common': OrderedDict([ - ('5.0.0', 'liberty'), + ('5.0', 'liberty'), + ('6.0', 'mitaka'), ]), 'heat-common': OrderedDict([ - ('5.0.0', 'liberty'), + ('5.0', 'liberty'), + ('6.0', 'mitaka'), ]), 'glance-common': OrderedDict([ - ('11.0.0', 'liberty'), + ('11.0', 'liberty'), + ('12.0', 'mitaka'), ]), 'openstack-dashboard': OrderedDict([ - ('8.0.0', 'liberty'), + ('8.0', 'liberty'), + ('9.0', 'mitaka'), ]), } @@ -216,6 +230,33 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): error_out(e) +def get_os_version_codename_swift(codename): + '''Determine OpenStack version number of swift from codename.''' + for k, v in six.iteritems(SWIFT_CODENAMES): + if k == codename: + return v[-1] + e = 'Could not derive swift version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_swift_codename(version): + '''Determine OpenStack codename that corresponds to swift version.''' + codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] + if len(codenames) > 1: + # If more than one release codename contains this version we determine + # the actual codename based on the highest available install source. 
+        for codename in reversed(codenames):
+            releases = UBUNTU_OPENSTACK_RELEASE
+            release = [k for k, v in six.iteritems(releases) if codename in v]
+            ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
+            if codename in ret or release[0] in ret:
+                return codename
+    elif len(codenames) == 1:
+        return codenames[0]
+    return None
+
+
 def get_os_codename_package(package, fatal=True):
     '''Derive OpenStack release codename from an installed package.'''
     import apt_pkg as apt
@@ -240,7 +281,14 @@ def get_os_codename_package(package, fatal=True):
         error_out(e)
 
     vers = apt.upstream_version(pkg.current_ver.ver_str)
-    match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
+    if 'swift' in pkg.name:
+        # Fully x.y.z match for swift versions
+        match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
+    else:
+        # x.y match only for 20XX.X
+        # and ignore patch level for other packages
+        match = re.match('^(\d+)\.(\d+)', vers)
+
     if match:
         vers = match.group(0)
 
@@ -252,13 +300,8 @@ def get_os_codename_package(package, fatal=True):
     # < Liberty co-ordinated project versions
     try:
         if 'swift' in pkg.name:
-            swift_vers = vers[:5]
-            if swift_vers not in SWIFT_CODENAMES:
-                # Deal with 1.10.0 upward
-                swift_vers = vers[:6]
-            return SWIFT_CODENAMES[swift_vers]
+            return get_swift_codename(vers)
         else:
-            vers = vers[:6]
             return OPENSTACK_CODENAMES[vers]
     except KeyError:
         if not fatal:
@@ -276,12 +319,14 @@ def get_os_version_package(pkg, fatal=True):
 
     if 'swift' in pkg:
         vers_map = SWIFT_CODENAMES
+        for cname, version in six.iteritems(vers_map):
+            if cname == codename:
+                return version[-1]
     else:
         vers_map = OPENSTACK_CODENAMES
-
-    for version, cname in six.iteritems(vers_map):
-        if cname == codename:
-            return version
+        for version, cname in six.iteritems(vers_map):
+            if cname == codename:
+                return version
     # e = "Could not determine OpenStack version for package: %s" % pkg
     # error_out(e)
@@ -306,12 +351,42 @@ def os_release(package, base='essex'):
 
 
 def import_key(keyid):
-    cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
-          "--recv-keys %s" % keyid
-    try:
-        subprocess.check_call(cmd.split(' '))
-    except subprocess.CalledProcessError:
-        error_out("Error importing repo key %s" % keyid)
+    key = keyid.strip()
+    if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
+            key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
+        juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
+        juju_log("Importing ASCII Armor PGP key", level=DEBUG)
+        with tempfile.NamedTemporaryFile() as keyfile:
+            with open(keyfile.name, 'w') as fd:
+                fd.write(key)
+                fd.write("\n")
+
+            cmd = ['apt-key', 'add', keyfile.name]
+            try:
+                subprocess.check_call(cmd)
+            except subprocess.CalledProcessError:
+                error_out("Error importing PGP key '%s'" % key)
+    else:
+        juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
+        juju_log("Importing PGP key from keyserver", level=DEBUG)
+        cmd = ['apt-key', 'adv', '--keyserver',
+               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
+        try:
+            subprocess.check_call(cmd)
+        except subprocess.CalledProcessError:
+            error_out("Error importing PGP key '%s'" % key)
+
+
+def get_source_and_pgp_key(input):
+    """Look for a pgp key ID or ascii-armor key in the given input."""
+    input = input.strip()
+    index = input.rfind('|')
+    if index < 0:
+        return input, None
+
+    key = input[index + 1:].strip('|')
+    source = input[:index]
+    return source, key
 
 
 def configure_installation_source(rel):
@@ -323,16 +398,16 @@ def configure_installation_source(rel):
         with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(DISTRO_PROPOSED % ubuntu_rel) elif rel[:4] == "ppa:": - src = rel + src, key = get_source_and_pgp_key(rel) + if key: + import_key(key) + subprocess.check_call(["add-apt-repository", "-y", src]) elif rel[:3] == "deb": - l = len(rel.split('|')) - if l == 2: - src, key = rel.split('|') - juju_log("Importing PPA key from keyserver for %s" % src) + src, key = get_source_and_pgp_key(rel) + if key: import_key(key) - elif l == 1: - src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: f.write(src) elif rel[:6] == 'cloud:': @@ -377,6 +452,9 @@ def configure_installation_source(rel): 'liberty': 'trusty-updates/liberty', 'liberty/updates': 'trusty-updates/liberty', 'liberty/proposed': 'trusty-proposed/liberty', + 'mitaka': 'trusty-updates/mitaka', + 'mitaka/updates': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', } try: @@ -444,11 +522,16 @@ def openstack_upgrade_available(package): cur_vers = get_os_version_package(package) if "swift" in package: codename = get_os_codename_install_source(src) - available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) + avail_vers = get_os_version_codename_swift(codename) else: - available_vers = get_os_version_install_source(src) + avail_vers = get_os_version_install_source(src) apt.init() - return apt.version_compare(available_vers, cur_vers) == 1 + if "swift" in package: + major_cur_vers = cur_vers.split('.', 1)[0] + major_avail_vers = avail_vers.split('.', 1)[0] + major_diff = apt.version_compare(major_avail_vers, major_cur_vers) + return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0) + return apt.version_compare(avail_vers, cur_vers) == 1 def ensure_block_device(block_device): @@ -577,7 +660,7 @@ def _git_yaml_load(projects_yaml): return yaml.load(projects_yaml) -def git_clone_and_install(projects_yaml, core_project, depth=1): +def git_clone_and_install(projects_yaml, core_project): """ Clone/install all specified OpenStack repositories. @@ -627,6 +710,9 @@ def git_clone_and_install(projects_yaml, core_project, depth=1): for p in projects['repositories']: repo = p['repository'] branch = p['branch'] + depth = '1' + if 'depth' in p.keys(): + depth = p['depth'] if p['name'] == 'requirements': repo_dir = _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, @@ -671,19 +757,13 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, """ Clone and install a single git repository. """ - dest_dir = os.path.join(parent_dir, os.path.basename(repo)) - if not os.path.exists(parent_dir): juju_log('Directory already exists at {}. ' 'No need to create directory.'.format(parent_dir)) os.mkdir(parent_dir) - if not os.path.exists(dest_dir): - juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) - repo_dir = install_remote(repo, dest=parent_dir, branch=branch, - depth=depth) - else: - repo_dir = dest_dir + juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) + repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth) venv = os.path.join(parent_dir, 'venv') @@ -782,13 +862,23 @@ def os_workload_status(configs, required_interfaces, charm_func=None): return wrap -def set_os_workload_status(configs, required_interfaces, charm_func=None): +def set_os_workload_status(configs, required_interfaces, charm_func=None, services=None, ports=None): """ Set workload status based on complete contexts. status-set missing or incomplete contexts and juju-log details of missing required data. 
charm_func is a charm specific function to run checking
     for charm specific requirements such as a VIP setting.
+
+    This function also checks for whether the services defined are ACTUALLY
+    running and that the ports they advertise are open and being listened to.
+
+    @param services - OPTIONAL: a [{'service': <string>, 'ports': [<int>]}]
+                      The ports are optional.
+                      If services is a [] then ports are ignored.
+    @param ports - OPTIONAL: an [<int>] representing ports that should be
+                   open.
+    @returns None
     """
     incomplete_rel_data = incomplete_relation_data(configs,
                                                    required_interfaces)
     state = 'active'
@@ -867,6 +957,65 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None):
         else:
             message = charm_message
 
+    # If the charm thinks the unit is active, check that the actual services
+    # really are active.
+    if services is not None and state == 'active':
+        # if we're passed the dict() then just grab the values as a list.
+        if isinstance(services, dict):
+            services = services.values()
+        # either extract the list of services from the dictionary, or if
+        # it is a simple string, use that. i.e. works with mixed lists.
+        _s = []
+        for s in services:
+            if isinstance(s, dict) and 'service' in s:
+                _s.append(s['service'])
+            if isinstance(s, str):
+                _s.append(s)
+        services_running = [service_running(s) for s in _s]
+        if not all(services_running):
+            not_running = [s for s, running in zip(_s, services_running)
+                           if not running]
+            message = ("Services not running that should be: {}"
+                       .format(", ".join(not_running)))
+            state = 'blocked'
+        # also verify that the ports that should be open are open
+        # NB, that ServiceManager objects only OPTIONALLY have ports
+        port_map = OrderedDict([(s['service'], s['ports'])
+                                for s in services if 'ports' in s])
+        if state == 'active' and port_map:
+            all_ports = list(itertools.chain(*port_map.values()))
+            ports_open = [port_has_listener('0.0.0.0', p)
+                          for p in all_ports]
+            if not all(ports_open):
+                not_opened = [p for p, opened in zip(all_ports, ports_open)
+                              if not opened]
+                map_not_open = OrderedDict()
+                for service, ports in port_map.items():
+                    closed_ports = set(ports).intersection(not_opened)
+                    if closed_ports:
+                        map_not_open[service] = closed_ports
+                # find which service has missing ports. They are in service
+                # order which makes it a bit easier.
+                message = (
+                    "Services with ports not open that should be: {}"
+                    .format(
+                        ", ".join([
+                            "{}: [{}]".format(
+                                service,
+                                ", ".join([str(v) for v in ports]))
+                            for service, ports in map_not_open.items()])))
+                state = 'blocked'
+
+    if ports is not None and state == 'active':
+        # and we can also check ports which we don't know the service for
+        ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
+        if not all(ports_open):
+            message = (
+                "Ports which should be open, but are not: {}"
+                .format(", ".join([str(p) for p, v in zip(ports, ports_open)
+                                   if not v])))
+            state = 'blocked'
+
     # Set to active if all requirements have been met
     if state == 'active':
         message = "Unit is ready"
diff --git a/hooks/charmhelpers/contrib/python/packages.py b/hooks/charmhelpers/contrib/python/packages.py
index 10b32e33..a2411c37 100644
--- a/hooks/charmhelpers/contrib/python/packages.py
+++ b/hooks/charmhelpers/contrib/python/packages.py
@@ -19,20 +19,35 @@
 
 import os
 import subprocess
+import sys
 
 from charmhelpers.fetch import apt_install, apt_update
 from charmhelpers.core.hookenv import charm_dir, log
 
-try:
-    from pip import main as pip_execute
-except ImportError:
-    apt_update()
-    apt_install('python-pip')
-    from pip import main as pip_execute
-
 __author__ = "Jorge Niedbalski "
 
 
+def pip_execute(*args, **kwargs):
+    """Overridden pip_execute() to stop sys.path being changed.
+
+    The act of importing main from the pip module seems to add wheels
+    from the /usr/share/python-wheels directory, which are installed by
+    various tools. This function ensures that sys.path remains the same
+    after the call is executed.
+    """
+    try:
+        _path = sys.path[:]
+        try:
+            from pip import main as _pip_execute
+        except ImportError:
+            apt_update()
+            apt_install('python-pip')
+            from pip import main as _pip_execute
+        _pip_execute(*args, **kwargs)
+    finally:
+        sys.path = _path
+
+
 def parse_options(given, available):
     """Given a set of options, check if available"""
     for key, value in sorted(given.items()):
@@ -42,8 +57,12 @@ def parse_options(given, available):
             yield "--{0}={1}".format(key, value)
 
 
-def pip_install_requirements(requirements, **options):
-    """Install a requirements file """
+def pip_install_requirements(requirements, constraints=None, **options):
+    """Install a requirements file.
+
+    :param constraints: Path to pip constraints file.
+ http://pip.readthedocs.org/en/stable/user_guide/#constraints-files + """ command = ["install"] available_options = ('proxy', 'src', 'log', ) @@ -51,8 +70,13 @@ def pip_install_requirements(requirements, **options): command.append(option) command.append("-r {0}".format(requirements)) - log("Installing from file: {} with options: {}".format(requirements, - command)) + if constraints: + command.append("-c {0}".format(constraints)) + log("Installing from file: {} with constraints {} " + "and options: {}".format(requirements, constraints, command)) + else: + log("Installing from file: {} with options: {}".format(requirements, + command)) pip_execute(command) diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1235389e..60ae52b8 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -23,10 +23,11 @@ # James Page # Adam Gandelman # +import bisect +import six import os import shutil -import six import json import time import uuid @@ -73,6 +74,394 @@ log to syslog = {use_syslog} err to syslog = {use_syslog} clog to syslog = {use_syslog} """ +# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) +powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] + + +def validator(value, valid_type, valid_range=None): + """ + Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + Example input: + validator(value=1, + valid_type=int, + valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] + + :param value: The value to validate + :param valid_type: The type that value should be. + :param valid_range: A range of values that value can assume. + :return: + """ + assert isinstance(value, valid_type), "{} is not a {}".format( + value, + valid_type) + if valid_range is not None: + assert isinstance(valid_range, list), \ + "valid_range must be a list, was given {}".format(valid_range) + # If we're dealing with strings + if valid_type is six.string_types: + assert value in valid_range, \ + "{} is not in the list {}".format(value, valid_range) + # Integer, float should have a min and max + else: + if len(valid_range) != 2: + raise ValueError( + "Invalid valid_range list of {} for {}. " + "List must be [min,max]".format(valid_range, value)) + assert value >= valid_range[0], \ + "{} is less than minimum allowed value of {}".format( + value, valid_range[0]) + assert value <= valid_range[1], \ + "{} is greater than maximum allowed value of {}".format( + value, valid_range[1]) + + +class PoolCreationError(Exception): + """ + A custom error to inform the caller that a pool creation failed. Provides an error message + """ + def __init__(self, message): + super(PoolCreationError, self).__init__(message) + + +class Pool(object): + """ + An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. + Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). + """ + def __init__(self, service, name): + self.service = service + self.name = name + + # Create the pool if it doesn't exist already + # To be implemented by subclasses + def create(self): + pass + + def add_cache_tier(self, cache_pool, mode): + """ + Adds a new cache tier to an existing pool. + :param cache_pool: six.string_types. The cache tier pool name to add. + :param mode: six.string_types. 
The caching mode to use for this pool. valid range = ["readonly", "writeback"]
+        :return: None
+        """
+        # Check the input types and values
+        validator(value=cache_pool, valid_type=six.string_types)
+        validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
+
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
+        check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
+
+    def remove_cache_tier(self, cache_pool):
+        """
+        Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
+        :param cache_pool: six.string_types. The cache tier pool name to remove.
+        :return: None
+        """
+        # read-only is easy, writeback is much harder
+        mode = get_cache_mode(self.service, cache_pool)
+        if mode == 'readonly':
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
+
+        elif mode == 'writeback':
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
+            # Flush the cache and wait for it to return
+            check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
+
+    def get_pgs(self, pool_size):
+        """
+        :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
+            erasure coded pools
+        :return: int. The number of pgs to use.
+        """
+        validator(value=pool_size, valid_type=int)
+        osds = get_osds(self.service)
+        if not osds:
+            # NOTE(james-page): Default to 200 for older ceph versions
+            # which don't support OSD query from cli
+            return 200
+
+        # Calculate based on Ceph best practices
+        if osds < 5:
+            return 128
+        elif osds < 10:
+            return 512
+        elif osds < 50:
+            return 4096
+        else:
+            estimate = (osds * 100) / pool_size
+            # Return the next nearest power of 2
+            index = bisect.bisect_right(powers_of_two, estimate)
+            return powers_of_two[index]
+
+
+class ReplicatedPool(Pool):
+    def __init__(self, service, name, replicas=2):
+        super(ReplicatedPool, self).__init__(service=service, name=name)
+        self.replicas = replicas
+
+    def create(self):
+        if not pool_exists(self.service, self.name):
+            # Create it
+            pgs = self.get_pgs(self.replicas)
+            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)]
+            try:
+                check_call(cmd)
+            except CalledProcessError:
+                raise
+
+
+# Default jerasure erasure coded pool
+class ErasurePool(Pool):
+    def __init__(self, service, name, erasure_code_profile="default"):
+        super(ErasurePool, self).__init__(service=service, name=name)
+        self.erasure_code_profile = erasure_code_profile
+
+    def create(self):
+        if not pool_exists(self.service, self.name):
+            # Try to find the erasure profile information so we can properly size the pgs
+            erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
+
+            # Check for errors
+            if erasure_profile is None:
+                log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
+                    level=ERROR)
+                raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
+            if 'k' not in erasure_profile or 'm' not in erasure_profile:
+                # Error
+                log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
+                    level=ERROR)
+                raise PoolCreationError(
+                    message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
+
+            pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
+            # Create it
+            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs),
+                   'erasure', self.erasure_code_profile]
+            try:
+                check_call(cmd)
+            except CalledProcessError:
+                raise
+
+
+def get_erasure_profile(service, name):
+    """
+    Get an existing erasure code profile if it already exists.
+    Returns json formatted output.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param name: six.string_types. The name of the erasure code profile
+    :return: dict or None
+    """
+    try:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'erasure-code-profile', 'get',
+                            name, '--format=json'])
+        return json.loads(out)
+    except (CalledProcessError, OSError, ValueError):
+        return None
+
+
+def pool_set(service, pool_name, key, value):
+    """
+    Sets a value for a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param key: six.string_types
+    :param value:
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def snapshot_pool(service, pool_name, snapshot_name):
+    """
+    Snapshots a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param snapshot_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_pool_snapshot(service, pool_name, snapshot_name):
+    """
+    Remove a snapshot from a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param snapshot_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+# max_bytes should be an int or long
+def set_pool_quota(service, pool_name, max_bytes):
+    """
+    Set a byte quota on a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param max_bytes: int or long
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
+           'max_bytes', str(max_bytes)]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_pool_quota(service, pool_name):
+    """
+    Remove a byte quota from a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host',
+                           data_chunks=2, coding_chunks=1,
+                           locality=None, durability_estimator=None):
+    """
+    Create a new erasure code profile if one does not already exist for it. Updates
+    the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+    for more details
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param profile_name: six.string_types
+    :param erasure_plugin_name: six.string_types
+    :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
+        'room', 'root', 'row'])
+    :param data_chunks: int
+    :param coding_chunks: int
+    :param locality: int
+    :param durability_estimator: int
+    :return: None. Can raise CalledProcessError
+    """
+    # Ensure this failure_domain is allowed by Ceph
+    validator(failure_domain, six.string_types,
+              ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
+
+    cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
+           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
+           'ruleset_failure_domain=' + failure_domain]
+    if locality is not None and durability_estimator is not None:
+        raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
+
+    # Add plugin specific information
+    if locality is not None:
+        # For local erasure codes
+        cmd.append('l=' + str(locality))
+    if durability_estimator is not None:
+        # For Shec erasure codes
+        cmd.append('c=' + str(durability_estimator))
+
+    if erasure_profile_exists(service, profile_name):
+        cmd.append('--force')
+
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def rename_pool(service, old_name, new_name):
+    """
+    Rename a Ceph pool from old_name to new_name
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param old_name: six.string_types
+    :param new_name: six.string_types
+    :return: None
+    """
+    validator(value=old_name, valid_type=six.string_types)
+    validator(value=new_name, valid_type=six.string_types)
+
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
+    check_call(cmd)
+
+
+def erasure_profile_exists(service, name):
+    """
+    Check to see if an Erasure code profile already exists.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param name: six.string_types
+    :return: bool
+    """
+    validator(value=name, valid_type=six.string_types)
+    try:
+        check_call(['ceph', '--id', service,
+                    'osd', 'erasure-code-profile', 'get',
+                    name])
+        return True
+    except CalledProcessError:
+        return False
+
+
+def get_cache_mode(service, pool_name):
+    """
+    Find the current caching mode of the pool_name given.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :return: six.string_types or None
+    """
+    validator(value=service, valid_type=six.string_types)
+    validator(value=pool_name, valid_type=six.string_types)
+    out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
+    try:
+        osd_json = json.loads(out)
+        for pool in osd_json['pools']:
+            if pool['pool_name'] == pool_name:
+                return pool['cache_mode']
+        return None
+    except ValueError:
+        raise
+
+
+def pool_exists(service, name):
+    """Check to see if a RADOS pool already exists."""
+    try:
+        out = check_output(['rados', '--id', service,
+                            'lspools']).decode('UTF-8')
+    except CalledProcessError:
+        return False
+
+    return name in out
+
+
+def get_osds(service):
+    """Return a list of all Ceph Object Storage Daemons currently in the
+    cluster.
+ """ + version = ceph_version() + if version and version >= '0.56': + return json.loads(check_output(['ceph', '--id', service, + 'osd', 'ls', + '--format=json']).decode('UTF-8')) + + return None def install(): @@ -102,30 +491,6 @@ def create_rbd_image(service, pool, image, sizemb): check_call(cmd) -def pool_exists(service, name): - """Check to see if a RADOS pool already exists.""" - try: - out = check_output(['rados', '--id', service, - 'lspools']).decode('UTF-8') - except CalledProcessError: - return False - - return name in out - - -def get_osds(service): - """Return a list of all Ceph Object Storage Daemons currently in the - cluster. - """ - version = ceph_version() - if version and version >= '0.56': - return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', - '--format=json']).decode('UTF-8')) - - return None - - def update_pool(client, pool, settings): cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] for k, v in six.iteritems(settings): @@ -414,6 +779,7 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ + def __init__(self, api_version=1, request_id=None): self.api_version = api_version if request_id: diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 454b52ae..2dd70bc9 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -492,7 +492,7 @@ def relation_types(): @cached def peer_relation_id(): - '''Get a peer relation id if a peer relation has been joined, else None.''' + '''Get the peers relation id if a peers relation has been joined, else None.''' md = metadata() section = md.get('peers') if section: @@ -517,12 +517,12 @@ def relation_to_interface(relation_name): def relation_to_role_and_interface(relation_name): """ Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. """ _metadata = metadata() - for role in ('provides', 'requires', 'peer'): + for role in ('provides', 'requires', 'peers'): interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') if interface: return role, interface @@ -534,7 +534,7 @@ def role_and_interface_to_relations(role, interface_name): """ Given a role and interface name, return a list of relation names for the current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peer``). + of ``provides``, ``requires``, or ``peers``). :returns: A list of relation names. """ @@ -555,7 +555,7 @@ def interface_to_relations(interface_name): :returns: A list of relation names. 
""" results = [] - for role in ('provides', 'requires', 'peer'): + for role in ('provides', 'requires', 'peers'): results.extend(role_and_interface_to_relations(role, interface_name)) return results @@ -637,7 +637,7 @@ def unit_private_ip(): @cached -def storage_get(attribute="", storage_id=""): +def storage_get(attribute=None, storage_id=None): """Get storage attributes""" _args = ['storage-get', '--format=json'] if storage_id: @@ -651,7 +651,7 @@ def storage_get(attribute="", storage_id=""): @cached -def storage_list(storage_name=""): +def storage_list(storage_name=None): """List the storage IDs for the unit""" _args = ['storage-list', '--format=json'] if storage_name: @@ -878,6 +878,40 @@ def leader_set(settings=None, **kwargs): subprocess.check_call(cmd) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_register(ptype, klass, pid): + """ is used while a hook is running to let Juju know that a + payload has been started.""" + cmd = ['payload-register'] + for x in [ptype, klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_unregister(klass, pid): + """ is used while a hook is running to let Juju know + that a payload has been manually stopped. The and provided + must match a payload that has been previously registered with juju using + payload-register.""" + cmd = ['payload-unregister'] + for x in [klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_status_set(klass, pid, status): + """is used to update the current status of a registered payload. + The and provided must match a payload that has been previously + registered with juju using payload-register. The must be one of the + follow: starting, started, stopping, stopped""" + cmd = ['payload-status-set'] + for x in [klass, pid, status]: + cmd.append(x) + subprocess.check_call(cmd) + + @cached def juju_version(): """Full version string (eg. '1.23.3.1-trusty-amd64')""" diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 579871bc..a7720906 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -72,7 +72,9 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): stopped = service_stop(service_name) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if os.path.exists(upstart_file): + if init_is_systemd(): + service('disable', service_name) + elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) with open(override_path, 'w') as fh: @@ -80,9 +82,9 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): elif os.path.exists(sysv_file): subprocess.check_call(["update-rc.d", service_name, "disable"]) else: - # XXX: Support SystemD too raise ValueError( - "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( service_name, upstart_file, sysv_file)) return stopped @@ -94,7 +96,9 @@ def service_resume(service_name, init_dir="/etc/init", Reenable starting again at boot. 
Start the service""" upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if os.path.exists(upstart_file): + if init_is_systemd(): + service('enable', service_name) + elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) if os.path.exists(override_path): @@ -102,9 +106,9 @@ def service_resume(service_name, init_dir="/etc/init", elif os.path.exists(sysv_file): subprocess.check_call(["update-rc.d", service_name, "enable"]) else: - # XXX: Support SystemD too raise ValueError( - "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( service_name, upstart_file, sysv_file)) started = service_running(service_name) @@ -115,23 +119,30 @@ def service_resume(service_name, init_dir="/etc/init", def service(action, service_name): """Control a system service""" - cmd = ['service', service_name, action] + if init_is_systemd(): + cmd = ['systemctl', action, service_name] + else: + cmd = ['service', service_name, action] return subprocess.call(cmd) == 0 -def service_running(service): +def service_running(service_name): """Determine whether a system service is running""" - try: - output = subprocess.check_output( - ['service', service, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False + if init_is_systemd(): + return service('is-active', service_name) else: - if ("start/running" in output or "is running" in output): - return True - else: + try: + output = subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: return False + else: + if ("start/running" in output or "is running" in output or + "up and running" in output): + return True + else: + return False def service_available(service_name): @@ -146,8 +157,29 @@ def service_available(service_name): return True -def adduser(username, password=None, shell='/bin/bash', system_user=False): - """Add a user to the system""" +SYSTEMD_SYSTEM = '/run/systemd/system' + + +def init_is_systemd(): + """Return True if the host system uses systemd, False otherwise.""" + return os.path.isdir(SYSTEMD_SYSTEM) + + +def adduser(username, password=None, shell='/bin/bash', system_user=False, + primary_group=None, secondary_groups=None): + """Add a user to the system. + + Will log but otherwise succeed if the user already exists. 
+ + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to username + :param list secondary_groups: Optional list of additional groups + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ try: user_info = pwd.getpwnam(username) log('user {0} already exists!'.format(username)) @@ -162,6 +194,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): '--shell', shell, '--password', password, ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) cmd.append(username) subprocess.check_call(cmd) user_info = pwd.getpwnam(username) @@ -259,14 +301,12 @@ def write_file(path, content, owner='root', group='root', perms=0o444): def fstab_remove(mp): - """Remove the given mountpoint entry from /etc/fstab - """ + """Remove the given mountpoint entry from /etc/fstab""" return Fstab.remove_by_mountpoint(mp) def fstab_add(dev, mp, fs, options=None): - """Adds the given device entry to the /etc/fstab file - """ + """Adds the given device entry to the /etc/fstab file""" return Fstab.add(dev, mp, fs, options=options) @@ -322,8 +362,7 @@ def fstab_mount(mountpoint): def file_hash(path, hash_type='md5'): - """ - Generate a hash checksum of the contents of 'path' or None if not found. + """Generate a hash checksum of the contents of 'path' or None if not found. :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. @@ -338,10 +377,9 @@ def file_hash(path, hash_type='md5'): def path_hash(path): - """ - Generate a hash checksum of all files matching 'path'. Standard wildcards - like '*' and '?' are supported, see documentation for the 'glob' module for - more information. + """Generate a hash checksum of all files matching 'path'. Standard + wildcards like '*' and '?' are supported, see documentation for the 'glob' + module for more information. :return: dict: A { filename: hash } dictionary for all matched files. Empty if none found. @@ -353,8 +391,7 @@ def path_hash(path): def check_hash(path, checksum, hash_type='md5'): - """ - Validate a file using a cryptographic checksum. + """Validate a file using a cryptographic checksum. :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`. 
@@ -369,6 +406,7 @@ def check_hash(path, checksum, hash_type='md5'):
 
 
 class ChecksumError(ValueError):
+    """A class derived from ValueError to indicate the checksum failed."""
     pass
 
 
@@ -474,7 +512,7 @@ def get_bond_master(interface):
 
 
 def list_nics(nic_type=None):
-    '''Return a list of nics of given type(s)'''
+    """Return a list of nics of given type(s)"""
     if isinstance(nic_type, six.string_types):
         int_types = [nic_type]
     else:
@@ -516,12 +554,13 @@ def list_nics(nic_type=None):
 
 
 def set_nic_mtu(nic, mtu):
-    '''Set MTU on a network interface'''
+    """Set the Maximum Transmission Unit (MTU) on a network interface."""
     cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
     subprocess.check_call(cmd)
 
 
 def get_nic_mtu(nic):
+    """Return the Maximum Transmission Unit (MTU) for a network interface."""
     cmd = ['ip', 'addr', 'show', nic]
     ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
     mtu = ""
@@ -533,6 +572,7 @@ def get_nic_mtu(nic):
 
 
 def get_nic_hwaddr(nic):
+    """Return the Media Access Control (MAC) address for a network interface."""
     cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
     ip_output = subprocess.check_output(cmd).decode('UTF-8')
     hwaddr = ""
@@ -543,7 +583,7 @@ def get_nic_hwaddr(nic):
 
 
 def cmp_pkgrevno(package, revno, pkgcache=None):
-    '''Compare supplied revno with the revno of the installed package
+    """Compare supplied revno with the revno of the installed package
 
     *  1 => Installed revno is greater than supplied arg
     *  0 => Installed revno is the same as supplied arg
@@ -552,7 +592,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
     This function imports apt_cache function from charmhelpers.fetch if
     the pkgcache argument is None. Be sure to add charmhelpers.fetch if
     you call this function, or pass an apt_pkg.Cache() instance.
-    '''
+    """
    import apt_pkg
    if not pkgcache:
        from charmhelpers.fetch import apt_cache
@@ -562,19 +602,27 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
 
 
 @contextmanager
-def chdir(d):
+def chdir(directory):
+    """Change the current working directory to a different directory for a
+    code block and restore the previous directory after the block exits.
+    Useful to run commands from a specified directory.
+
+    :param str directory: The directory path to change to for this context.
+    """
     cur = os.getcwd()
     try:
-        yield os.chdir(d)
+        yield os.chdir(directory)
     finally:
         os.chdir(cur)
 
 
 def chownr(path, owner, group, follow_links=True, chowntopdir=False):
-    """
-    Recursively change user and group ownership of files and directories
+    """Recursively change user and group ownership of files and directories
     in a given path. Doesn't chown path itself by default, only its children.
 
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
     :param bool follow_links: Also chown links if True
     :param bool chowntopdir: Also chown path itself if True
     """
@@ -598,15 +646,23 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False):
 
 
 def lchownr(path, owner, group):
+    """Recursively change user and group ownership of files and directories
+    in a given path, not following symbolic links. See the documentation for
+    'os.lchown' for more information.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+ """ chownr(path, owner, group, follow_links=False) def get_total_ram(): - '''The total amount of system RAM in bytes. + """The total amount of system RAM in bytes. This is what is reported by the OS, and may be overcommitted when there are multiple containers hosted on the same machine. - ''' + """ with open('/proc/meminfo', 'r') as f: for line in f.readlines(): if line: diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 12d768e6..24237042 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -243,13 +243,15 @@ class TemplateCallback(ManagerCallback): :param str source: The template source file, relative to `$CHARM_DIR/templates` - :param str target: The target to write the rendered template to + :param str target: The target to write the rendered template to (or None) :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file :param partial on_change_action: functools partial to be executed when rendered file changes :param jinja2 loader template_loader: A jinja2 template loader + + :return str: The rendered template """ def __init__(self, source, target, owner='root', group='root', perms=0o444, @@ -267,12 +269,14 @@ class TemplateCallback(ManagerCallback): if self.on_change_action and os.path.isfile(self.target): pre_checksum = host.file_hash(self.target) service = manager.get_service(service_name) - context = {} + context = {'ctx': {}} for ctx in service.get('required_data', []): context.update(ctx) - templating.render(self.source, self.target, context, - self.owner, self.group, self.perms, - template_loader=self.template_loader) + context['ctx'].update(ctx) + + result = templating.render(self.source, self.target, context, + self.owner, self.group, self.perms, + template_loader=self.template_loader) if self.on_change_action: if pre_checksum == host.file_hash(self.target): hookenv.log( @@ -281,6 +285,8 @@ class TemplateCallback(ManagerCallback): else: self.on_change_action() + return result + # Convenience aliases for templates render_template = template = TemplateCallback diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py index 239719d4..d2d8eafe 100644 --- a/hooks/charmhelpers/core/templating.py +++ b/hooks/charmhelpers/core/templating.py @@ -27,7 +27,8 @@ def render(source, target, context, owner='root', group='root', The `source` path, if not absolute, is relative to the `templates_dir`. - The `target` path should be absolute. + The `target` path should be absolute. It can also be `None`, in which + case no file will be written. The context should be a dict containing the values to be replaced in the template. @@ -36,6 +37,9 @@ def render(source, target, context, owner='root', group='root', If omitted, `templates_dir` defaults to the `templates` folder in the charm. + The rendered template will be written to the file as well as being returned + as a string. + Note: Using this requires python-jinja2; if it is not installed, calling this will attempt to use charmhelpers.fetch.apt_install to install it. """ @@ -67,9 +71,11 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - target_dir = os.path.dirname(target) - if not os.path.exists(target_dir): - # This is a terrible default directory permission, as the file - # or its siblings will often contain secrets. 
-        host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
-    host.write_file(target, content.encode(encoding), owner, group, perms)
+    if target is not None:
+        target_dir = os.path.dirname(target)
+        if not os.path.exists(target_dir):
+            # This is a terrible default directory permission, as the file
+            # or its siblings will often contain secrets.
+            host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
+        host.write_file(target, content.encode(encoding), owner, group, perms)
+    return content
diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py
index 5f831c35..db0d86a2 100644
--- a/hooks/charmhelpers/fetch/__init__.py
+++ b/hooks/charmhelpers/fetch/__init__.py
@@ -98,6 +98,14 @@ CLOUD_ARCHIVE_POCKETS = {
     'liberty/proposed': 'trusty-proposed/liberty',
     'trusty-liberty/proposed': 'trusty-proposed/liberty',
     'trusty-proposed/liberty': 'trusty-proposed/liberty',
+    # Mitaka
+    'mitaka': 'trusty-updates/mitaka',
+    'trusty-mitaka': 'trusty-updates/mitaka',
+    'trusty-mitaka/updates': 'trusty-updates/mitaka',
+    'trusty-updates/mitaka': 'trusty-updates/mitaka',
+    'mitaka/proposed': 'trusty-proposed/mitaka',
+    'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
+    'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
 }
 
 # The order of this list is very important. Handlers should be listed in from
@@ -411,7 +419,7 @@ def plugins(fetch_handlers=None):
                 importlib.import_module(package),
                 classname)
             plugin_list.append(handler_class())
-        except (ImportError, AttributeError):
+        except NotImplementedError:
             # Skip missing plugins so that they can be omitted from
             # installation if desired
             log("FetchHandler {} not found, skipping plugin".format(
diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py
index efd7f9f0..b8e0943d 100644
--- a/hooks/charmhelpers/fetch/archiveurl.py
+++ b/hooks/charmhelpers/fetch/archiveurl.py
@@ -108,7 +108,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
         install_opener(opener)
         response = urlopen(source)
         try:
-            with open(dest, 'w') as dest_file:
+            with open(dest, 'wb') as dest_file:
                 dest_file.write(response.read())
         except Exception as e:
             if os.path.isfile(dest):
diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py
index 3531315a..cafd27f7 100644
--- a/hooks/charmhelpers/fetch/bzrurl.py
+++ b/hooks/charmhelpers/fetch/bzrurl.py
@@ -15,60 +15,50 @@
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
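A minimal sketch of how the CLI-based handler below is expected to behave
(the lp: URL and destination path are illustrative assumptions only):

    from charmhelpers.fetch.bzrurl import BzrUrlFetchHandler

    handler = BzrUrlFetchHandler()
    if handler.can_handle('lp:charm-helpers'):
        # First call runs 'bzr branch' into dest; a later call with the
        # same dest runs 'bzr pull --overwrite' instead of re-branching.
        handler.branch('lp:charm-helpers', '/tmp/charm-helpers')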
import os +from subprocess import check_call from charmhelpers.fetch import ( BaseFetchHandler, - UnhandledSource + UnhandledSource, + filter_installed_packages, + apt_install, ) from charmhelpers.core.host import mkdir -import six -if six.PY3: - raise ImportError('bzrlib does not support Python3') -try: - from bzrlib.branch import Branch - from bzrlib import bzrdir, workingtree, errors -except ImportError: - from charmhelpers.fetch import apt_install - apt_install("python-bzrlib") - from bzrlib.branch import Branch - from bzrlib import bzrdir, workingtree, errors +if filter_installed_packages(['bzr']) != []: + apt_install(['bzr']) + if filter_installed_packages(['bzr']) != []: + raise NotImplementedError('Unable to install bzr') class BzrUrlFetchHandler(BaseFetchHandler): """Handler for bazaar branches via generic and lp URLs""" def can_handle(self, source): url_parts = self.parse_url(source) - if url_parts.scheme not in ('bzr+ssh', 'lp'): + if url_parts.scheme not in ('bzr+ssh', 'lp', ''): return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.bzr')) else: return True def branch(self, source, dest): - url_parts = self.parse_url(source) - # If we use lp:branchname scheme we need to load plugins if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - if url_parts.scheme == "lp": - from bzrlib.plugin import load_plugins - load_plugins() - try: - local_branch = bzrdir.BzrDir.create_branch_convenience(dest) - except errors.AlreadyControlDirError: - local_branch = Branch.open(dest) - try: - remote_branch = Branch.open(source) - remote_branch.push(local_branch) - tree = workingtree.WorkingTree.open(dest) - tree.update() - except Exception as e: - raise e + if os.path.exists(dest): + check_call(['bzr', 'pull', '--overwrite', '-d', dest, source]) + else: + check_call(['bzr', 'branch', source, dest]) - def install(self, source): + def install(self, source, dest=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py index f023b26d..65ed5319 100644 --- a/hooks/charmhelpers/fetch/giturl.py +++ b/hooks/charmhelpers/fetch/giturl.py @@ -15,24 +15,18 @@ # along with charm-helpers. If not, see . 
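Similarly, a hedged sketch of the reworked git handler below; the URL,
destination, branch and depth values are examples, not part of this change:

    from charmhelpers.fetch.giturl import GitUrlFetchHandler

    handler = GitUrlFetchHandler()
    # Runs 'git clone --branch master --depth 1' on first use; on later
    # calls with an existing dest it runs 'git pull' instead.
    handler.clone('https://github.com/juju/charm-helpers',
                  '/tmp/charm-helpers', branch='master', depth='1')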
import os
+from subprocess import check_call, CalledProcessError
 from charmhelpers.fetch import (
     BaseFetchHandler,
-    UnhandledSource
+    UnhandledSource,
+    filter_installed_packages,
+    apt_install,
 )
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
-    raise ImportError('GitPython does not support Python 3')
-
-try:
-    from git import Repo
-except ImportError:
-    from charmhelpers.fetch import apt_install
-    apt_install("python-git")
-    from git import Repo
-
-from git.exc import GitCommandError  # noqa E402
+if filter_installed_packages(['git']) != []:
+    apt_install(['git'])
+    if filter_installed_packages(['git']) != []:
+        raise NotImplementedError('Unable to install git')
 
 
 class GitUrlFetchHandler(BaseFetchHandler):
@@ -40,19 +34,24 @@ class GitUrlFetchHandler(BaseFetchHandler):
     def can_handle(self, source):
         url_parts = self.parse_url(source)
         # TODO (mattyw) no support for ssh git@ yet
-        if url_parts.scheme not in ('http', 'https', 'git'):
+        if url_parts.scheme not in ('http', 'https', 'git', ''):
             return False
+        elif not url_parts.scheme:
+            return os.path.exists(os.path.join(source, '.git'))
         else:
             return True
 
-    def clone(self, source, dest, branch, depth=None):
+    def clone(self, source, dest, branch="master", depth=None):
         if not self.can_handle(source):
             raise UnhandledSource("Cannot handle {}".format(source))
 
-        if depth:
-            Repo.clone_from(source, dest, branch=branch, depth=depth)
+        if os.path.exists(dest):
+            cmd = ['git', '-C', dest, 'pull', source, branch]
         else:
-            Repo.clone_from(source, dest, branch=branch)
+            cmd = ['git', 'clone', source, dest, '--branch', branch]
+            if depth:
+                cmd.extend(['--depth', str(depth)])
+        check_call(cmd)
 
     def install(self, source, branch="master", dest=None, depth=None):
         url_parts = self.parse_url(source)
@@ -62,11 +61,9 @@ class GitUrlFetchHandler(BaseFetchHandler):
         else:
             dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                     branch_name)
-        if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0o755)
         try:
             self.clone(source, dest_dir, branch, depth)
-        except GitCommandError as e:
+        except CalledProcessError as e:
             raise UnhandledSource(e)
         except OSError as e:
             raise UnhandledSource(e.strerror)
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index 0506491b..d2ede320 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -121,11 +121,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
 
     # Charms which should use the source config option
     use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
-                  'ceph-osd', 'ceph-radosgw']
+                  'ceph-osd', 'ceph-radosgw', 'ceph-mon']
 
     # Charms which can not use openstack-origin, ie.
many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', - 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', + 'cinder-backup'] if self.openstack: for svc in services: @@ -225,7 +226,8 @@ class OpenStackAmuletDeployment(AmuletDeployment): self.precise_havana, self.precise_icehouse, self.trusty_icehouse, self.trusty_juno, self.utopic_juno, self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty) = range(12) + self.wily_liberty, self.trusty_mitaka, + self.xenial_mitaka) = range(14) releases = { ('precise', None): self.precise_essex, @@ -237,9 +239,11 @@ class OpenStackAmuletDeployment(AmuletDeployment): ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, ('utopic', None): self.utopic_juno, ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty} + ('wily', None): self.wily_liberty, + ('xenial', None): self.xenial_mitaka} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -256,6 +260,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): ('utopic', 'juno'), ('vivid', 'kilo'), ('wily', 'liberty'), + ('xenial', 'mitaka'), ]) if self.openstack: os_origin = self.openstack.split(':')[1]
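For reference, a minimal sketch of the lookups the two new release entries
enable; the deployment object `d` and its attribute values are assumptions
for illustration only:

    # d.series and d.openstack come from the deployment under test:
    #   ('trusty', 'cloud:trusty-mitaka') -> d.trusty_mitaka (index 12)
    #   ('xenial', None)                  -> d.xenial_mitaka (index 13)
    release = releases[(d.series, d.openstack)]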