diff --git a/.gitignore b/.gitignore index c3fd0b63..53bc7bb1 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,5 @@ bin .unit-state.db .idea func-results.json +*__pycache__ +.settings diff --git a/README.md b/README.md index b0271061..edc26e05 100644 --- a/README.md +++ b/README.md @@ -10,32 +10,32 @@ available in a Ceph cluster. Usage ===== - + The charm also supports specification of the storage devices to use in the ceph cluster:: osd-devices: A list of devices that the charm will attempt to detect, initialise and activate as ceph storage. - + This can be a superset of the actual storage devices presented to each service unit and can be changed post ceph-osd deployment using `juju set`. -For example:: +For example:: ceph-osd: osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde - + Boot things up by using:: juju deploy -n 3 --config ceph.yaml ceph - + You can then deploy this charm by simply doing:: juju deploy -n 10 --config ceph.yaml ceph-osd juju add-relation ceph-osd ceph - + Once the ceph charm has bootstrapped the cluster, it will notify the ceph-osd charm which will scan for the configured storage devices and add them to the pool of available storage. @@ -80,9 +80,39 @@ The AppArmor profile(s) which are generated by the charm should NOT yet be used - With Bluestore enabled. +Block Device Encryption +======================= + +The ceph-osd charm supports encryption of the underlying block devices backing OSDs. + +To use the 'native' key management approach (where dm-crypt keys are stored in the +ceph-mon cluster), simply set the 'osd-encrypt' configuration option:: + + ceph-osd: + options: + osd-encrypt: True + +**NOTE:** This is supported for Ceph Jewel or later. + +Alternatively, encryption keys can be stored in Vault; this requires deployment of +the vault charm (and associated initialization of vault - see the Vault charm for +details) and configuration of the 'osd-encrypt' and 'osd-encrypt-keymanager' +options:: + + ceph-osd: + options: + osd-encrypt: True + osd-encrypt-keymanager: vault + +**NOTE:** This option is only supported with Ceph Luminous or later. + +**NOTE:** Changing these options post deployment will only take effect for any +new block devices added to the ceph-osd application; existing OSD devices will +not be encrypted.
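+ +For example, to wire up vault (a minimal sketch: this charm's side of the relation is +the 'secrets-storage' endpoint added in this change; the vault charm's corresponding +endpoint is assumed here to be 'secrets'):: + + juju deploy vault + juju add-relation ceph-osd:secrets-storage vault:secrets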
+ Contact Information =================== Author: James Page -Report bugs at: http://bugs.launchpad.net/charms/+source/ceph-osd/+filebug -Location: http://jujucharms.com/charms/ceph-osd +Report bugs at: http://bugs.launchpad.net/charm-ceph-osd/+filebug +Location: http://jujucharms.com/ceph-osd diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml index ab7e3bad..b7fd6950 100644 --- a/charm-helpers-hooks.yaml +++ b/charm-helpers-hooks.yaml @@ -10,7 +10,7 @@ include: - cluster - contrib.python.packages - contrib.storage.linux - - contrib.openstack.alternatives + - contrib.openstack - contrib.network.ip - contrib.openstack: - alternatives diff --git a/config.yaml b/config.yaml index b15c261f..88bf5776 100644 --- a/config.yaml +++ b/config.yaml @@ -148,6 +148,17 @@ options: . Specifying this option on a running Ceph OSD node will have no effect until new disks are added, at which point new disks will be encrypted. + osd-encrypt-keymanager: + type: string + default: ceph + description: | + Keymanager to use for storage of dm-crypt keys used for OSD devices; + by default 'ceph' itself will be used for storage of keys, making use + of the key/value storage provided by the ceph-mon cluster. + . + Alternatively 'vault' may be used for storage of dm-crypt keys. Both + approaches ensure that keys are never written to the local filesystem. + This also requires a relation to the vault charm. crush-initial-weight: type: float default: @@ -248,7 +259,7 @@ options: Availability Zone instead of specifically by host. availability_zone: type: string - default: + default: description: | Custom availability zone to provide to Ceph for the OSD placement max-sectors-kb: diff --git a/hooks/ceph_hooks.py b/hooks/ceph_hooks.py index f5b13f3d..d0681189 100755 --- a/hooks/ceph_hooks.py +++ b/hooks/ceph_hooks.py @@ -13,6 +13,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import base64 +import json import glob import os import shutil @@ -34,6 +37,7 @@ from charmhelpers.core.hookenv import ( relation_ids, related_units, relation_get, + relation_set, Hooks, UnregisteredHookError, service_name, @@ -49,7 +53,8 @@ from charmhelpers.core.host import ( service_reload, service_restart, add_to_updatedb_prunepath, - restart_on_change + restart_on_change, + write_file, ) from charmhelpers.fetch import ( add_source, @@ -59,7 +64,9 @@ from charmhelpers.fetch import ( get_upstream_version, ) from charmhelpers.core.sysctl import create as create_sysctl -from charmhelpers.contrib.openstack.context import AppArmorContext +from charmhelpers.contrib.openstack.context import ( + AppArmorContext, +) from utils import ( get_host_ip, get_networks, @@ -74,12 +81,15 @@ from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( get_ipv6_addr, format_ipv6_addr, + get_relation_ip, ) from charmhelpers.contrib.storage.linux.ceph import ( CephConfContext) from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden +import charmhelpers.contrib.openstack.vaultlocker as vaultlocker + hooks = Hooks() STORAGE_MOUNT_PATH = '/var/lib/ceph' @@ -170,6 +180,23 @@ class CephOsdAppArmorContext(AppArmorContext): return self.ctxt +def use_vaultlocker(): + """Determine whether vaultlocker should be used for OSD encryption + + :returns: whether vaultlocker should be used for key management + :rtype: bool + :raises: ValueError if vaultlocker is enabled but ceph < 12.2.4""" + if (config('osd-encrypt') and + config('osd-encrypt-keymanager') == ceph.VAULT_KEY_MANAGER): + if cmp_pkgrevno('ceph', '12.2.4') < 0: + msg = ('vault usage only supported with ceph >= 12.2.4') + status_set('blocked', msg) + raise ValueError(msg) + else: + return True + return False + + def install_apparmor_profile(): """ Install ceph apparmor profiles and configure @@ -365,6 +392,14 @@ def check_overlap(journaldevs, datadevs): @hooks.hook('config-changed') @harden() def config_changed(): + # Determine whether vaultlocker is required and install it if so + if use_vaultlocker(): + installed = len(filter_installed_packages(['vaultlocker'])) == 0 + if not installed: + add_source('ppa:openstack-charmers/vaultlocker') + apt_update(fatal=True) + apt_install('vaultlocker', fatal=True) + # Check if an upgrade was requested check_for_upgrade() @@ -390,6 +425,18 @@ def config_changed(): @hooks.hook('storage.real') def prepare_disks_and_activate(): + # NOTE: vault/vaultlocker preflight check + vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND) + context = vault_kv() + if use_vaultlocker() and not vault_kv.complete: + log('Deferring
OSD preparation as vault not ready', + level=DEBUG) + return + elif use_vaultlocker() and vault_kv.complete: + log('Vault ready, writing vaultlocker configuration', + level=DEBUG) + vaultlocker.write_vaultlocker_conf(context) + osd_journal = get_journal_devices() check_overlap(osd_journal, set(get_devices())) log("got journal devs: {}".format(osd_journal), level=DEBUG) @@ -407,7 +454,8 @@ def prepare_disks_and_activate(): osd_journal, config('osd-reformat'), config('ignore-device-errors'), config('osd-encrypt'), - config('bluestore')) + config('bluestore'), + config('osd-encrypt-keymanager')) # Make it fast! if config('autotune'): ceph.tune_dev(dev) @@ -536,6 +584,26 @@ def update_nrpe_config(): nrpe_setup.write() +@hooks.hook('secrets-storage-relation-joined') +def secrets_storage_joined(relation_id=None): + relation_set(relation_id=relation_id, + secret_backend='charm-vaultlocker', + isolated=True, + access_address=get_relation_ip('secrets-storage'), + hostname=socket.gethostname()) + + +@hooks.hook('secrets-storage-relation-changed') +def secrets_storage_changed(): + vault_ca = relation_get('vault_ca') + if vault_ca: + vault_ca = base64.decodestring(json.loads(vault_ca).encode()) + write_file('/usr/local/share/ca-certificates/vault-ca.crt', + vault_ca, perms=0o644) + subprocess.check_call(['update-ca-certificates', '--fresh']) + prepare_disks_and_activate() + + VERSION_PACKAGE = 'ceph-common' @@ -559,6 +627,15 @@ def assess_status(): status_set('waiting', 'Incomplete relation: monitor') return + # Check for vault + if use_vaultlocker(): + if not relation_ids('secrets-storage'): + status_set('blocked', 'Missing relation: vault') + return + if not vaultlocker.vault_relation_complete(): + status_set('waiting', 'Incomplete relation: vault') + return + # Check for OSD device creation parity i.e. at least some devices # must have been presented and used for this charm to be operational running_osds = ceph.get_running_osds() diff --git a/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/hooks/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 00000000..66beeda2 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,354 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +import re +import sys +import six +from collections import OrderedDict +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') + self.openstack = openstack + self.source = source + self.stable = stable + + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. + + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresponding + stable or next branches for the other_services.""" + + self.log.info('OpenStackAmuletDeployment: determine branch locations') + + # Charms outside the ~openstack-charmers namespace + base_charms = { + 'mysql': ['trusty'], + 'mongodb': ['trusty'], + 'nrpe': ['trusty', 'xenial'], + } + + for svc in other_services: + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if svc['name'] in base_charms: + # NOTE: not all charms have support for all series we + # want/need to test against, so fix to most recent + # that each base charm supports + target_series = self.series + if self.series not in base_charms[svc['name']]: + target_series = base_charms[svc['name']][-1] + svc['location'] = 'cs:{}/{}'.format(target_series, + svc['name']) + elif self.stable: + svc['location'] = 'cs:{}/{}'.format(self.series, + svc['name']) + else: + svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( + self.series, + svc['name'] + ) + + return other_services + + def _add_services(self, this_service, other_services, use_source=None, + no_origin=None): + """Add services to the deployment and optionally set + openstack-origin/source.
+ + :param this_service dict: Service dictionary describing the service + whose amulet tests are being run + :param other_services dict: List of service dictionaries describing + the services needed to support the target + service + :param use_source list: List of services which use the 'source' config + option rather than 'openstack-origin' + :param no_origin list: List of services which do not support setting + the Cloud Archive. + Service Dict: + { + 'name': str charm-name, + 'units': int number of units, + 'constraints': dict of juju constraints, + 'location': str location of charm, + } + eg + this_service = { + 'name': 'openvswitch-odl', + 'constraints': {'mem': '8G'}, + } + other_services = [ + { + 'name': 'nova-compute', + 'units': 2, + 'constraints': {'mem': '4G'}, + 'location': cs:~bob/xenial/nova-compute + }, + { + 'name': 'mysql', + 'constraints': {'mem': '2G'}, + }, + {'neutron-api-odl'}] + use_source = ['mysql'] + no_origin = ['neutron-api-odl'] + """ + self.log.info('OpenStackAmuletDeployment: adding services') + + other_services = self._determine_branch_locations(other_services) + + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + + services = other_services + services.append(this_service) + + use_source = use_source or [] + no_origin = no_origin or [] + + # Charms which should use the source config option + use_source = list(set( + use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', + 'ceph-proxy', 'percona-cluster', 'lxd'])) + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = list(set( + no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', + 'nrpe', 'openvswitch-odl', 'neutron-api-odl', + 'odl-controller', 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'])) + + if self.openstack: + for svc in services: + if svc['name'] not in use_source + no_origin: + config = {'openstack-origin': self.openstack} + self.d.configure(svc['name'], config) + + if self.source: + for svc in services: + if svc['name'] in use_source and svc['name'] not in no_origin: + config = {'source': self.source} + self.d.configure(svc['name'], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=None): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. 
+ https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjunction with include_only. + :param include_only: List of juju service names to exclusively check, + not to be used in conjunction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. + """ + if not timeout: + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) + self.log.info('Waiting for extended status on units for {}s...' + ''.format(timeout)) + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + + # Check for idleness + self.d.sentry.wait(timeout=timeout) + # Check for error states and bail early + self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) + # Check for ready messages + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + + self.log.info('OK') + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + # Must be ordered by OpenStack release (not by Ubuntu release): + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) + + releases = { + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, + ('xenial', None): self.xenial_mitaka, + ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, + ('xenial', 'cloud:xenial-pike'): self.xenial_pike, + ('xenial', 'cloud:xenial-queens'): self.xenial_queens, + ('yakkety', None): self.yakkety_newton, + ('zesty', None): self.zesty_ocata, + ('artful', None): self.artful_pike, + ('bionic', None): self.bionic_queens, + } + return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release.
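+ + For example (illustrative): openstack='cloud:xenial-ocata' yields 'ocata', while a deployment with no openstack source set falls back to the series default in the mapping below (e.g. 'xenial' gives 'mitaka').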
+ """ + releases = OrderedDict([ + ('trusty', 'icehouse'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zesty', 'ocata'), + ('artful', 'pike'), + ('bionic', 'queens'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] + + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" + + if self._get_openstack_release() == self.trusty_icehouse: + # Icehouse + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder-ceph', + 'glance' + ] + elif (self.trusty_kilo <= self._get_openstack_release() <= + self.zesty_ocata): + # Kilo through Ocata + pools = [ + 'rbd', + 'cinder-ceph', + 'glance' + ] + else: + # Pike and later + pools = [ + 'cinder-ceph', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 00000000..ef785423 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,1515 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import amulet +import json +import logging +import os +import re +import six +import time +import urllib +import urlparse + +import cinderclient.v1.client as cinder_client +import cinderclient.v2.client as cinder_clientv2 +import glanceclient.v1.client as glance_client +import heatclient.v1.client as heat_client +from keystoneclient.v2_0 import client as keystone_client +from keystoneauth1.identity import ( + v3, + v2, +) +from keystoneauth1 import session as keystone_session +from keystoneclient.v3 import client as keystone_client_v3 +from novaclient import exceptions + +import novaclient.client as nova_client +import novaclient +import pika +import swiftclient + +from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) +from charmhelpers.core.host import CompareHostReleases + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + +NOVA_CLIENT_VERSION = "2" + +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', + 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', 'xenial_queens', + 'bionic_queens'] + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charm tests. 
+ """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. + """ + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected, expected_num_eps=3): + """Validate keystone v3 endpoint data. + + Validate the v3 endpoint data which has changed from v2. The + ports are used to find the matching endpoint. + + The new v3 endpoint data looks like: + + ['}, + region=RegionOne, + region_id=RegionOne, + service_id=17f842a0dc084b928e476fafe67e4095, + url=http://10.5.6.5:9312>, + '}, + region=RegionOne, + region_id=RegionOne, + service_id=72fc8736fb41435e8b3584205bb2cfa3, + url=http://10.5.6.6:35357/v3>, + ... ] + """ + self.log.debug('Validating v3 endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = [] + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if ((admin_port in ep.url and ep.interface == 'admin') or + (internal_port in ep.url and ep.interface == 'internal') or + (public_port in ep.url and ep.interface == 'public')): + found.append(ep.interface) + # note we ignore the links member. + actual = {'id': ep.id, + 'region': ep.region, + 'region_id': ep.region_id, + 'interface': self.not_null, + 'url': ep.url, + 'service_id': ep.service_id, } + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if len(found) != expected_num_eps: + return 'Unexpected number of endpoints found' + + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. + + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. 
+ 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region, + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + """ + self.log.warn("Endpoint ID and Region ID validation is limited to not " + "null checks after v2 to v3 conversion") + for svc in ep_data.keys(): + assert len(ep_data[svc]) == 1, "Unknown data format" + svc_ep_data = ep_data[svc][0] + ep_data[svc] = [ + { + 'url': svc_ep_data['adminURL'], + 'interface': 'admin', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['publicURL'], + 'interface': 'public', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['internalURL'], + 'interface': 'internal', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}] + return ep_data + + def validate_svc_catalog_endpoint_data(self, expected, actual, + openstack_release=None): + """Validate service catalog endpoint data. Pick the correct validator + for the OpenStack version. Expected data should be in the v2 format: + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region, + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region, + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + + """ + validation_function = self.validate_v2_svc_catalog_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_svc_catalog_endpoint_data + expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) + return validation_function(expected, actual) + + def validate_v2_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('Validating service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_v3_svc_catalog_endpoint_data(self, expected, actual): + """Validate the keystone v3 catalog endpoint data. + + Validate a list of dictionaries that make up the keystone v3 service + catalogue.
+ + It is in the form of: + + + {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:35357/v3'}, + {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}, + {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}], + u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'f629388955bc407f8b11d8b7ca168086', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9312'}]} + + Note that an added complication is that the order of admin, public and + internal endpoints against 'interface' in each region is not guaranteed. + + Thus, the function sorts the expected and actual lists using the + interface key as a sort key, prior to the comparison. + """ + self.log.debug('Validating v3 service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + l_expected = sorted(v, key=lambda x: x['interface']) + l_actual = sorted(actual[k], key=lambda x: x['interface']) + if len(l_actual) != len(l_expected): + return ("endpoint {} has differing number of interfaces " + " - expected({}), actual({})" + .format(k, len(l_expected), len(l_actual))) + for i_expected, i_actual in zip(l_expected, l_actual): + self.log.debug("checking interface {}" + .format(i_expected['interface'])) + ret = self._validate_dict_data(i_expected, i_actual) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('Validating tenant data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('Validating role data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual, api_version=None): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data.
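+ + For example (illustrative), one expected v2 entry: + {'name': 'demoUser', 'enabled': True, 'email': u.not_null, + 'id': u.not_null, 'tenantId': u.not_null}.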
+ """ + self.log.debug('Validating user data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 3: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') + else: + a['tenantId'] = act.tenantId + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. + """ + self.log.debug('Validating flavor data...') + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) + return tenant in [t.name for t in keystone.tenants.list()] + + @retry_on_exception(num_retries=5, base_delay=1) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel.get('api_version') != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel.get('api_version'), api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. 
+ """ + self.log.debug("Setting keystone preferred-api-version: '{}'" + "".format(api_version)) + + config = {'preferred-api-version': api_version} + deployment.d.configure('keystone', config) + deployment._auto_wait_for_status() + self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) + + def authenticate_cinder_admin(self, keystone, api_version=2): + """Authenticates admin user with cinder.""" + self.log.debug('Authenticating cinder admin...') + _clients = { + 1: cinder_client.Client, + 2: cinder_clientv2.Client} + return _clients[api_version](session=keystone.session) + + def authenticate_keystone(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Authenticate with Keystone""" + self.log.debug('Authenticating with keystone...') + if not api_version: + api_version = 2 + sess, auth = self.get_keystone_session( + keystone_ip=keystone_ip, + username=username, + password=password, + api_version=api_version, + admin_port=admin_port, + user_domain_name=user_domain_name, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name + ) + if api_version == 2: + client = keystone_client.Client(session=sess) + else: + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client + + def get_keystone_session(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Return a keystone session object""" + ep = self.get_keystone_endpoint(keystone_ip, + api_version=api_version, + admin_port=admin_port) + if api_version == 2: + auth = v2.Password( + username=username, + password=password, + tenant_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + else: + auth = v3.Password( + user_domain_name=user_domain_name, + username=username, + password=password, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + return (sess, auth) + + def get_keystone_endpoint(self, keystone_ip, api_version=None, + admin_port=False): + """Return keystone endpoint""" + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if api_version == 2: + ep = base_ep + "/v2.0" + else: + ep = base_ep + "/v3" + return ep + + def get_default_keystone_session(self, keystone_sentry, + openstack_release=None): + """Return a keystone session object and client object assuming standard + default settings + + Example call in amulet tests: + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + + The session can then be used to auth other clients: + neutronclient.Client(session=session) + aodh_client.Client(session=session) + eyc + """ + self.log.debug('Authenticating keystone admin...') + api_version = 2 + client_class = keystone_client.Client + # 11 => xenial_queens + if openstack_release and openstack_release >= 11: + api_version = 3 + client_class = keystone_client_v3.Client + keystone_ip = keystone_sentry.info['public-address'] + session, auth = self.get_keystone_session( + keystone_ip, + api_version=api_version, + username='admin', + password='openstack', + 
project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant=None, api_version=None, + keystone_ip=None, user_domain_name=None, + project_domain_name=None, + project_name=None): + """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') + if not keystone_ip: + keystone_ip = keystone_sentry.info['public-address'] + + # To support backward compatibility usage of this function + if not project_name: + project_name = tenant + if api_version == 3 and not user_domain_name: + user_domain_name = 'admin_domain' + if api_version == 3 and not project_domain_name: + project_domain_name = 'admin_domain' + if api_version == 3 and not project_name: + project_name = 'admin' + + return self.authenticate_keystone( + keystone_ip, user, password, + api_version=api_version, + user_domain_name=user_domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + admin_port=True) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') + ep = keystone.service_catalog.url_for(service_type='image', + interface='adminURL') + if keystone.session: + return glance_client.Client(ep, session=keystone.session) + else: + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + interface='publicURL') + if keystone.session: + return heat_client.Client(endpoint=ep, session=keystone.session) + else: + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + if keystone.session: + return nova_client.Client(NOVA_CLIENT_VERSION, + session=keystone.session, + auth_url=ep) + elif novaclient.__version__[0] >= "7": + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, password=password, + project_name=tenant, auth_url=ep) + else: + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + if keystone.session: + return swiftclient.Connection(session=keystone.session) + else: + return 
swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + + def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", + ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): + """Create the specified flavor.""" + try: + nova.flavors.find(name=name) + except (exceptions.NotFound, exceptions.NoUniqueMatch): + self.log.debug('Creating flavor ({})'.format(name)) + nova.flavors.create(name, ram, vcpus, disk, flavorid, + ephemeral, swap, rxtx_factor, is_public) + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. + + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :returns: glance image pointer + """ + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Download cirros image + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open('http://download.cirros-cloud.net/version/released') + version = f.read().strip() + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) + local_path = os.path.join('tests', cirros_img) + + if not os.path.exists(local_path): + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', + version, cirros_img) + opener.retrieve(cirros_url, local_path) + f.close() + + # Create glance image + with open(local_path) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + + # Wait for image to reach active status + img_id = image.id + ret = self.resource_reaches_status(glance.images, img_id, + expected_stat='active', + msg='Image status wait') + if not ret: + msg = 'Glance image failed to reach expected state.' 
+ amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new image + self.log.debug('Validating image attributes...') + val_img_name = glance.images.get(img_id).name + val_img_stat = glance.images.get(img_id).status + val_img_pub = glance.images.get(img_id).is_public + val_img_cfmt = glance.images.get(img_id).container_format + val_img_dfmt = glance.images.get(img_id).disk_format + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' + 'container fmt:{} disk fmt:{}'.format( + val_img_name, val_img_pub, img_id, + val_img_stat, val_img_cfmt, val_img_dfmt)) + + if val_img_name == image_name and val_img_stat == 'active' \ + and val_img_pub is True and val_img_cfmt == 'bare' \ + and val_img_dfmt == 'qcow2': + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return image + + def delete_image(self, glance, image): + """Delete the specified image.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_image.') + self.log.debug('Deleting glance image ({})...'.format(image)) + return self.delete_resource(glance.images, image, msg='glance image') + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + self.log.debug('Creating instance ' + '({}|{}|{})'.format(instance_name, image_name, flavor)) + image = nova.glance.find_image(image_name) + flavor = nova.flavors.find(name=flavor) + instance = nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_instance.') + self.log.debug('Deleting instance ({})...'.format(instance)) + return self.delete_resource(nova.servers, instance, + msg='nova instance') + + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except Exception: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def _get_cinder_obj_name(self, cinder_object): + """Retrieve name of cinder object. + + :param cinder_object: cinder snapshot or volume object + :returns: str cinder object name + """ + # v1 objects store name in 'display_name' attr but v2+ use 'name' + try: + return cinder_object.display_name + except AttributeError: + return cinder_object.name + + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, + img_id=None, src_vol_id=None, snap_id=None): + """Create cinder volume, optionally from a glance image, OR + optionally as a clone of an existing volume, OR optionally + from a snapshot. Wait for the new volume status to reach + the expected status, validate and return a resource pointer. 
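+ + For example (illustrative): + create_cinder_volume(cinder, vol_name='vol1', vol_size=1, img_id=image.id) + creates a bootable volume from a glance image; pass at most one of + img_id, src_vol_id or snap_id (none of them creates a blank volume).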
+ + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input and avoid impossible combinations + if img_id and not src_vol_id and not snap_id: + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + # Create volume + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + # Impossible combination of parameters + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except TypeError: + vol_new = cinder.volumes.create(name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Wait for volume to reach available status + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' + amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return vol_new + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. 
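+ + For example (illustrative): + delete_resource(glance.images, image.id, msg='glance image'); the + deprecated delete_image and delete_instance helpers above delegate here.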
+ + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. + + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) + + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a single ceph unit, with + pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + + def get_ceph_df(self, sentry_unit): + """Return dict of ceph df json output, including ceph pool state. 
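+ + The parsed dict mirrors 'ceph df --format=json'; for example + (illustrative), df['pools'][n]['stats']['objects'] and + df['pools'][n]['stats']['kb_used'] are the fields consumed by + get_ceph_pool_sample below.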
+ + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + for pool in df['pools']: + if pool['id'] == pool_id: + pool_name = pool['name'] + obj_count = pool['stats']['objects'] + kb_used = pool['stats']['kb_used'] + + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. + + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None + + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. 
+ """
+ self.log.debug('Adding rmq user ({})...'.format(username))
+
+ # Check that user does not already exist
+ cmd_user_list = 'rabbitmqctl list_users'
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
+ if username in output:
+ self.log.warning('User ({}) already exists, returning '
+ 'gracefully.'.format(username))
+ return
+
+ perms = '".*" ".*" ".*"'
+ cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
+ 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
+
+ # Add user via first unit
+ for cmd in cmds:
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd)
+
+ # Check connection against the other sentry_units
+ self.log.debug('Checking user connect against units...')
+ for sentry_unit in sentry_units:
+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
+ username=username,
+ password=password)
+ connection.close()
+
+ def delete_rmq_test_user(self, sentry_units, username="testuser1"):
+ """Delete a rabbitmq user via the first rmq juju unit.
+
+ :param sentry_units: list of sentry unit pointers
+ :param username: amqp user name, default to testuser1
+ :returns: None if successful or no such user.
+ """
+ self.log.debug('Deleting rmq user ({})...'.format(username))
+
+ # Check that the user exists
+ cmd_user_list = 'rabbitmqctl list_users'
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
+
+ if username not in output:
+ self.log.warning('User ({}) does not exist, returning '
+ 'gracefully.'.format(username))
+ return
+
+ # Delete the user
+ cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
+
+ def get_rmq_cluster_status(self, sentry_unit):
+ """Execute rabbitmq cluster status command on a unit and return
+ the full output.
+
+ :param sentry_unit: sentry unit
+ :returns: String containing console output of cluster status command
+ """
+ cmd = 'rabbitmqctl cluster_status'
+ output, _ = self.run_cmd_unit(sentry_unit, cmd)
+ self.log.debug('{} cluster_status:\n{}'.format(
+ sentry_unit.info['unit_name'], output))
+ return str(output)
+
+ def get_rmq_cluster_running_nodes(self, sentry_unit):
+ """Parse rabbitmqctl cluster_status output string, return list of
+ running rabbitmq cluster nodes.
+
+ :param sentry_unit: sentry unit
+ :returns: List containing node names of running nodes
+ """
+ # NOTE(beisner): rabbitmqctl cluster_status output is not
+ # json-parsable, do string chop foo, then json.loads that.
+ str_stat = self.get_rmq_cluster_status(sentry_unit)
+ if 'running_nodes' in str_stat:
+ pos_start = str_stat.find("{running_nodes,") + 15
+ pos_end = str_stat.find("]},", pos_start) + 1
+ str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
+ run_nodes = json.loads(str_run_nodes)
+ return run_nodes
+ else:
+ return []
+
+ def validate_rmq_cluster_running_nodes(self, sentry_units):
+ """Check that all rmq unit hostnames are represented in the
+ cluster_status output of all units.
+
+ :param sentry_units: list of sentry unit pointers (all rmq units)
+ :returns: None if successful, otherwise return error message
+ """
+ host_names = self.get_unit_hostnames(sentry_units)
+ errors = []
+
+ # Query every unit for cluster_status running nodes
+ for query_unit in sentry_units:
+ query_unit_name = query_unit.info['unit_name']
+ running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
+
+ # Confirm that every unit is represented in the queried unit's
+ # cluster_status running nodes output.
+ for validate_unit in sentry_units:
+ val_host_name = host_names[validate_unit.info['unit_name']]
+ val_node_name = 'rabbit@{}'.format(val_host_name)
+
+ if val_node_name not in running_nodes:
+ errors.append('Cluster member check failed on {}: {} not '
+ 'in {}\n'.format(query_unit_name,
+ val_node_name,
+ running_nodes))
+ if errors:
+ return ''.join(errors)
+
+ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
+ """Check a single juju rmq unit for ssl and port in the config file."""
+ host = sentry_unit.info['public-address']
+ unit_name = sentry_unit.info['unit_name']
+
+ conf_file = '/etc/rabbitmq/rabbitmq.config'
+ conf_contents = str(self.file_contents_safe(sentry_unit,
+ conf_file, max_wait=16))
+ # Checks
+ conf_ssl = 'ssl' in conf_contents
+ conf_port = str(port) in conf_contents
+
+ # Port explicitly checked in config
+ if port and conf_port and conf_ssl:
+ self.log.debug('SSL is enabled @{}:{} '
+ '({})'.format(host, port, unit_name))
+ return True
+ elif port and not conf_port and conf_ssl:
+ self.log.debug('SSL is enabled @{} but not on port {} '
+ '({})'.format(host, port, unit_name))
+ return False
+ # Port not checked (useful when checking that ssl is disabled)
+ elif not port and conf_ssl:
+ self.log.debug('SSL is enabled @{}:{} '
+ '({})'.format(host, port, unit_name))
+ return True
+ elif not conf_ssl:
+ self.log.debug('SSL not enabled @{}:{} '
+ '({})'.format(host, port, unit_name))
+ return False
+ else:
+ msg = ('Unknown condition when checking SSL status @{}:{} '
+ '({})'.format(host, port, unit_name))
+ amulet.raise_status(amulet.FAIL, msg)
+
+ def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
+ """Check that ssl is enabled on rmq juju sentry units.
+
+ :param sentry_units: list of all rmq sentry units
+ :param port: optional ssl port override to validate
+ :returns: None if successful, otherwise return error message
+ """
+ for sentry_unit in sentry_units:
+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
+ return ('Unexpected condition: ssl is disabled on unit '
+ '({})'.format(sentry_unit.info['unit_name']))
+ return None
+
+ def validate_rmq_ssl_disabled_units(self, sentry_units):
+ """Check that ssl is disabled on listed rmq juju sentry units.
+
+ :param sentry_units: list of all rmq sentry units
+ :returns: None if successful, otherwise return error message
+ """
+ for sentry_unit in sentry_units:
+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
+ return ('Unexpected condition: ssl is enabled on unit '
+ '({})'.format(sentry_unit.info['unit_name']))
+ return None
+
+ def configure_rmq_ssl_on(self, sentry_units, deployment,
+ port=None, max_wait=60):
+ """Turn ssl charm config option on, with optional non-default
+ ssl port specification. Confirm that it is enabled on every
+ unit.
+ + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.is_open is True + assert connection.is_closing is False + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. 
+
+ :param sentry_unit: sentry unit pointer
+ :param message: amqp message string
+ :param queue: message queue, default to test
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :param ssl: boolean, default to False
+ :param port: amqp port, use defaults if None
+ :returns: None. Raises exception if publish failed.
+ """
+ self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
+ message))
+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+ port=port,
+ username=username,
+ password=password)
+
+ # NOTE(beisner): extra debug here re: pika hang potential:
+ # https://github.com/pika/pika/issues/297
+ # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
+ self.log.debug('Defining channel...')
+ channel = connection.channel()
+ self.log.debug('Declaring queue...')
+ channel.queue_declare(queue=queue, auto_delete=False, durable=True)
+ self.log.debug('Publishing message...')
+ channel.basic_publish(exchange='', routing_key=queue, body=message)
+ self.log.debug('Closing channel...')
+ channel.close()
+ self.log.debug('Closing connection...')
+ connection.close()
+
+ def get_amqp_message_by_unit(self, sentry_unit, queue="test",
+ username="testuser1",
+ password="changeme",
+ ssl=False, port=None):
+ """Get an amqp message from a rmq juju unit.
+
+ :param sentry_unit: sentry unit pointer
+ :param queue: message queue, default to test
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :param ssl: boolean, default to False
+ :param port: amqp port, use defaults if None
+ :returns: amqp message body as string. Raise if get fails.
+ """
+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+ port=port,
+ username=username,
+ password=password)
+ channel = connection.channel()
+ method_frame, _, body = channel.basic_get(queue)
+
+ if method_frame:
+ self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
+ body))
+ channel.basic_ack(method_frame.delivery_tag)
+ channel.close()
+ connection.close()
+ return body
+ else:
+ msg = 'No message retrieved.'
+ amulet.raise_status(amulet.FAIL, msg)
+
+ def validate_memcache(self, sentry_unit, conf, os_release,
+ earliest_release=5, section='keystone_authtoken',
+ check_kvs=None):
+ """Check Memcache is running and is configured to be used
+
+ Example call from Amulet test:
+
+ def test_110_memcache(self):
+ u.validate_memcache(self.neutron_api_sentry,
+ '/etc/neutron/neutron.conf',
+ self._get_openstack_release())
+
+ :param sentry_unit: sentry unit
+ :param conf: OpenStack config file to check memcache settings
+ :param os_release: Current OpenStack release int code
+ :param earliest_release: Earliest Openstack release to check int code
+ :param section: OpenStack config file section to check
+ :param check_kvs: Dict of settings to check in config file
+ :returns: None
+ """
+ if os_release < earliest_release:
+ self.log.debug('Skipping memcache checks for deployment. 
{} <'
+ 'mitaka'.format(os_release))
+ return
+ _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}
+ self.log.debug('Checking memcached is running')
+ ret = self.validate_services_by_name({sentry_unit: ['memcached']})
+ if ret:
+ amulet.raise_status(amulet.FAIL, msg='Memcache running check '
+ 'failed {}'.format(ret))
+ else:
+ self.log.debug('OK')
+ self.log.debug('Checking memcache url is configured in {}'.format(
+ conf))
+ if self.validate_config_data(sentry_unit, conf, section, _kvs):
+ message = "Memcache config error in: {}".format(conf)
+ amulet.raise_status(amulet.FAIL, msg=message)
+ else:
+ self.log.debug('OK')
+ self.log.debug('Checking memcache configuration in '
+ '/etc/memcached.conf')
+ contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
+ fatal=True)
+ ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
+ if CompareHostReleases(ubuntu_release) <= 'trusty':
+ memcache_listen_addr = 'ip6-localhost'
+ else:
+ memcache_listen_addr = '::1'
+ expected = {
+ '-p': '11211',
+ '-l': memcache_listen_addr}
+ found = []
+ for key, value in expected.items():
+ for line in contents.split('\n'):
+ if line.startswith(key):
+ self.log.debug('Checking {} is set to {}'.format(
+ key,
+ value))
+ assert value == line.split()[-1]
+ self.log.debug(line.split()[-1])
+ found.append(key)
+ if sorted(found) == sorted(expected.keys()):
+ self.log.debug('OK')
+ else:
+ message = "Memcache config error in: /etc/memcached.conf"
+ amulet.raise_status(amulet.FAIL, msg=message)
diff --git a/hooks/charmhelpers/contrib/openstack/files/__init__.py b/hooks/charmhelpers/contrib/openstack/files/__init__.py
new file mode 100644
index 00000000..9df5f746
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/files/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# dummy __init__.py to fool syncer into thinking this is a syncable python
+# module
diff --git a/hooks/charmhelpers/contrib/openstack/ha/__init__.py b/hooks/charmhelpers/contrib/openstack/ha/__init__.py
new file mode 100644
index 00000000..9b088de8
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/ha/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
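Aside: the amulet helpers synced in above (delete_resource, resource_reaches_status, configure_rmq_ssl_on/off) all share one polling idiom: re-check a condition every 4 seconds, bounded by max_wait / 4 attempts. A distilled, standalone restatement for illustration only; the helper name `_poll` and the cinder call in the comment are hypothetical, not code from this patch::

    import time

    def _poll(predicate, max_wait=120, interval=4):
        """Re-evaluate predicate every `interval` seconds until it returns
        True or roughly max_wait seconds have elapsed; this mirrors the
        loop shape used by resource_reaches_status() and delete_resource()."""
        tries = 0
        result = predicate()
        while not result and tries < (max_wait / interval):
            time.sleep(interval)
            result = predicate()
            tries += 1
        return result

    # e.g. the resource_reaches_status() check could be phrased as:
    # _poll(lambda: cinder.volumes.get(vol_id).status == 'available')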
diff --git a/hooks/charmhelpers/contrib/openstack/ha/utils.py b/hooks/charmhelpers/contrib/openstack/ha/utils.py new file mode 100644 index 00000000..6060ae50 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -0,0 +1,265 @@ +# Copyright 2014-2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2016 Canonical Ltd. +# +# Authors: +# Openstack Charmers < +# + +""" +Helpers for high availability. +""" + +import json + +import re + +from charmhelpers.core.hookenv import ( + log, + relation_set, + charm_name, + config, + status_set, + DEBUG, + WARNING, +) + +from charmhelpers.core.host import ( + lsb_release +) + +from charmhelpers.contrib.openstack.ip import ( + resolve_address, + is_ipv6, +) + +from charmhelpers.contrib.network.ip import ( + get_iface_for_address, + get_netmask_for_address, +) + +from charmhelpers.contrib.hahelpers.cluster import ( + get_hacluster_config +) + +JSON_ENCODE_OPTIONS = dict( + sort_keys=True, + allow_nan=False, + indent=None, + separators=(',', ':'), +) + + +class DNSHAException(Exception): + """Raised when an error occurs setting up DNS HA + """ + + pass + + +def update_dns_ha_resource_params(resources, resource_params, + relation_id=None, + crm_ocf='ocf:maas:dns'): + """ Configure DNS-HA resources based on provided configuration and + update resource dictionaries for the HA relation. + + @param resources: Pointer to dictionary of resources. + Usually instantiated in ha_joined(). + @param resource_params: Pointer to dictionary of resource parameters. + Usually instantiated in ha_joined() + @param relation_id: Relation ID of the ha relation + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + _relation_data = {'resources': {}, 'resource_params': {}} + update_hacluster_dns_ha(charm_name(), + _relation_data, + crm_ocf) + resources.update(_relation_data['resources']) + resource_params.update(_relation_data['resource_params']) + relation_set(relation_id=relation_id, groups=_relation_data['groups']) + + +def assert_charm_supports_dns_ha(): + """Validate prerequisites for DNS HA + The MAAS client is only available on Xenial or greater + + :raises DNSHAException: if release is < 16.04 + """ + if lsb_release().get('DISTRIB_RELEASE') < '16.04': + msg = ('DNS HA is only supported on 16.04 and greater ' + 'versions of Ubuntu.') + status_set('blocked', msg) + raise DNSHAException(msg) + return True + + +def expect_ha(): + """ Determine if the unit expects to be in HA + + Check for VIP or dns-ha settings which indicate the unit should expect to + be related to hacluster. + + @returns boolean + """ + return config('vip') or config('dns-ha') + + +def generate_ha_relation_data(service): + """ Generate relation data for ha relation + + Based on configuration options and unit interfaces, generate a json + encoded dict of relation data items for the hacluster relation, + providing configuration for DNS HA or VIP's + haproxy clone sets. 
+ + @returns dict: json encoded data for use with relation_set + """ + _haproxy_res = 'res_{}_haproxy'.format(service) + _relation_data = { + 'resources': { + _haproxy_res: 'lsb:haproxy', + }, + 'resource_params': { + _haproxy_res: 'op monitor interval="5s"' + }, + 'init_services': { + _haproxy_res: 'haproxy' + }, + 'clones': { + 'cl_{}_haproxy'.format(service): _haproxy_res + }, + } + + if config('dns-ha'): + update_hacluster_dns_ha(service, _relation_data) + else: + update_hacluster_vip(service, _relation_data) + + return { + 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS) + for k, v in _relation_data.items() if v + } + + +def update_hacluster_dns_ha(service, relation_data, + crm_ocf='ocf:maas:dns'): + """ Configure DNS-HA resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + # Validate the charm environment for DNS HA + assert_charm_supports_dns_ha() + + settings = ['os-admin-hostname', 'os-internal-hostname', + 'os-public-hostname', 'os-access-hostname'] + + # Check which DNS settings are set and update dictionaries + hostname_group = [] + for setting in settings: + hostname = config(setting) + if hostname is None: + log('DNS HA: Hostname setting {} is None. Ignoring.' + ''.format(setting), + DEBUG) + continue + m = re.search('os-(.+?)-hostname', setting) + if m: + endpoint_type = m.group(1) + # resolve_address's ADDRESS_MAP uses 'int' not 'internal' + if endpoint_type == 'internal': + endpoint_type = 'int' + else: + msg = ('Unexpected DNS hostname setting: {}. ' + 'Cannot determine endpoint_type name' + ''.format(setting)) + status_set('blocked', msg) + raise DNSHAException(msg) + + hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type) + if hostname_key in hostname_group: + log('DNS HA: Resource {}: {} already exists in ' + 'hostname group - skipping'.format(hostname_key, hostname), + DEBUG) + continue + + hostname_group.append(hostname_key) + relation_data['resources'][hostname_key] = crm_ocf + relation_data['resource_params'][hostname_key] = ( + 'params fqdn="{}" ip_address="{}"' + .format(hostname, resolve_address(endpoint_type=endpoint_type, + override=False))) + + if len(hostname_group) >= 1: + log('DNS HA: Hostname group is set with {} as members. ' + 'Informing the ha relation'.format(' '.join(hostname_group)), + DEBUG) + relation_data['groups'] = { + 'grp_{}_hostnames'.format(service): ' '.join(hostname_group) + } + else: + msg = 'DNS HA: Hostname group has no members.' + status_set('blocked', msg) + raise DNSHAException(msg) + + +def update_hacluster_vip(service, relation_data): + """ Configure VIP resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. 
+ """ + cluster_config = get_hacluster_config() + vip_group = [] + for vip in cluster_config['vip'].split(): + if is_ipv6(vip): + res_neutron_vip = 'ocf:heartbeat:IPv6addr' + vip_params = 'ipv6addr' + else: + res_neutron_vip = 'ocf:heartbeat:IPaddr2' + vip_params = 'ip' + + iface = (get_iface_for_address(vip) or + config('vip_iface')) + netmask = (get_netmask_for_address(vip) or + config('vip_cidr')) + + if iface is not None: + vip_key = 'res_{}_{}_vip'.format(service, iface) + if vip_key in vip_group: + if vip not in relation_data['resource_params'][vip_key]: + vip_key = '{}_{}'.format(vip_key, vip_params) + else: + log("Resource '%s' (vip='%s') already exists in " + "vip group - skipping" % (vip_key, vip), WARNING) + continue + + relation_data['resources'][vip_key] = res_neutron_vip + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 'nic="{iface}"'.format(ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask) + ) + vip_group.append(vip_key) + + if len(vip_group) >= 1: + relation_data['groups'] = { + 'grp_{}_vips'.format(service): ' '.join(vip_group) + } diff --git a/hooks/charmhelpers/contrib/openstack/keystone.py b/hooks/charmhelpers/contrib/openstack/keystone.py new file mode 100644 index 00000000..d7e02ccd --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/keystone.py @@ -0,0 +1,178 @@ +#!/usr/bin/python +# +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import six
+from charmhelpers.fetch import apt_install
+from charmhelpers.contrib.openstack.context import IdentityServiceContext
+from charmhelpers.core.hookenv import (
+ log,
+ ERROR,
+)
+
+
+def get_api_suffix(api_version):
+ """Return the formatted api suffix for the given version
+ @param api_version: version of the keystone endpoint
+ @returns the api suffix formatted according to the given api
+ version
+ """
+ return 'v2.0' if api_version in (2, "2", "2.0") else 'v3'
+
+
+def format_endpoint(schema, addr, port, api_version):
+ """Return a formatted keystone endpoint
+ @param schema: http or https
+ @param addr: ipv4/ipv6 host of the keystone service
+ @param port: port of the keystone service
+ @param api_version: 2 or 3
+ @returns a fully formatted keystone endpoint
+ """
+ return '{}://{}:{}/{}/'.format(schema, addr, port,
+ get_api_suffix(api_version))
+
+
+def get_keystone_manager(endpoint, api_version, **kwargs):
+ """Return a keystonemanager for the correct API version
+
+ @param endpoint: the keystone endpoint to point client at
+ @param api_version: version of the keystone api the client should use
+ @param kwargs: token or username/tenant/password information
+ @returns keystonemanager class used for interrogating keystone
+ """
+ if api_version == 2:
+ return KeystoneManager2(endpoint, **kwargs)
+ if api_version == 3:
+ return KeystoneManager3(endpoint, **kwargs)
+ raise ValueError('No manager found for api version {}'.format(api_version))
+
+
+def get_keystone_manager_from_identity_service_context():
+ """Return a keystonemanager generated from an
+ instance of charmhelpers.contrib.openstack.context.IdentityServiceContext
+ @returns keystonemanager instance
+ """
+ context = IdentityServiceContext()()
+ if not context:
+ msg = "Identity service context cannot be generated"
+ log(msg, level=ERROR)
+ raise ValueError(msg)
+
+ endpoint = format_endpoint(context['service_protocol'],
+ context['service_host'],
+ context['service_port'],
+ context['api_version'])
+
+ if context['api_version'] in (2, "2.0"):
+ api_version = 2
+ else:
+ api_version = 3
+
+ return get_keystone_manager(endpoint, api_version,
+ username=context['admin_user'],
+ password=context['admin_password'],
+ tenant_name=context['admin_tenant_name'])
+
+
+class KeystoneManager(object):
+
+ def resolve_service_id(self, service_name=None, service_type=None):
+ """Find the service_id of a given service"""
+ services = [s._info for s in self.api.services.list()]
+
+ service_name = service_name.lower()
+ for s in services:
+ name = s['name'].lower()
+ if service_type and service_name:
+ if (service_name == name and service_type == s['type']):
+ return s['id']
+ elif service_name and service_name == name:
+ return s['id']
+ elif service_type and service_type == s['type']:
+ return s['id']
+ return None
+
+ def service_exists(self, service_name=None, service_type=None):
+ """Determine if the given service exists on the service list"""
+ return self.resolve_service_id(service_name, service_type) is not None
+
+
+class KeystoneManager2(KeystoneManager):
+
+ def __init__(self, endpoint, **kwargs):
+ try:
+ from keystoneclient.v2_0 import client
+ from keystoneclient.auth.identity import v2
+ from keystoneclient import session
+ except ImportError:
+ if six.PY2:
+ apt_install(["python-keystoneclient"], fatal=True)
+ else:
+ apt_install(["python3-keystoneclient"], fatal=True)
+
+ from keystoneclient.v2_0 import client
+ from keystoneclient.auth.identity import v2
+ from keystoneclient import session
+
+
self.api_version = 2 + + token = kwargs.get("token", None) + if token: + api = client.Client(endpoint=endpoint, token=token) + else: + auth = v2.Password(username=kwargs.get("username"), + password=kwargs.get("password"), + tenant_name=kwargs.get("tenant_name"), + auth_url=endpoint) + sess = session.Session(auth=auth) + api = client.Client(session=sess) + + self.api = api + + +class KeystoneManager3(KeystoneManager): + + def __init__(self, endpoint, **kwargs): + try: + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + except ImportError: + if six.PY2: + apt_install(["python-keystoneclient"], fatal=True) + else: + apt_install(["python3-keystoneclient"], fatal=True) + + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + + self.api_version = 3 + + token = kwargs.get("token", None) + if token: + auth = token_endpoint.Token(endpoint=endpoint, + token=token) + sess = session.Session(auth=auth) + else: + auth = v3.Password(auth_url=endpoint, + user_id=kwargs.get("username"), + password=kwargs.get("password"), + project_id=kwargs.get("tenant_name")) + sess = session.Session(auth=auth) + + self.api = client.Client(session=sess) diff --git a/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/hooks/charmhelpers/contrib/openstack/templates/__init__.py new file mode 100644 index 00000000..9df5f746 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py new file mode 100644 index 00000000..a623315d --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -0,0 +1,379 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import os
+
+import six
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import (
+ log,
+ ERROR,
+ INFO,
+ TRACE
+)
+from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
+
+try:
+ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+except ImportError:
+ apt_update(fatal=True)
+ if six.PY2:
+ apt_install('python-jinja2', fatal=True)
+ else:
+ apt_install('python3-jinja2', fatal=True)
+ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+
+
+class OSConfigException(Exception):
+ pass
+
+
+def get_loader(templates_dir, os_release):
+ """
+ Create a jinja2.ChoiceLoader containing template dirs up to
+ and including os_release. If a release's template directory
+ is missing at templates_dir, it will be omitted from the loader.
+ templates_dir is added to the bottom of the search list as a base
+ loading dir.
+
+ A charm may also ship a templates dir with this module
+ and it will be appended to the bottom of the search list, eg::
+
+ hooks/charmhelpers/contrib/openstack/templates
+
+ :param templates_dir (str): Base template directory containing release
+ sub-directories.
+ :param os_release (str): OpenStack release codename to construct template
+ loader.
+ :returns: jinja2.ChoiceLoader constructed with a list of
+ jinja2.FilesystemLoaders, ordered in descending
+ order by OpenStack release.
+ """
+ tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
+ for rel in six.itervalues(OPENSTACK_CODENAMES)]
+
+ if not os.path.isdir(templates_dir):
+ log('Templates directory not found @ %s.' % templates_dir,
+ level=ERROR)
+ raise OSConfigException
+
+ # the bottom contains templates_dir and possibly a common templates dir
+ # shipped with the helper.
+ loaders = [FileSystemLoader(templates_dir)]
+ helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
+ if os.path.isdir(helper_templates):
+ loaders.append(FileSystemLoader(helper_templates))
+
+ for rel, tmpl_dir in tmpl_dirs:
+ if os.path.isdir(tmpl_dir):
+ loaders.insert(0, FileSystemLoader(tmpl_dir))
+ if rel == os_release:
+ break
+ # demote this log to the lowest level; we don't really need to see these
+ # logs in production even when debugging.
+ log('Creating choice loader with dirs: %s' %
+ [l.searchpath for l in loaders], level=TRACE)
+ return ChoiceLoader(loaders)
+
+
+class OSConfigTemplate(object):
+ """
+ Associates a config file template with a list of context generators.
+ Responsible for constructing a template context based on those generators.
+ """
+
+ def __init__(self, config_file, contexts, config_template=None):
+ self.config_file = config_file
+
+ if hasattr(contexts, '__call__'):
+ self.contexts = [contexts]
+ else:
+ self.contexts = contexts
+
+ self._complete_contexts = []
+
+ self.config_template = config_template
+
+ def context(self):
+ ctxt = {}
+ for context in self.contexts:
+ _ctxt = context()
+ if _ctxt:
+ ctxt.update(_ctxt)
+ # track interfaces for every complete context.
+ [self._complete_contexts.append(interface)
+ for interface in context.interfaces
+ if interface not in self._complete_contexts]
+ return ctxt
+
+ def complete_contexts(self):
+ '''
+ Return a list of interfaces that have satisfied contexts.
+
+ '''
+ if self._complete_contexts:
+ return self._complete_contexts
+ self.context()
+ return self._complete_contexts
+
+ @property
+ def is_string_template(self):
+ """:returns: Boolean if this instance is a template initialised with a string"""
+ return self.config_template is not None
+
+
+class OSConfigRenderer(object):
+ """
+ This class provides a common templating system to be used by OpenStack
+ charms. It is intended to help charms share common code and templates,
+ and ease the burden of managing config templates across multiple OpenStack
+ releases.
+
+ Basic usage::
+
+ # import some common context generators from charmhelpers
+ from charmhelpers.contrib.openstack import context
+
+ # Create a renderer object for a specific OS release.
+ configs = OSConfigRenderer(templates_dir='/tmp/templates',
+ openstack_release='grizzly')
+ # register some config files with context generators.
+ configs.register(config_file='/etc/nova/nova.conf',
+ contexts=[context.SharedDBContext(),
+ context.AMQPContext()])
+ configs.register(config_file='/etc/nova/api-paste.ini',
+ contexts=[context.IdentityServiceContext()])
+ configs.register(config_file='/etc/haproxy/haproxy.conf',
+ contexts=[context.HAProxyContext()])
+ configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
+ contexts=[context.ExtraPolicyContext(),
+ context.KeystoneContext()],
+ config_template=hookenv.config('extra-policy'))
+ # write out a single config
+ configs.write('/etc/nova/nova.conf')
+ # write out all registered configs
+ configs.write_all()
+
+ **OpenStack Releases and template loading**
+
+ When the object is instantiated, it is associated with a specific OS
+ release. This dictates how the template loader will be constructed.
+
+ The constructed loader attempts to load the template from several places
+ in the following order:
+ - from the most recent OS release-specific template dir (if one exists)
+ - the base templates_dir
+ - a template directory shipped in the charm with this helper file.
+
+ For the example above, '/tmp/templates' contains the following structure::
+
+ /tmp/templates/nova.conf
+ /tmp/templates/api-paste.ini
+ /tmp/templates/grizzly/api-paste.ini
+ /tmp/templates/havana/api-paste.ini
+
+ Since it was registered with the grizzly release, it first searches
+ the grizzly directory for nova.conf, then the templates dir.
+
+ When writing api-paste.ini, it will find the template in the grizzly
+ directory.
+
+ If the object were created with folsom, it would fall back to the
+ base templates dir for its api-paste.ini template.
+
+ This system should help manage changes in config files through
+ openstack releases, allowing charms to fall back to the most recently
+ updated config template for a given release.
+
+ The haproxy.conf, since it is not shipped in the templates dir, will
+ be loaded from the module directory's template directory, eg
+ $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
+ us to ship common templates (haproxy, apache) with the helpers.
+
+ **Context generators**
+
+ Context generators are used to generate template contexts during hook
+ execution. Doing so may require inspecting service relations, charm
+ config, etc. When registered, a config file is associated with a list
+ of generators. When a template is rendered and written, all context
+ generators are called in a chain to generate the context dictionary
+ passed to the jinja2 template. See context.py for more info.
+ """ + def __init__(self, templates_dir, openstack_release): + if not os.path.isdir(templates_dir): + log('Could not locate templates dir %s' % templates_dir, + level=ERROR) + raise OSConfigException + + self.templates_dir = templates_dir + self.openstack_release = openstack_release + self.templates = {} + self._tmpl_env = None + + if None in [Environment, ChoiceLoader, FileSystemLoader]: + # if this code is running, the object is created pre-install hook. + # jinja2 shouldn't get touched until the module is reloaded on next + # hook execution, with proper jinja2 bits successfully imported. + if six.PY2: + apt_install('python-jinja2') + else: + apt_install('python3-jinja2') + + def register(self, config_file, contexts, config_template=None): + """ + Register a config file with a list of context generators to be called + during rendering. + config_template can be used to load a template from a string instead of + using template loaders and template files. + :param config_file (str): a path where a config file will be rendered + :param contexts (list): a list of context dictionaries with kv pairs + :param config_template (str): an optional template string to use + """ + self.templates[config_file] = OSConfigTemplate( + config_file=config_file, + contexts=contexts, + config_template=config_template + ) + log('Registered config file: {}'.format(config_file), + level=INFO) + + def _get_tmpl_env(self): + if not self._tmpl_env: + loader = get_loader(self.templates_dir, self.openstack_release) + self._tmpl_env = Environment(loader=loader) + + def _get_template(self, template): + self._get_tmpl_env() + template = self._tmpl_env.get_template(template) + log('Loaded template from {}'.format(template.filename), + level=INFO) + return template + + def _get_template_from_string(self, ostmpl): + ''' + Get a jinja2 template object from a string. + :param ostmpl: OSConfigTemplate to use as a data source. + ''' + self._get_tmpl_env() + template = self._tmpl_env.from_string(ostmpl.config_template) + log('Loaded a template from a string for {}'.format( + ostmpl.config_file), + level=INFO) + return template + + def render(self, config_file): + if config_file not in self.templates: + log('Config not registered: {}'.format(config_file), level=ERROR) + raise OSConfigException + + ostmpl = self.templates[config_file] + ctxt = ostmpl.context() + + if ostmpl.is_string_template: + template = self._get_template_from_string(ostmpl) + log('Rendering from a string template: ' + '{}'.format(config_file), + level=INFO) + else: + _tmpl = os.path.basename(config_file) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound: + # if no template is found with basename, try looking + # for it using a munged full path, eg: + # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf + _tmpl = '_'.join(config_file.split('/')[1:]) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound as e: + log('Could not load template from {} by {} or {}.' + ''.format( + self.templates_dir, + os.path.basename(config_file), + _tmpl + ), + level=ERROR) + raise e + + log('Rendering from template: {}'.format(config_file), + level=INFO) + return template.render(ctxt) + + def write(self, config_file): + """ + Write a single config file, raises if config file is not registered. 
+
+ """
+ if config_file not in self.templates:
+ log('Config not registered: %s' % config_file, level=ERROR)
+ raise OSConfigException
+
+ _out = self.render(config_file)
+ if six.PY3:
+ _out = _out.encode('UTF-8')
+
+ with open(config_file, 'wb') as out:
+ out.write(_out)
+
+ log('Wrote template %s.' % config_file, level=INFO)
+
+ def write_all(self):
+ """
+ Write out all registered config files.
+ """
+ [self.write(k) for k in six.iterkeys(self.templates)]
+
+ def set_release(self, openstack_release):
+ """
+ Resets the template environment and generates a new template loader
+ based on the new openstack release.
+ """
+ self._tmpl_env = None
+ self.openstack_release = openstack_release
+ self._get_tmpl_env()
+
+ def complete_contexts(self):
+ '''
+ Returns a list of context interfaces that yield a complete context.
+ '''
+ interfaces = []
+ [interfaces.extend(i.complete_contexts())
+ for i in six.itervalues(self.templates)]
+ return interfaces
+
+ def get_incomplete_context_data(self, interfaces):
+ '''
+ Return dictionary of relation status of interfaces and any missing
+ required context data. Example:
+ {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+ 'zeromq-configuration': {'related': False}}
+ '''
+ incomplete_context_data = {}
+
+ for i in six.itervalues(self.templates):
+ for context in i.contexts:
+ for interface in interfaces:
+ related = False
+ if interface in context.interfaces:
+ related = context.get_related()
+ missing_data = context.missing_data
+ if missing_data:
+ incomplete_context_data[interface] = {'missing_data': missing_data}
+ if related:
+ if incomplete_context_data.get(interface):
+ incomplete_context_data[interface].update({'related': True})
+ else:
+ incomplete_context_data[interface] = {'related': True}
+ else:
+ incomplete_context_data[interface] = {'related': False}
+ return incomplete_context_data
diff --git a/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/hooks/charmhelpers/contrib/openstack/vaultlocker.py
new file mode 100644
index 00000000..a8e4bf88
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/vaultlocker.py
@@ -0,0 +1,126 @@
+# Copyright 2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import json +import os + +import charmhelpers.contrib.openstack.alternatives as alternatives +import charmhelpers.contrib.openstack.context as context + +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as host +import charmhelpers.core.templating as templating +import charmhelpers.core.unitdata as unitdata + +VAULTLOCKER_BACKEND = 'charm-vaultlocker' + + +class VaultKVContext(context.OSContextGenerator): + """Vault KV context for interaction with vault-kv interfaces""" + interfaces = ['secrets-storage'] + + def __init__(self, secret_backend=None): + super(context.OSContextGenerator, self).__init__() + self.secret_backend = ( + secret_backend or 'charm-{}'.format(hookenv.service_name()) + ) + + def __call__(self): + db = unitdata.kv() + last_token = db.get('last-token') + secret_id = db.get('secret-id') + for relation_id in hookenv.relation_ids(self.interfaces[0]): + for unit in hookenv.related_units(relation_id): + data = hookenv.relation_get(unit=unit, + rid=relation_id) + vault_url = data.get('vault_url') + role_id = data.get('{}_role_id'.format(hookenv.local_unit())) + token = data.get('{}_token'.format(hookenv.local_unit())) + + if all([vault_url, role_id, token]): + token = json.loads(token) + vault_url = json.loads(vault_url) + + # Tokens may change when secret_id's are being + # reissued - if so use token to get new secret_id + if token != last_token: + secret_id = retrieve_secret_id( + url=vault_url, + token=token + ) + db.set('secret-id', secret_id) + db.set('last-token', token) + db.flush() + + ctxt = { + 'vault_url': vault_url, + 'role_id': json.loads(role_id), + 'secret_id': secret_id, + 'secret_backend': self.secret_backend, + } + vault_ca = data.get('vault_ca') + if vault_ca: + ctxt['vault_ca'] = json.loads(vault_ca) + self.complete = True + return ctxt + return {} + + +def write_vaultlocker_conf(context, priority=100): + """Write vaultlocker configuration to disk and install alternative + + :param context: Dict of data from vault-kv relation + :ptype: context: dict + :param priority: Priority of alternative configuration + :ptype: priority: int""" + charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format( + hookenv.service_name() + ) + host.mkdir(os.path.dirname(charm_vl_path), perms=0o700) + templating.render(source='vaultlocker.conf.j2', + target=charm_vl_path, + context=context, perms=0o600), + alternatives.install_alternative('vaultlocker.conf', + '/etc/vaultlocker/vaultlocker.conf', + charm_vl_path, priority) + + +def vault_relation_complete(backend=None): + """Determine whether vault relation is complete + + :param backend: Name of secrets backend requested + :ptype backend: string + :returns: whether the relation to vault is complete + :rtype: bool""" + vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) + vault_kv() + return vault_kv.complete + + +# TODO: contrib a high level unwrap method to hvac that works +def retrieve_secret_id(url, token): + """Retrieve a response-wrapped secret_id from Vault + + :param url: URL to Vault Server + :ptype url: str + :param token: One shot Token to use + :ptype token: str + :returns: secret_id to use for Vault Access + :rtype: str""" + import hvac + client = hvac.Client(url=url, token=token) + response = client._post('/v1/sys/wrapping/unwrap') + if response.status_code == 200: + data = response.json() + return data['data']['secret_id'] diff --git a/hooks/secrets-storage-relation-broken b/hooks/secrets-storage-relation-broken new file mode 120000 index 00000000..52d96630 --- 
/dev/null
+++ b/hooks/secrets-storage-relation-broken
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/hooks/secrets-storage-relation-changed b/hooks/secrets-storage-relation-changed
new file mode 120000
index 00000000..52d96630
--- /dev/null
+++ b/hooks/secrets-storage-relation-changed
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/hooks/secrets-storage-relation-departed b/hooks/secrets-storage-relation-departed
new file mode 120000
index 00000000..52d96630
--- /dev/null
+++ b/hooks/secrets-storage-relation-departed
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/hooks/secrets-storage-relation-joined b/hooks/secrets-storage-relation-joined
new file mode 120000
index 00000000..52d96630
--- /dev/null
+++ b/hooks/secrets-storage-relation-joined
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/lib/ceph/utils.py b/lib/ceph/utils.py
index 8f72a843..4a158e03 100644
--- a/lib/ceph/utils.py
+++ b/lib/ceph/utils.py
@@ -1364,17 +1364,27 @@ def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker):
 else:
 service_restart('ceph-mon-all')
+ # NOTE(jamespage): Later ceph releases require explicit
+ # call to ceph-create-keys to setup the
+ # admin keys for the cluster; this command
+ # will wait for quorum in the cluster before
+ # returning.
+ # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older
+ # ceph releases too. This improves bootstrap
+ # resilience as the charm will wait for
+ # presence of peer units before attempting
+ # to bootstrap. Note that charms deploying
+ # ceph-mon service should disable running of
+ # `ceph-create-keys` service in init system.
+ cmd = ['ceph-create-keys', '--id', hostname]
 if cmp_pkgrevno('ceph', '12.0.0') >= 0:
- # NOTE(jamespage): Later ceph releases require explicit
- # call to ceph-create-keys to setup the
- # admin keys for the cluster; this command
- # will wait for quorum in the cluster before
- # returning.
 # NOTE(fnordahl): The default timeout in ceph-create-keys of 600
- # seconds is not adequate for all situations.
+ # seconds is not adequate. Increase timeout when
+ # timeout parameter available. For older releases
+ # we rely on retry_on_exception decorator.
# LP#1719436 - cmd = ['ceph-create-keys', '--id', hostname, '--timeout', '1800'] - subprocess.check_call(cmd) + cmd.extend(['--timeout', '1800']) + subprocess.check_call(cmd) _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' osstat = os.stat(_client_admin_keyring) if not osstat.st_size: diff --git a/metadata.yaml b/metadata.yaml index f75f6872..efcb7198 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -27,6 +27,8 @@ extra-bindings: requires: mon: interface: ceph-osd + secrets-storage: + interface: vault-kv storage: osd-devices: type: block diff --git a/templates/vaultlocker.conf.j2 b/templates/vaultlocker.conf.j2 new file mode 100644 index 00000000..5679f81b --- /dev/null +++ b/templates/vaultlocker.conf.j2 @@ -0,0 +1,6 @@ +# vaultlocker configuration from ceph-osd charm +[vault] +url = {{ vault_url }} +approle = {{ role_id }} +backend = {{ secret_backend }} +secret_id = {{ secret_id }} diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py index 84e87f5d..ef785423 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ import novaclient import pika import swiftclient +from charmhelpers.core.decorators import retry_on_exception from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -423,6 +424,7 @@ class OpenStackAmuletUtils(AmuletUtils): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(num_retries=5, base_delay=1) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that diff --git a/unit_tests/test_ceph_hooks.py b/unit_tests/test_ceph_hooks.py index bc2cf132..a8d84766 100644 --- a/unit_tests/test_ceph_hooks.py +++ b/unit_tests/test_ceph_hooks.py @@ -517,3 +517,82 @@ class CephHooksTestCase(unittest.TestCase): subprocess.check_call.assert_called_once_with( ['udevadm', 'control', '--reload-rules'] ) + + +@patch.object(ceph_hooks, 'relation_get') +@patch.object(ceph_hooks, 'relation_set') +@patch.object(ceph_hooks, 'prepare_disks_and_activate') +@patch.object(ceph_hooks, 'get_relation_ip') +@patch.object(ceph_hooks, 'socket') +class SecretsStorageTestCase(unittest.TestCase): + + def test_secrets_storage_relation_joined(self, + _socket, + _get_relation_ip, + _prepare_disks_and_activate, + _relation_set, + _relation_get): + _get_relation_ip.return_value = '10.23.1.2' + _socket.gethostname.return_value = 'testhost' + ceph_hooks.secrets_storage_joined() + _get_relation_ip.assert_called_with('secrets-storage') + _relation_set.assert_called_with( + relation_id=None, + secret_backend='charm-vaultlocker', + isolated=True, + access_address='10.23.1.2', + hostname='testhost' + ) + _socket.gethostname.assert_called_once_with() + + def test_secrets_storage_relation_changed(self, + _socket, + _get_relation_ip, + _prepare_disks_and_activate, + _relation_set, + _relation_get): + _relation_get.return_value = None + ceph_hooks.secrets_storage_changed() + _prepare_disks_and_activate.assert_called_once_with() + + +@patch.object(ceph_hooks, 'cmp_pkgrevno') +@patch.object(ceph_hooks, 'config') +class VaultLockerTestCase(unittest.TestCase): + + def test_use_vaultlocker(self, _config, _cmp_pkgrevno): + _test_data = { + 'osd-encrypt': True, + 'osd-encrypt-keymanager': 'vault', + } + _config.side_effect = lambda x: _test_data.get(x) + _cmp_pkgrevno.return_value 
= 1 + self.assertTrue(ceph_hooks.use_vaultlocker()) + + def test_use_vaultlocker_no_encryption(self, _config, _cmp_pkgrevno): + _test_data = { + 'osd-encrypt': False, + 'osd-encrypt-keymanager': 'vault', + } + _config.side_effect = lambda x: _test_data.get(x) + _cmp_pkgrevno.return_value = 1 + self.assertFalse(ceph_hooks.use_vaultlocker()) + + def test_use_vaultlocker_not_vault(self, _config, _cmp_pkgrevno): + _test_data = { + 'osd-encrypt': True, + 'osd-encrypt-keymanager': 'ceph', + } + _config.side_effect = lambda x: _test_data.get(x) + _cmp_pkgrevno.return_value = 1 + self.assertFalse(ceph_hooks.use_vaultlocker()) + + def test_use_vaultlocker_old_version(self, _config, _cmp_pkgrevno): + _test_data = { + 'osd-encrypt': True, + 'osd-encrypt-keymanager': 'vault', + } + _config.side_effect = lambda x: _test_data.get(x) + _cmp_pkgrevno.return_value = -1 + self.assertRaises(ValueError, + ceph_hooks.use_vaultlocker) diff --git a/unit_tests/test_status.py b/unit_tests/test_status.py index be13b420..c5c080a1 100644 --- a/unit_tests/test_status.py +++ b/unit_tests/test_status.py @@ -32,6 +32,8 @@ TO_PATCH = [ 'get_conf', 'application_version_set', 'get_upstream_version', + 'vaultlocker', + 'use_vaultlocker', ] CEPH_MONS = [ @@ -47,6 +49,7 @@ class ServiceStatusTestCase(test_utils.CharmTestCase): super(ServiceStatusTestCase, self).setUp(hooks, TO_PATCH) self.config.side_effect = self.test_config.get self.get_upstream_version.return_value = '10.2.2' + self.use_vaultlocker.return_value = False def test_assess_status_no_monitor_relation(self): self.relation_ids.return_value = [] @@ -77,6 +80,40 @@ class ServiceStatusTestCase(test_utils.CharmTestCase): self.get_conf.return_value = 'monitor-bootstrap-key' self.ceph.get_running_osds.return_value = ['12345', '67890'] + self.get_upstream_version.return_value = '12.2.4' hooks.assess_status() self.status_set.assert_called_with('active', mock.ANY) - self.application_version_set.assert_called_with('10.2.2') + self.application_version_set.assert_called_with('12.2.4') + + def test_assess_status_monitor_vault_missing(self): + _test_relations = { + 'mon': ['mon:1'], + } + self.relation_ids.side_effect = lambda x: _test_relations.get(x, []) + self.related_units.return_value = CEPH_MONS + self.vaultlocker.vault_relation_complete.return_value = False + self.use_vaultlocker.return_value = True + self.get_conf.return_value = 'monitor-bootstrap-key' + self.ceph.get_running_osds.return_value = ['12345', + '67890'] + self.get_upstream_version.return_value = '12.2.4' + hooks.assess_status() + self.status_set.assert_called_with('blocked', mock.ANY) + self.application_version_set.assert_called_with('12.2.4') + + def test_assess_status_monitor_vault_incomplete(self): + _test_relations = { + 'mon': ['mon:1'], + 'secrets-storage': ['secrets-storage:6'] + } + self.relation_ids.side_effect = lambda x: _test_relations.get(x, []) + self.related_units.return_value = CEPH_MONS + self.vaultlocker.vault_relation_complete.return_value = False + self.use_vaultlocker.return_value = True + self.get_conf.return_value = 'monitor-bootstrap-key' + self.ceph.get_running_osds.return_value = ['12345', + '67890'] + self.get_upstream_version.return_value = '12.2.4' + hooks.assess_status() + self.status_set.assert_called_with('waiting', mock.ANY) + self.application_version_set.assert_called_with('12.2.4')
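
Taken together, the new status unit tests pin down the expected workload-status progression when vault-backed encryption is in play: 'blocked' while the secrets-storage relation is missing, 'waiting' once it is related but vault negotiation is incomplete, and 'active' otherwise. A hypothetical distillation of that rule for illustration only; `_expected_status` is not the charm's actual assess_status implementation, and the 'active' leg is inferred from the earlier active-status test::

    def _expected_status(vault_needed, ss_related, vault_complete):
        # blocked: encryption wanted but no secrets-storage relation yet
        # waiting: relation joined, vault negotiation still incomplete
        # active:  vault not needed, or the relation is complete
        if vault_needed and not vault_complete:
            return 'waiting' if ss_related else 'blocked'
        return 'active'

    assert _expected_status(True, False, False) == 'blocked'
    assert _expected_status(True, True, False) == 'waiting'
    assert _expected_status(False, False, False) == 'active'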