diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py index e4cb06bc..8d1753c3 100644 --- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2012-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,6 @@ # limitations under the License. """Compatibility with the nrpe-external-master charm""" -# Copyright 2012 Canonical Ltd. # # Authors: # Matthew Wedgwood @@ -511,7 +510,7 @@ def add_haproxy_checks(nrpe, unit_name): def remove_deprecated_check(nrpe, deprecated_services): """ - Remove checks fro deprecated services in list + Remove checks for deprecated services in list :param nrpe: NRPE object to remove check from :type nrpe: NRPE diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py index 7ea43f08..f7c6fbdc 100644 --- a/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,7 +22,7 @@ Configuration stanzas:: type: boolean default: true description: > - If false, a volume is mounted as sepecified in "volume-map" + If false, a volume is mounted as specified in "volume-map" If true, ephemeral storage will be used, meaning that log data will only exist as long as the machine. YOU HAVE BEEN WARNED. 
volume-map: diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index ba34fba0..f0b629a2 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -86,7 +86,7 @@ def is_elected_leader(resource): 2. If the charm is part of a corosync cluster, call corosync to determine leadership. 3. If the charm is not part of a corosync cluster, the leader is - determined as being "the alive unit with the lowest unit numer". In + determined as being "the alive unit with the lowest unit number". In other words, the oldest surviving unit. """ try: @@ -418,7 +418,7 @@ def get_managed_services_and_ports(services, external_ports, Return only the services and corresponding ports that are managed by this charm. This excludes haproxy when there is a relation with hacluster. This - is because this charm passes responsability for stopping and starting + is because this charm passes responsibility for stopping and starting haproxy to hacluster. Similarly, if a relation with hacluster exists then the ports returned by diff --git a/hooks/charmhelpers/contrib/hardening/host/templates/login.defs b/hooks/charmhelpers/contrib/hardening/host/templates/login.defs index db137d6d..7d107637 100644 --- a/hooks/charmhelpers/contrib/hardening/host/templates/login.defs +++ b/hooks/charmhelpers/contrib/hardening/host/templates/login.defs @@ -187,7 +187,7 @@ SYS_GID_MAX {{ sys_gid_max }} # # Max number of login retries if password is bad. This will most likely be -# overriden by PAM, since the default pam_unix module has it's own built +# overridden by PAM, since the default pam_unix module has it's own built # in of 3 retries. 
However, this is a safe fallback in case you are using # an authentication module that does not enforce PAM_MAXTRIES. # @@ -235,7 +235,7 @@ USERGROUPS_ENAB yes # # Instead of the real user shell, the program specified by this parameter # will be launched, although its visible name (argv[0]) will be the shell's. -# The program may do whatever it wants (logging, additional authentification, +# The program may do whatever it wants (logging, additional authentication, # banner, ...) before running the actual shell. # # FAKE_SHELL /bin/fakeshell diff --git a/hooks/charmhelpers/contrib/hardening/utils.py b/hooks/charmhelpers/contrib/hardening/utils.py index ff7485c2..56afa4b6 100644 --- a/hooks/charmhelpers/contrib/hardening/utils.py +++ b/hooks/charmhelpers/contrib/hardening/utils.py @@ -1,4 +1,4 @@ -# Copyright 2016 Canonical Limited. +# Copyright 2016-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -85,7 +85,7 @@ def _get_user_provided_overrides(modules): def _apply_overrides(settings, overrides, schema): - """Get overrides config overlayed onto modules defaults. + """Get overrides config overlaid onto modules defaults. :param modules: require stack modules config. :returns: dictionary of modules config with user overrides applied. diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 63e91cca..b356d64c 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -578,7 +578,7 @@ def get_relation_ip(interface, cidr_network=None): @returns IPv6 or IPv4 address """ # Select the interface address first - # For possible use as a fallback bellow with get_address_in_network + # For possible use as a fallback below with get_address_in_network try: # Get the interface specific IP address = network_get_primary_address(interface) diff --git a/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/hooks/charmhelpers/contrib/openstack/amulet/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/hooks/charmhelpers/contrib/openstack/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py deleted file mode 100644 index 94ca079c..00000000 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ /dev/null @@ -1,387 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import re -import sys -import six -from collections import OrderedDict -from charmhelpers.contrib.amulet.deployment import ( - AmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( - OPENSTACK_RELEASES_PAIRS -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - - -class OpenStackAmuletDeployment(AmuletDeployment): - """OpenStack amulet deployment. - - This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms. - """ - - def __init__(self, series=None, openstack=None, source=None, - stable=True, log_level=DEBUG): - """Initialize the deployment environment.""" - super(OpenStackAmuletDeployment, self).__init__(series) - self.log = self.get_logger(level=log_level) - self.log.info('OpenStackAmuletDeployment: init') - self.openstack = openstack - self.source = source - self.stable = stable - - def get_logger(self, name="deployment-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. 
- - Determine if the local branch being tested is derived from its - stable or next (dev) branch, and based on this, use the corresonding - stable or next branches for the other_services.""" - - self.log.info('OpenStackAmuletDeployment: determine branch locations') - - # Charms outside the ~openstack-charmers - base_charms = { - 'mysql': ['trusty'], - 'mongodb': ['trusty'], - 'nrpe': ['trusty', 'xenial'], - } - - for svc in other_services: - # If a location has been explicitly set, use it - if svc.get('location'): - continue - if svc['name'] in base_charms: - # NOTE: not all charms have support for all series we - # want/need to test against, so fix to most recent - # that each base charm supports - target_series = self.series - if self.series not in base_charms[svc['name']]: - target_series = base_charms[svc['name']][-1] - svc['location'] = 'cs:{}/{}'.format(target_series, - svc['name']) - elif self.stable: - svc['location'] = 'cs:{}/{}'.format(self.series, - svc['name']) - else: - svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( - self.series, - svc['name'] - ) - - return other_services - - def _add_services(self, this_service, other_services, use_source=None, - no_origin=None): - """Add services to the deployment and optionally set - openstack-origin/source. - - :param this_service dict: Service dictionary describing the service - whose amulet tests are being run - :param other_services dict: List of service dictionaries describing - the services needed to support the target - service - :param use_source list: List of services which use the 'source' config - option rather than 'openstack-origin' - :param no_origin list: List of services which do not support setting - the Cloud Archive. 
- Service Dict: - { - 'name': str charm-name, - 'units': int number of units, - 'constraints': dict of juju constraints, - 'location': str location of charm, - } - eg - this_service = { - 'name': 'openvswitch-odl', - 'constraints': {'mem': '8G'}, - } - other_services = [ - { - 'name': 'nova-compute', - 'units': 2, - 'constraints': {'mem': '4G'}, - 'location': cs:~bob/xenial/nova-compute - }, - { - 'name': 'mysql', - 'constraints': {'mem': '2G'}, - }, - {'neutron-api-odl'}] - use_source = ['mysql'] - no_origin = ['neutron-api-odl'] - """ - self.log.info('OpenStackAmuletDeployment: adding services') - - other_services = self._determine_branch_locations(other_services) - - super(OpenStackAmuletDeployment, self)._add_services(this_service, - other_services) - - services = other_services - services.append(this_service) - - use_source = use_source or [] - no_origin = no_origin or [] - - # Charms which should use the source config option - use_source = list(set( - use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy', 'percona-cluster', 'lxd'])) - - # Charms which can not use openstack-origin, ie. 
many subordinates - no_origin = list(set( - no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', - 'nrpe', 'openvswitch-odl', 'neutron-api-odl', - 'odl-controller', 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt', - 'ceilometer-agent'])) - - if self.openstack: - for svc in services: - if svc['name'] not in use_source + no_origin: - config = {'openstack-origin': self.openstack} - self.d.configure(svc['name'], config) - - if self.source: - for svc in services: - if svc['name'] in use_source and svc['name'] not in no_origin: - config = {'source': self.source} - self.d.configure(svc['name'], config) - - def _configure_services(self, configs): - """Configure all of the services.""" - self.log.info('OpenStackAmuletDeployment: configure services') - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=None): - """Wait for all units to have a specific extended status, except - for any defined as excluded. Unless specified via message, any - status containing any case of 'ready' will be considered a match. - - Examples of message usage: - - Wait for all unit status to CONTAIN any case of 'ready' or 'ok': - message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) - - Wait for all units to reach this status (exact match): - message = re.compile('^Unit is ready and clustered$') - - Wait for all units to reach any one of these (exact match): - message = re.compile('Unit is ready|OK|Ready') - - Wait for at least one unit to reach this status (exact match): - message = {'ready'} - - See Amulet's sentry.wait_for_messages() for message usage detail. 
- https://github.com/juju/amulet/blob/master/amulet/sentry.py - - :param message: Expected status match - :param exclude_services: List of juju service names to ignore, - not to be used in conjuction with include_only. - :param include_only: List of juju service names to exclusively check, - not to be used in conjuction with exclude_services. - :param timeout: Maximum time in seconds to wait for status match - :returns: None. Raises if timeout is hit. - """ - if not timeout: - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) - self.log.info('Waiting for extended status on units for {}s...' - ''.format(timeout)) - - all_services = self.d.services.keys() - - if exclude_services and include_only: - raise ValueError('exclude_services can not be used ' - 'with include_only') - - if message: - if isinstance(message, re._pattern_type): - match = message.pattern - else: - match = message - - self.log.debug('Custom extended status wait match: ' - '{}'.format(match)) - else: - self.log.debug('Default extended status wait match: contains ' - 'READY (case-insensitive)') - message = re.compile('.*ready.*', re.IGNORECASE) - - if exclude_services: - self.log.debug('Excluding services from extended status match: ' - '{}'.format(exclude_services)) - else: - exclude_services = [] - - if include_only: - services = include_only - else: - services = list(set(all_services) - set(exclude_services)) - - self.log.debug('Waiting up to {}s for extended status on services: ' - '{}'.format(timeout, services)) - service_messages = {service: message for service in services} - - # Check for idleness - self.d.sentry.wait(timeout=timeout) - # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) - # Check for ready messages - self.d.sentry.wait_for_messages(service_messages, timeout=timeout) - - self.log.info('OK') - - def _get_openstack_release(self): - """Get openstack release. 
- - Return an integer representing the enum value of the openstack - release. - """ - # Must be ordered by OpenStack release (not by Ubuntu release): - for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): - setattr(self, os_pair, i) - - releases = { - ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, - ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, - ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('xenial', None): self.xenial_mitaka, - ('xenial', 'cloud:xenial-newton'): self.xenial_newton, - ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, - ('xenial', 'cloud:xenial-pike'): self.xenial_pike, - ('xenial', 'cloud:xenial-queens'): self.xenial_queens, - ('yakkety', None): self.yakkety_newton, - ('zesty', None): self.zesty_ocata, - ('artful', None): self.artful_pike, - ('bionic', None): self.bionic_queens, - ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, - ('bionic', 'cloud:bionic-stein'): self.bionic_stein, - ('bionic', 'cloud:bionic-train'): self.bionic_train, - ('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri, - ('cosmic', None): self.cosmic_rocky, - ('disco', None): self.disco_stein, - ('eoan', None): self.eoan_train, - ('focal', None): self.focal_ussuri, - ('focal', 'cloud:focal-victoria'): self.focal_victoria, - ('groovy', None): self.groovy_victoria, - } - return releases[(self.series, self.openstack)] - - def _get_openstack_release_string(self): - """Get openstack release string. - - Return a string representing the openstack release. 
- """ - releases = OrderedDict([ - ('trusty', 'icehouse'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ('disco', 'stein'), - ('eoan', 'train'), - ('focal', 'ussuri'), - ('groovy', 'victoria'), - ]) - if self.openstack: - os_origin = self.openstack.split(':')[1] - return os_origin.split('%s-' % self.series)[1].split('/')[0] - else: - return releases[self.series] - - def get_percona_service_entry(self, memory_constraint=None): - """Return a amulet service entry for percona cluster. - - :param memory_constraint: Override the default memory constraint - in the service entry. - :type memory_constraint: str - :returns: Amulet service entry. - :rtype: dict - """ - memory_constraint = memory_constraint or '3072M' - svc_entry = { - 'name': 'percona-cluster', - 'constraints': {'mem': memory_constraint}} - if self._get_openstack_release() <= self.trusty_mitaka: - svc_entry['location'] = 'cs:trusty/percona-cluster' - return svc_entry - - def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools in a ceph + cinder + glance - test scenario, based on OpenStack release and whether ceph radosgw - is flagged as present or not.""" - - if self._get_openstack_release() == self.trusty_icehouse: - # Icehouse - pools = [ - 'data', - 'metadata', - 'rbd', - 'cinder-ceph', - 'glance' - ] - elif (self.trusty_kilo <= self._get_openstack_release() <= - self.zesty_ocata): - # Kilo through Ocata - pools = [ - 'rbd', - 'cinder-ceph', - 'glance' - ] - else: - # Pike and later - pools = [ - 'cinder-ceph', - 'glance' - ] - - if radosgw: - pools.extend([ - '.rgw.root', - '.rgw.control', - '.rgw', - '.rgw.gc', - '.users.uid' - ]) - - return pools diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py deleted file mode 100644 index 0a14af7e..00000000 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py 
+++ /dev/null @@ -1,1595 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import json -import logging -import os -import re -import six -import time -import urllib -import urlparse - -import cinderclient.v1.client as cinder_client -import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1 as glance_client -import glanceclient.v2 as glance_clientv2 -import heatclient.v1.client as heat_client -from keystoneclient.v2_0 import client as keystone_client -from keystoneauth1.identity import ( - v3, - v2, -) -from keystoneauth1 import session as keystone_session -from keystoneclient.v3 import client as keystone_client_v3 -from novaclient import exceptions - -import novaclient.client as nova_client -import novaclient -import pika -import swiftclient - -from charmhelpers.core.decorators import retry_on_exception - -from charmhelpers.contrib.amulet.utils import ( - AmuletUtils -) -from charmhelpers.core.host import CompareHostReleases - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - -NOVA_CLIENT_VERSION = "2" - -OPENSTACK_RELEASES_PAIRS = [ - 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', - 'trusty_mitaka', 'xenial_mitaka', - 'xenial_newton', 'yakkety_newton', - 'xenial_ocata', 'zesty_ocata', - 'xenial_pike', 'artful_pike', - 'xenial_queens', 'bionic_queens', - 'bionic_rocky', 'cosmic_rocky', - 'bionic_stein', 'disco_stein', - 'bionic_train', 'eoan_train', - 'bionic_ussuri', 'focal_ussuri', - 
'focal_victoria', 'groovy_victoria', -] - - -class OpenStackAmuletUtils(AmuletUtils): - """OpenStack amulet utilities. - - This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charm tests. - """ - - def __init__(self, log_level=ERROR): - """Initialize the deployment environment.""" - super(OpenStackAmuletUtils, self).__init__(log_level) - - def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, openstack_release=None): - """Validate endpoint data. Pick the correct validator based on - OpenStack release. Expected data should be in the v2 format: - { - 'id': id, - 'region': region, - 'adminurl': adminurl, - 'internalurl': internalurl, - 'publicurl': publicurl, - 'service_id': service_id} - - """ - validation_function = self.validate_v2_endpoint_data - xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') - if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_endpoint_data - expected = { - 'id': expected['id'], - 'region': expected['region'], - 'region_id': 'RegionOne', - 'url': self.valid_url, - 'interface': self.not_null, - 'service_id': expected['service_id']} - return validation_function(endpoints, admin_port, internal_port, - public_port, expected) - - def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate endpoint data. - - Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint. 
- """ - self.log.debug('Validating endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = False - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if (admin_port in ep.adminurl and - internal_port in ep.internalurl and - public_port in ep.publicurl): - found = True - actual = {'id': ep.id, - 'region': ep.region, - 'adminurl': ep.adminurl, - 'internalurl': ep.internalurl, - 'publicurl': ep.publicurl, - 'service_id': ep.service_id} - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if not found: - return 'endpoint not found' - - def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, expected_num_eps=3): - """Validate keystone v3 endpoint data. - - Validate the v3 endpoint data which has changed from v2. The - ports are used to find the matching endpoint. - - The new v3 endpoint data looks like: - - ['}, - region=RegionOne, - region_id=RegionOne, - service_id=17f842a0dc084b928e476fafe67e4095, - url=http://10.5.6.5:9312>, - '}, - region=RegionOne, - region_id=RegionOne, - service_id=72fc8736fb41435e8b3584205bb2cfa3, - url=http://10.5.6.6:35357/v3>, - ... ] - """ - self.log.debug('Validating v3 endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = [] - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if ((admin_port in ep.url and ep.interface == 'admin') or - (internal_port in ep.url and ep.interface == 'internal') or - (public_port in ep.url and ep.interface == 'public')): - found.append(ep.interface) - # note we ignore the links member. 
- actual = {'id': ep.id, - 'region': ep.region, - 'region_id': ep.region_id, - 'interface': self.not_null, - 'url': ep.url, - 'service_id': ep.service_id, } - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if len(found) != expected_num_eps: - return 'Unexpected number of endpoints found' - - def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): - """Convert v2 endpoint data into v3. - - { - 'service_name1': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - 'service_name2': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - } - """ - self.log.warn("Endpoint ID and Region ID validation is limited to not " - "null checks after v2 to v3 conversion") - for svc in ep_data.keys(): - assert len(ep_data[svc]) == 1, "Unknown data format" - svc_ep_data = ep_data[svc][0] - ep_data[svc] = [ - { - 'url': svc_ep_data['adminURL'], - 'interface': 'admin', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}, - { - 'url': svc_ep_data['publicURL'], - 'interface': 'public', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}, - { - 'url': svc_ep_data['internalURL'], - 'interface': 'internal', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}] - return ep_data - - def validate_svc_catalog_endpoint_data(self, expected, actual, - openstack_release=None): - """Validate service catalog endpoint data. Pick the correct validator - for the OpenStack version. Expected data should be in the v2 format: - { - 'service_name1': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - 'service_name2': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. 
- 'publicURL': publicURL, - 'internalURL': internalURL - }], - } - - """ - validation_function = self.validate_v2_svc_catalog_endpoint_data - xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') - if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_svc_catalog_endpoint_data - expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) - return validation_function(expected, actual) - - def validate_v2_svc_catalog_endpoint_data(self, expected, actual): - """Validate service catalog endpoint data. - - Validate a list of actual service catalog endpoints vs a list of - expected service catalog endpoints. - """ - self.log.debug('Validating service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - ret = self._validate_dict_data(expected[k][0], actual[k][0]) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_v3_svc_catalog_endpoint_data(self, expected, actual): - """Validate the keystone v3 catalog endpoint data. - - Validate a list of dictinaries that make up the keystone v3 service - catalogue. 
- - It is in the form of: - - - {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:35357/v3'}, - {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}, - {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}], - u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'f629388955bc407f8b11d8b7ca168086', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9312'}]} - - Note, that an added complication is that the order of admin, public, - internal against 'interface' in each region. - - Thus, the function sorts the expected and actual lists using the - interface key as a sort key, prior to the comparison. 
- """ - self.log.debug('Validating v3 service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - l_expected = sorted(v, key=lambda x: x['interface']) - l_actual = sorted(actual[k], key=lambda x: x['interface']) - if len(l_actual) != len(l_expected): - return ("endpoint {} has differing number of interfaces " - " - expected({}), actual({})" - .format(k, len(l_expected), len(l_actual))) - for i_expected, i_actual in zip(l_expected, l_actual): - self.log.debug("checking interface {}" - .format(i_expected['interface'])) - ret = self._validate_dict_data(i_expected, i_actual) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_tenant_data(self, expected, actual): - """Validate tenant data. - - Validate a list of actual tenant data vs list of expected tenant - data. - """ - self.log.debug('Validating tenant data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'enabled': act.enabled, 'description': act.description, - 'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected tenant data - {}".format(ret) - if not found: - return "tenant {} does not exist".format(e['name']) - return ret - - def validate_role_data(self, expected, actual): - """Validate role data. - - Validate a list of actual role data vs a list of expected role - data. 
- """ - self.log.debug('Validating role data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected role data - {}".format(ret) - if not found: - return "role {} does not exist".format(e['name']) - return ret - - def validate_user_data(self, expected, actual, api_version=None): - """Validate user data. - - Validate a list of actual user data vs a list of expected user - data. - """ - self.log.debug('Validating user data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - if e['name'] == act.name: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'id': act.id} - if api_version == 3: - a['default_project_id'] = getattr(act, - 'default_project_id', - 'none') - else: - a['tenantId'] = act.tenantId - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected user data - {}".format(ret) - if not found: - return "user {} does not exist".format(e['name']) - return ret - - def validate_flavor_data(self, expected, actual): - """Validate flavor data. - - Validate a list of actual flavors vs a list of expected flavors. - """ - self.log.debug('Validating flavor data...') - self.log.debug('actual: {}'.format(repr(actual))) - act = [a.name for a in actual] - return self._validate_list_data(expected, act) - - def tenant_exists(self, keystone, tenant): - """Return True if tenant exists.""" - self.log.debug('Checking if tenant exists ({})...'.format(tenant)) - return tenant in [t.name for t in keystone.tenants.list()] - - @retry_on_exception(num_retries=5, base_delay=1) - def keystone_wait_for_propagation(self, sentry_relation_pairs, - api_version): - """Iterate over list of sentry and relation tuples and verify that - api_version has the expected value. 
- - :param sentry_relation_pairs: list of sentry, relation name tuples used - for monitoring propagation of relation - data - :param api_version: api_version to expect in relation data - :returns: None if successful. Raise on error. - """ - for (sentry, relation_name) in sentry_relation_pairs: - rel = sentry.relation('identity-service', - relation_name) - self.log.debug('keystone relation data: {}'.format(rel)) - if rel.get('api_version') != str(api_version): - raise Exception("api_version not propagated through relation" - " data yet ('{}' != '{}')." - "".format(rel.get('api_version'), api_version)) - - def keystone_configure_api_version(self, sentry_relation_pairs, deployment, - api_version): - """Configure preferred-api-version of keystone in deployment and - monitor provided list of relation objects for propagation - before returning to caller. - - :param sentry_relation_pairs: list of sentry, relation tuples used for - monitoring propagation of relation data - :param deployment: deployment to configure - :param api_version: value preferred-api-version will be set to - :returns: None if successful. Raise on error. 
- """ - self.log.debug("Setting keystone preferred-api-version: '{}'" - "".format(api_version)) - - config = {'preferred-api-version': api_version} - deployment.d.configure('keystone', config) - deployment._auto_wait_for_status() - self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - - def authenticate_cinder_admin(self, keystone, api_version=2): - """Authenticates admin user with cinder.""" - self.log.debug('Authenticating cinder admin...') - _clients = { - 1: cinder_client.Client, - 2: cinder_clientv2.Client} - return _clients[api_version](session=keystone.session) - - def authenticate_keystone(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Authenticate with Keystone""" - self.log.debug('Authenticating with keystone...') - if not api_version: - api_version = 2 - sess, auth = self.get_keystone_session( - keystone_ip=keystone_ip, - username=username, - password=password, - api_version=api_version, - admin_port=admin_port, - user_domain_name=user_domain_name, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name - ) - if api_version == 2: - client = keystone_client.Client(session=sess) - else: - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client - - def get_keystone_session(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Return a keystone session object""" - ep = self.get_keystone_endpoint(keystone_ip, - api_version=api_version, - admin_port=admin_port) - if api_version == 2: - auth = v2.Password( - username=username, - password=password, - tenant_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - else: - auth = 
v3.Password( - user_domain_name=user_domain_name, - username=username, - password=password, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - return (sess, auth) - - def get_keystone_endpoint(self, keystone_ip, api_version=None, - admin_port=False): - """Return keystone endpoint""" - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if api_version == 2: - ep = base_ep + "/v2.0" - else: - ep = base_ep + "/v3" - return ep - - def get_default_keystone_session(self, keystone_sentry, - openstack_release=None, api_version=2): - """Return a keystone session object and client object assuming standard - default settings - - Example call in amulet tests: - self.keystone_session, self.keystone = u.get_default_keystone_session( - self.keystone_sentry, - openstack_release=self._get_openstack_release()) - - The session can then be used to auth other clients: - neutronclient.Client(session=session) - aodh_client.Client(session=session) - eyc - """ - self.log.debug('Authenticating keystone admin...') - # 11 => xenial_queens - if api_version == 3 or (openstack_release and openstack_release >= 11): - client_class = keystone_client_v3.Client - api_version = 3 - else: - client_class = keystone_client.Client - keystone_ip = keystone_sentry.info['public-address'] - session, auth = self.get_keystone_session( - keystone_ip, - api_version=api_version, - username='admin', - password='openstack', - project_name='admin', - user_domain_name='admin_domain', - project_domain_name='admin_domain') - client = client_class(session=session) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(session) - return session, client - - def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant=None, api_version=None, - keystone_ip=None, user_domain_name=None, - 
project_domain_name=None, - project_name=None): - """Authenticates admin user with the keystone admin endpoint.""" - self.log.debug('Authenticating keystone admin...') - if not keystone_ip: - keystone_ip = keystone_sentry.info['public-address'] - - # To support backward compatibility usage of this function - if not project_name: - project_name = tenant - if api_version == 3 and not user_domain_name: - user_domain_name = 'admin_domain' - if api_version == 3 and not project_domain_name: - project_domain_name = 'admin_domain' - if api_version == 3 and not project_name: - project_name = 'admin' - - return self.authenticate_keystone( - keystone_ip, user, password, - api_version=api_version, - user_domain_name=user_domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - admin_port=True) - - def authenticate_keystone_user(self, keystone, user, password, tenant): - """Authenticates a regular user with the keystone public endpoint.""" - self.log.debug('Authenticating keystone user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - keystone_ip = urlparse.urlparse(ep).hostname - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant) - - def authenticate_glance_admin(self, keystone, force_v1_client=False): - """Authenticates admin user with glance.""" - self.log.debug('Authenticating glance admin...') - ep = keystone.service_catalog.url_for(service_type='image', - interface='adminURL') - if not force_v1_client and keystone.session: - return glance_clientv2.Client("2", session=keystone.session) - else: - return glance_client.Client(ep, token=keystone.auth_token) - - def authenticate_heat_admin(self, keystone): - """Authenticates the admin user with heat.""" - self.log.debug('Authenticating heat admin...') - ep = keystone.service_catalog.url_for(service_type='orchestration', - interface='publicURL') - if keystone.session: - return 
heat_client.Client(endpoint=ep, session=keystone.session) - else: - return heat_client.Client(endpoint=ep, token=keystone.auth_token) - - def authenticate_nova_user(self, keystone, user, password, tenant): - """Authenticates a regular user with nova-api.""" - self.log.debug('Authenticating nova user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return nova_client.Client(NOVA_CLIENT_VERSION, - session=keystone.session, - auth_url=ep) - elif novaclient.__version__[0] >= "7": - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, password=password, - project_name=tenant, auth_url=ep) - else: - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) - - def authenticate_swift_user(self, keystone, user, password, tenant): - """Authenticates a regular user with swift api.""" - self.log.debug('Authenticating swift user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return swiftclient.Connection(session=keystone.session) - else: - return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') - - def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create the specified flavor.""" - try: - nova.flavors.find(name=name) - except (exceptions.NotFound, exceptions.NoUniqueMatch): - self.log.debug('Creating flavor ({})'.format(name)) - nova.flavors.create(name, ram, vcpus, disk, flavorid, - ephemeral, swap, rxtx_factor, is_public) - - def glance_create_image(self, glance, image_name, image_url, - download_dir='tests', - hypervisor_type=None, - disk_format='qcow2', - architecture='x86_64', - container_format='bare'): - """Download an image and upload it to glance, validate its status - and return an 
image object pointer. KVM defaults, can override for - LXD. - - :param glance: pointer to authenticated glance api connection - :param image_name: display name for new image - :param image_url: url to retrieve - :param download_dir: directory to store downloaded image file - :param hypervisor_type: glance image hypervisor property - :param disk_format: glance image disk format - :param architecture: glance image architecture property - :param container_format: glance image container format - :returns: glance image pointer - """ - self.log.debug('Creating glance image ({}) from ' - '{}...'.format(image_name, image_url)) - - # Download image - http_proxy = os.getenv('OS_TEST_HTTP_PROXY') - self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - abs_file_name = os.path.join(download_dir, image_name) - if not os.path.exists(abs_file_name): - opener.retrieve(image_url, abs_file_name) - - # Create glance image - glance_properties = { - 'architecture': architecture, - } - if hypervisor_type: - glance_properties['hypervisor_type'] = hypervisor_type - # Create glance image - if float(glance.version) < 2.0: - with open(abs_file_name) as f: - image = glance.images.create( - name=image_name, - is_public=True, - disk_format=disk_format, - container_format=container_format, - properties=glance_properties, - data=f) - else: - image = glance.images.create( - name=image_name, - visibility="public", - disk_format=disk_format, - container_format=container_format) - glance.images.upload(image.id, open(abs_file_name, 'rb')) - glance.images.update(image.id, **glance_properties) - - # Wait for image to reach active status - img_id = image.id - ret = self.resource_reaches_status(glance.images, img_id, - expected_stat='active', - msg='Image status wait') - if not ret: - msg = 'Glance image failed to reach expected state.' 
- amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new image - self.log.debug('Validating image attributes...') - val_img_name = glance.images.get(img_id).name - val_img_stat = glance.images.get(img_id).status - val_img_cfmt = glance.images.get(img_id).container_format - val_img_dfmt = glance.images.get(img_id).disk_format - - if float(glance.version) < 2.0: - val_img_pub = glance.images.get(img_id).is_public - else: - val_img_pub = glance.images.get(img_id).visibility == "public" - - msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' - 'container fmt:{} disk fmt:{}'.format( - val_img_name, val_img_pub, img_id, - val_img_stat, val_img_cfmt, val_img_dfmt)) - - if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == container_format \ - and val_img_dfmt == disk_format: - self.log.debug(msg_attr) - else: - msg = ('Image validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return image - - def create_cirros_image(self, glance, image_name, hypervisor_type=None): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. 
- - :param glance: pointer to authenticated glance connection - :param image_name: display name for new image - :param hypervisor_type: glance image hypervisor property - :returns: glance image pointer - """ - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'glance_create_image instead of ' - 'create_cirros_image.') - - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) - - # Get cirros image URL - http_proxy = os.getenv('OS_TEST_HTTP_PROXY') - self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - f.close() - - return self.glance_create_image( - glance, - image_name, - cirros_url, - hypervisor_type=hypervisor_type) - - def delete_image(self, glance, image): - """Delete the specified image.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_image.') - self.log.debug('Deleting glance image ({})...'.format(image)) - return self.delete_resource(glance.images, image, msg='glance image') - - def create_instance(self, nova, image_name, instance_name, flavor): - """Create the specified instance.""" - self.log.debug('Creating instance ' - '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.glance.find_image(image_name) - flavor = nova.flavors.find(name=flavor) - instance = nova.servers.create(name=instance_name, image=image, - flavor=flavor) - - count = 1 - status = instance.status - while status != 'ACTIVE' and count < 60: - time.sleep(3) - instance = nova.servers.get(instance.id) - status = instance.status - self.log.debug('instance status: 
{}'.format(status)) - count += 1 - - if status != 'ACTIVE': - self.log.error('instance creation timed out') - return None - - return instance - - def delete_instance(self, nova, instance): - """Delete the specified instance.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_instance.') - self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, - msg='nova instance') - - def create_or_get_keypair(self, nova, keypair_name="testkey"): - """Create a new keypair, or return pointer if it already exists.""" - try: - _keypair = nova.keypairs.get(keypair_name) - self.log.debug('Keypair ({}) already exists, ' - 'using it.'.format(keypair_name)) - return _keypair - except Exception: - self.log.debug('Keypair ({}) does not exist, ' - 'creating it.'.format(keypair_name)) - - _keypair = nova.keypairs.create(name=keypair_name) - return _keypair - - def _get_cinder_obj_name(self, cinder_object): - """Retrieve name of cinder object. - - :param cinder_object: cinder snapshot or volume object - :returns: str cinder object name - """ - # v1 objects store name in 'display_name' attr but v2+ use 'name' - try: - return cinder_object.display_name - except AttributeError: - return cinder_object.name - - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, - img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, OR - optionally as a clone of an existing volume, OR optionally - from a snapshot. Wait for the new volume status to reach - the expected status, validate and return a resource pointer. 
- - :param vol_name: cinder volume display name - :param vol_size: size in gigabytes - :param img_id: optional glance image id - :param src_vol_id: optional source volume id to clone - :param snap_id: optional snapshot id to use - :returns: cinder volume pointer - """ - # Handle parameter input and avoid impossible combinations - if img_id and not src_vol_id and not snap_id: - # Create volume from image - self.log.debug('Creating cinder volume from glance image...') - bootable = 'true' - elif src_vol_id and not img_id and not snap_id: - # Clone an existing volume - self.log.debug('Cloning cinder volume...') - bootable = cinder.volumes.get(src_vol_id).bootable - elif snap_id and not src_vol_id and not img_id: - # Create volume from snapshot - self.log.debug('Creating cinder volume from snapshot...') - snap = cinder.volume_snapshots.find(id=snap_id) - vol_size = snap.size - snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id - bootable = cinder.volumes.get(snap_vol_id).bootable - elif not img_id and not src_vol_id and not snap_id: - # Create volume - self.log.debug('Creating cinder volume...') - bootable = 'false' - else: - # Impossible combination of parameters - msg = ('Invalid method use - name:{} size:{} img_id:{} ' - 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, - img_id, src_vol_id, - snap_id)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create new volume - try: - vol_new = cinder.volumes.create(display_name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except TypeError: - vol_new = cinder.volumes.create(name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except Exception as e: - msg = 'Failed to create volume: {}'.format(e) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Wait for volume to reach available status - ret = self.resource_reaches_status(cinder.volumes, vol_id, - 
expected_stat="available", - msg="Volume status wait") - if not ret: - msg = 'Cinder volume failed to reach expected state.' - amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new volume - self.log.debug('Validating volume attributes...') - val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) - val_vol_boot = cinder.volumes.get(vol_id).bootable - val_vol_stat = cinder.volumes.get(vol_id).status - val_vol_size = cinder.volumes.get(vol_id).size - msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' - '{} size:{}'.format(val_vol_name, vol_id, - val_vol_stat, val_vol_boot, - val_vol_size)) - - if val_vol_boot == bootable and val_vol_stat == 'available' \ - and val_vol_name == vol_name and val_vol_size == vol_size: - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return vol_new - - def delete_resource(self, resource, resource_id, - msg="resource", max_wait=120): - """Delete one openstack resource, such as one instance, keypair, - image, volume, stack, etc., and confirm deletion within max wait time. 
- - :param resource: pointer to os resource type, ex:glance_client.images - :param resource_id: unique name or id for the openstack resource - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, otherwise False - """ - self.log.debug('Deleting OpenStack resource ' - '{} ({})'.format(resource_id, msg)) - num_before = len(list(resource.list())) - resource.delete(resource_id) - - tries = 0 - num_after = len(list(resource.list())) - while num_after != (num_before - 1) and tries < (max_wait / 4): - self.log.debug('{} delete check: ' - '{} [{}:{}] {}'.format(msg, tries, - num_before, - num_after, - resource_id)) - time.sleep(4) - num_after = len(list(resource.list())) - tries += 1 - - self.log.debug('{}: expected, actual count = {}, ' - '{}'.format(msg, num_before - 1, num_after)) - - if num_after == (num_before - 1): - return True - else: - self.log.error('{} delete timed out'.format(msg)) - return False - - def resource_reaches_status(self, resource, resource_id, - expected_stat='available', - msg='resource', max_wait=120): - """Wait for an openstack resources status to reach an - expected status within a specified time. Useful to confirm that - nova instances, cinder vols, snapshots, glance images, heat stacks - and other resources eventually reach the expected status. 
- - :param resource: pointer to os resource type, ex: heat_client.stacks - :param resource_id: unique id for the openstack resource - :param expected_stat: status to expect resource to reach - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, False if status is not reached - """ - - tries = 0 - resource_stat = resource.get(resource_id).status - while resource_stat != expected_stat and tries < (max_wait / 4): - self.log.debug('{} status check: ' - '{} [{}:{}] {}'.format(msg, tries, - resource_stat, - expected_stat, - resource_id)) - time.sleep(4) - resource_stat = resource.get(resource_id).status - tries += 1 - - self.log.debug('{}: expected, actual status = {}, ' - '{}'.format(msg, resource_stat, expected_stat)) - - if resource_stat == expected_stat: - return True - else: - self.log.debug('{} never reached expected status: ' - '{}'.format(resource_id, expected_stat)) - return False - - def get_ceph_osd_id_cmd(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return ("`initctl list | grep 'ceph-osd ' | " - "awk 'NR=={} {{ print $2 }}' | " - "grep -o '[0-9]*'`".format(index + 1)) - - def get_ceph_pools(self, sentry_unit): - """Return a dict of ceph pools from a single ceph unit, with - pool name as keys, pool id as vals.""" - pools = {} - cmd = 'sudo ceph osd lspools' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # For mimic ceph osd lspools output - output = output.replace("\n", ",") - - # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, - for pool in str(output).split(','): - pool_id_name = pool.split(' ') - if len(pool_id_name) == 2: - pool_id = pool_id_name[0] - pool_name = pool_id_name[1] - pools[pool_name] = int(pool_id) - - self.log.debug('Pools on {}: 
{}'.format(sentry_unit.info['unit_name'], - pools)) - return pools - - def get_ceph_df(self, sentry_unit): - """Return dict of ceph df json output, including ceph pool state. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :returns: Dict of ceph df output - """ - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return json.loads(output) - - def get_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Take a sample of attributes of a ceph pool, returning ceph - pool name, object count and disk space used for the specified - pool ID number. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :param pool_id: Ceph pool ID - :returns: List of pool name, object count, kb disk space used - """ - df = self.get_ceph_df(sentry_unit) - for pool in df['pools']: - if pool['id'] == pool_id: - pool_name = pool['name'] - obj_count = pool['stats']['objects'] - kb_used = pool['stats']['kb_used'] - - self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, pool_id, - obj_count, kb_used)) - return pool_name, obj_count, kb_used - - def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes. The - 2nd element is expected to be greater than the 1st; 3rd is expected - to be less than the 2nd. 
- - :param samples: List containing 3 data samples - :param sample_type: String for logging and usage context - :returns: None if successful, Failure message otherwise - """ - original, created, deleted = range(3) - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - return ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - else: - self.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - - # rabbitmq/amqp specific helpers: - - def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): - """Wait for rmq units extended status to show cluster readiness, - after an optional initial sleep period. Initial sleep is likely - necessary to be effective following a config change, as status - message may not instantly update to non-ready.""" - - if init_sleep: - time.sleep(init_sleep) - - message = re.compile('^Unit is ready and clustered$') - deployment._auto_wait_for_status(message=message, - timeout=timeout, - include_only=['rabbitmq-server']) - - def add_rmq_test_user(self, sentry_units, - username="testuser1", password="changeme"): - """Add a test user via the first rmq juju unit, check connection as - the new user against all sentry units. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful. Raise on error. 
- """ - self.log.debug('Adding rmq user ({})...'.format(username)) - - # Check that user does not already exist - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - if username in output: - self.log.warning('User ({}) already exists, returning ' - 'gracefully.'.format(username)) - return - - perms = '".*" ".*" ".*"' - cmds = ['rabbitmqctl add_user {} {}'.format(username, password), - 'rabbitmqctl set_permissions {} {}'.format(username, perms)] - - # Add user via first unit - for cmd in cmds: - output, _ = self.run_cmd_unit(sentry_units[0], cmd) - - # Check connection against the other sentry_units - self.log.debug('Checking user connect against units...') - for sentry_unit in sentry_units: - connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, - username=username, - password=password) - connection.close() - - def delete_rmq_test_user(self, sentry_units, username="testuser1"): - """Delete a rabbitmq user via the first rmq juju unit. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful or no such user. - """ - self.log.debug('Deleting rmq user ({})...'.format(username)) - - # Check that the user exists - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - - if username not in output: - self.log.warning('User ({}) does not exist, returning ' - 'gracefully.'.format(username)) - return - - # Delete the user - cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) - - def get_rmq_cluster_status(self, sentry_unit): - """Execute rabbitmq cluster status command on a unit and return - the full output. 
- - :param unit: sentry unit - :returns: String containing console output of cluster status command - """ - cmd = 'rabbitmqctl cluster_status' - output, _ = self.run_cmd_unit(sentry_unit, cmd) - self.log.debug('{} cluster_status:\n{}'.format( - sentry_unit.info['unit_name'], output)) - return str(output) - - def get_rmq_cluster_running_nodes(self, sentry_unit): - """Parse rabbitmqctl cluster_status output string, return list of - running rabbitmq cluster nodes. - - :param unit: sentry unit - :returns: List containing node names of running nodes - """ - # NOTE(beisner): rabbitmqctl cluster_status output is not - # json-parsable, do string chop foo, then json.loads that. - str_stat = self.get_rmq_cluster_status(sentry_unit) - if 'running_nodes' in str_stat: - pos_start = str_stat.find("{running_nodes,") + 15 - pos_end = str_stat.find("]},", pos_start) + 1 - str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') - run_nodes = json.loads(str_run_nodes) - return run_nodes - else: - return [] - - def validate_rmq_cluster_running_nodes(self, sentry_units): - """Check that all rmq unit hostnames are represented in the - cluster_status output of all units. - - :param host_names: dict of juju unit names to host names - :param units: list of sentry unit pointers (all rmq units) - :returns: None if successful, otherwise return error message - """ - host_names = self.get_unit_hostnames(sentry_units) - errors = [] - - # Query every unit for cluster_status running nodes - for query_unit in sentry_units: - query_unit_name = query_unit.info['unit_name'] - running_nodes = self.get_rmq_cluster_running_nodes(query_unit) - - # Confirm that every unit is represented in the queried unit's - # cluster_status running nodes output. 
- for validate_unit in sentry_units: - val_host_name = host_names[validate_unit.info['unit_name']] - val_node_name = 'rabbit@{}'.format(val_host_name) - - if val_node_name not in running_nodes: - errors.append('Cluster member check failed on {}: {} not ' - 'in {}\n'.format(query_unit_name, - val_node_name, - running_nodes)) - if errors: - return ''.join(errors) - - def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): - """Check a single juju rmq unit for ssl and port in the config file.""" - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - conf_file = '/etc/rabbitmq/rabbitmq.config' - conf_contents = str(self.file_contents_safe(sentry_unit, - conf_file, max_wait=16)) - # Checks - conf_ssl = 'ssl' in conf_contents - conf_port = str(port) in conf_contents - - # Port explicitly checked in config - if port and conf_port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif port and not conf_port and conf_ssl: - self.log.debug('SSL is enabled @{} but not on port {} ' - '({})'.format(host, port, unit_name)) - return False - # Port not checked (useful when checking that ssl is disabled) - elif not port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif not conf_ssl: - self.log.debug('SSL not enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return False - else: - msg = ('Unknown condition when checking SSL status @{}:{} ' - '({})'.format(host, port, unit_name)) - amulet.raise_status(amulet.FAIL, msg) - - def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): - """Check that ssl is enabled on rmq juju sentry units. 
- - :param sentry_units: list of all rmq sentry units - :param port: optional ssl port override to validate - :returns: None if successful, otherwise return error message - """ - for sentry_unit in sentry_units: - if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): - return ('Unexpected condition: ssl is disabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def validate_rmq_ssl_disabled_units(self, sentry_units): - """Check that ssl is enabled on listed rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :returns: True if successful. Raise on error. - """ - for sentry_unit in sentry_units: - if self.rmq_ssl_is_enabled_on_unit(sentry_unit): - return ('Unexpected condition: ssl is enabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def configure_rmq_ssl_on(self, sentry_units, deployment, - port=None, max_wait=60): - """Turn ssl charm config option on, with optional non-default - ssl port specification. Confirm that it is enabled on every - unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param port: amqp port, use defaults if None - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. 
- """ - self.log.debug('Setting ssl charm config option: on') - - # Enable RMQ SSL - config = {'ssl': 'on'} - if port: - config['ssl_port'] = port - - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): - """Turn ssl charm config option off, confirm that it is disabled - on every unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: off') - - # Disable RMQ SSL - config = {'ssl': 'off'} - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def connect_amqp_by_unit(self, sentry_unit, ssl=False, - port=None, fatal=True, - username="testuser1", password="changeme"): - """Establish and return a pika amqp connection to the rabbitmq service - running on a rmq juju unit. 
- - :param sentry_unit: sentry unit pointer - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :param fatal: boolean, default to True (raises on connect error) - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: pika amqp connection pointer or None if failed and non-fatal - """ - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - # Default port logic if port is not specified - if ssl and not port: - port = 5671 - elif not ssl and not port: - port = 5672 - - self.log.debug('Connecting to amqp on {}:{} ({}) as ' - '{}...'.format(host, port, unit_name, username)) - - try: - credentials = pika.PlainCredentials(username, password) - parameters = pika.ConnectionParameters(host=host, port=port, - credentials=credentials, - ssl=ssl, - connection_attempts=3, - retry_delay=5, - socket_timeout=1) - connection = pika.BlockingConnection(parameters) - assert connection.is_open is True - assert connection.is_closing is False - self.log.debug('Connect OK') - return connection - except Exception as e: - msg = ('amqp connection failed to {}:{} as ' - '{} ({})'.format(host, port, username, str(e))) - if fatal: - amulet.raise_status(amulet.FAIL, msg) - else: - self.log.warn(msg) - return None - - def publish_amqp_message_by_unit(self, sentry_unit, message, - queue="test", ssl=False, - username="testuser1", - password="changeme", - port=None): - """Publish an amqp message to a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param message: amqp message string - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: None. Raises exception if publish failed. 
- """ - self.log.debug('Publishing message to {} queue:\n{}'.format(queue, - message)) - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - - # NOTE(beisner): extra debug here re: pika hang potential: - # https://github.com/pika/pika/issues/297 - # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw - self.log.debug('Defining channel...') - channel = connection.channel() - self.log.debug('Declaring queue...') - channel.queue_declare(queue=queue, auto_delete=False, durable=True) - self.log.debug('Publishing message...') - channel.basic_publish(exchange='', routing_key=queue, body=message) - self.log.debug('Closing channel...') - channel.close() - self.log.debug('Closing connection...') - connection.close() - - def get_amqp_message_by_unit(self, sentry_unit, queue="test", - username="testuser1", - password="changeme", - ssl=False, port=None): - """Get an amqp message from a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: amqp message body as string. Raise if get fails. - """ - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - channel = connection.channel() - method_frame, _, body = channel.basic_get(queue) - - if method_frame: - self.log.debug('Retreived message from {} queue:\n{}'.format(queue, - body)) - channel.basic_ack(method_frame.delivery_tag) - channel.close() - connection.close() - return body - else: - msg = 'No message retrieved.' 
- amulet.raise_status(amulet.FAIL, msg) - - def validate_memcache(self, sentry_unit, conf, os_release, - earliest_release=5, section='keystone_authtoken', - check_kvs=None): - """Check Memcache is running and is configured to be used - - Example call from Amulet test: - - def test_110_memcache(self): - u.validate_memcache(self.neutron_api_sentry, - '/etc/neutron/neutron.conf', - self._get_openstack_release()) - - :param sentry_unit: sentry unit - :param conf: OpenStack config file to check memcache settings - :param os_release: Current OpenStack release int code - :param earliest_release: Earliest Openstack release to check int code - :param section: OpenStack config file section to check - :param check_kvs: Dict of settings to check in config file - :returns: None - """ - if os_release < earliest_release: - self.log.debug('Skipping memcache checks for deployment. {} <' - 'mitaka'.format(os_release)) - return - _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} - self.log.debug('Checking memcached is running') - ret = self.validate_services_by_name({sentry_unit: ['memcached']}) - if ret: - amulet.raise_status(amulet.FAIL, msg='Memcache running check' - 'failed {}'.format(ret)) - else: - self.log.debug('OK') - self.log.debug('Checking memcache url is configured in {}'.format( - conf)) - if self.validate_config_data(sentry_unit, conf, section, _kvs): - message = "Memcache config error in: {}".format(conf) - amulet.raise_status(amulet.FAIL, msg=message) - else: - self.log.debug('OK') - self.log.debug('Checking memcache configuration in ' - '/etc/memcached.conf') - contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', - fatal=True) - ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if CompareHostReleases(ubuntu_release) <= 'trusty': - memcache_listen_addr = 'ip6-localhost' - else: - memcache_listen_addr = '::1' - expected = { - '-p': '11211', - '-l': memcache_listen_addr} - found = [] - for key, value in 
expected.items(): - for line in contents.split('\n'): - if line.startswith(key): - self.log.debug('Checking {} is set to {}'.format( - key, - value)) - assert value == line.split()[-1] - self.log.debug(line.split()[-1]) - found.append(key) - if sorted(found) == sorted(expected.keys()): - self.log.debug('OK') - else: - message = "Memcache config error in: /etc/memcached.conf" - amulet.raise_status(amulet.FAIL, msg=message) diff --git a/hooks/charmhelpers/contrib/openstack/cert_utils.py b/hooks/charmhelpers/contrib/openstack/cert_utils.py index 703fc6ef..5c961c58 100644 --- a/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -1,4 +1,4 @@ -# Copyright 2014-2018 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Common python helper functions used for OpenStack charm certificats. +# Common python helper functions used for OpenStack charm certificates. 
import os import json @@ -71,7 +71,7 @@ class CertRequest(object): def add_entry(self, net_type, cn, addresses): """Add a request to the batch - :param net_type: str netwrok space name request is for + :param net_type: str network space name request is for :param cn: str Canonical Name for certificate :param addresses: [] List of addresses to be used as SANs """ @@ -85,7 +85,7 @@ class CertRequest(object): addresses = [ip] # If a vip is being used without os-hostname config or # network spaces then we need to ensure the local units - # cert has the approriate vip in the SAN list + # cert has the appropriate vip in the SAN list vip = get_vip_in_network(resolve_network_cidr(ip)) if vip: addresses.append(vip) @@ -178,7 +178,7 @@ def get_certificate_request(json_encode=True, bindings=None): except NoNetworkBinding: log("Skipping request for certificate for ip in {} space, no " "local address found".format(binding), WARNING) - # Gurantee all SANs are covered + # Guarantee all SANs are covered # These are network addresses with no corresponding hostname. # Add the ips to the hostname cert to allow for this. req.add_hostname_cn_ip(_sans) @@ -357,7 +357,7 @@ def process_certificates(service_name, relation_id, unit, bindings=None): """Process the certificates supplied down the relation - :param service_name: str Name of service the certifcates are for. + :param service_name: str Name of service the certificates are for. :param relation_id: str Relation id providing the certs :param unit: str Unit providing the certs :param custom_hostname_link: str Name of custom link to create diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index b67dafda..54081f0c 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -25,7 +25,10 @@ import socket import time from base64 import b64decode -from subprocess import check_call, CalledProcessError +from subprocess import ( + check_call, + check_output, + CalledProcessError) import six @@ -453,18 +456,24 @@ class IdentityServiceContext(OSContextGenerator): serv_host = format_ipv6_addr(serv_host) or serv_host auth_host = rdata.get('auth_host') auth_host = format_ipv6_addr(auth_host) or auth_host + int_host = rdata.get('internal_host') + int_host = format_ipv6_addr(int_host) or int_host svc_protocol = rdata.get('service_protocol') or 'http' auth_protocol = rdata.get('auth_protocol') or 'http' + int_protocol = rdata.get('internal_protocol') or 'http' api_version = rdata.get('api_version') or '2.0' ctxt.update({'service_port': rdata.get('service_port'), 'service_host': serv_host, 'auth_host': auth_host, 'auth_port': rdata.get('auth_port'), + 'internal_host': int_host, + 'internal_port': rdata.get('internal_port'), 'admin_tenant_name': rdata.get('service_tenant'), 'admin_user': rdata.get('service_username'), 'admin_password': rdata.get('service_password'), 'service_protocol': svc_protocol, 'auth_protocol': auth_protocol, + 'internal_protocol': int_protocol, 'api_version': api_version}) if float(api_version) > 2: @@ -1358,7 +1367,7 @@ class NeutronPortContext(OSContextGenerator): mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I) for entry in ports: if re.match(mac_regex, entry): - # NIC is in known NICs and does NOT hace an IP address + # NIC is in known NICs and does NOT have an IP address if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]: # If the nic is part of a bridge then don't use it if is_bridge_member(hwaddr_to_nic[entry]): @@ -1781,6 +1790,10 @@ class NeutronAPIContext(OSContextGenerator): 'rel_key': 'enable-port-forwarding', 'default': False, }, + 'enable_fwaas': { + 
'rel_key': 'enable-fwaas', + 'default': False, + }, 'global_physnet_mtu': { 'rel_key': 'global-physnet-mtu', 'default': 1500, @@ -1815,6 +1828,11 @@ class NeutronAPIContext(OSContextGenerator): if ctxt['enable_port_forwarding']: l3_extension_plugins.append('port_forwarding') + if ctxt['enable_fwaas']: + l3_extension_plugins.append('fwaas_v2') + if ctxt['enable_nfg_logging']: + l3_extension_plugins.append('fwaas_v2_log') + ctxt['l3_extension_plugins'] = l3_extension_plugins return ctxt @@ -2379,6 +2397,12 @@ class DHCPAgentContext(OSContextGenerator): ctxt['enable_metadata_network'] = True ctxt['enable_isolated_metadata'] = True + ctxt['append_ovs_config'] = False + cmp_release = CompareOpenStackReleases( + os_release('neutron-common', base='icehouse')) + if cmp_release >= 'queens' and config('enable-dpdk'): + ctxt['append_ovs_config'] = True + return ctxt @staticmethod @@ -2570,22 +2594,48 @@ class OVSDPDKDeviceContext(OSContextGenerator): :returns: hex formatted CPU mask :rtype: str """ - num_cores = config('dpdk-socket-cores') - mask = 0 + return self.cpu_masks()['dpdk_lcore_mask'] + + def cpu_masks(self): + """Get hex formatted CPU masks + + The mask is based on using the first config:dpdk-socket-cores + cores of each NUMA node in the unit, followed by the + next config:pmd-socket-cores + + :returns: Dict of hex formatted CPU masks + :rtype: Dict[str, str] + """ + num_lcores = config('dpdk-socket-cores') + pmd_cores = config('pmd-socket-cores') + lcore_mask = 0 + pmd_mask = 0 for cores in self._numa_node_cores().values(): - for core in cores[:num_cores]: - mask = mask | 1 << core - return format(mask, '#04x') + for core in cores[:num_lcores]: + lcore_mask = lcore_mask | 1 << core + for core in cores[num_lcores:][:pmd_cores]: + pmd_mask = pmd_mask | 1 << core + return { + 'pmd_cpu_mask': format(pmd_mask, '#04x'), + 'dpdk_lcore_mask': format(lcore_mask, '#04x')} def socket_memory(self): - """Formatted list of socket memory configuration per NUMA node + 
"""Formatted list of socket memory configuration per socket. - :returns: socket memory configuration per NUMA node + :returns: socket memory configuration per socket. :rtype: str """ + lscpu_out = check_output( + ['lscpu', '-p=socket']).decode('UTF-8').strip() + sockets = set() + for line in lscpu_out.split('\n'): + try: + sockets.add(int(line)) + except ValueError: + # lscpu output is headed by comments so ignore them. + pass sm_size = config('dpdk-socket-memory') - node_regex = '/sys/devices/system/node/node*' - mem_list = [str(sm_size) for _ in glob.glob(node_regex)] + mem_list = [str(sm_size) for _ in sockets] if mem_list: return ','.join(mem_list) else: @@ -2650,7 +2700,7 @@ class OVSDPDKDeviceContext(OSContextGenerator): class BridgePortInterfaceMap(object): - """Build a map of bridge ports and interaces from charm configuration. + """Build a map of bridge ports and interfaces from charm configuration. NOTE: the handling of this detail in the charm is pre-deprecated. @@ -3099,7 +3149,7 @@ class SRIOVContext(OSContextGenerator): actual = min(int(requested), int(device.sriov_totalvfs)) if actual < int(requested): log('Requested VFs ({}) too high for device {}. Falling back ' - 'to value supprted by device: {}' + 'to value supported by device: {}' .format(requested, device.interface_name, device.sriov_totalvfs), level=WARNING) diff --git a/hooks/charmhelpers/contrib/openstack/deferred_events.py b/hooks/charmhelpers/contrib/openstack/deferred_events.py index 8765ee31..94eacf6c 100644 --- a/hooks/charmhelpers/contrib/openstack/deferred_events.py +++ b/hooks/charmhelpers/contrib/openstack/deferred_events.py @@ -244,7 +244,7 @@ def get_deferred_restarts(): def clear_deferred_restarts(services): - """Clear deferred restart events targetted at `services`. + """Clear deferred restart events targeted at `services`. :param services: Services with deferred actions to clear. 
:type services: List[str] @@ -253,7 +253,7 @@ def clear_deferred_restarts(services): def process_svc_restart(service): - """Respond to a service restart having occured. + """Respond to a service restart having occurred. :param service: Services that the action was performed against. :type service: str diff --git a/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py b/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py index 344a7662..431e972b 100755 --- a/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py +++ b/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -"""This script is an implemenation of policy-rc.d +"""This script is an implementation of policy-rc.d For further information on policy-rc.d see *1 diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index fb5607f3..b41314cb 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Various utilies for dealing with Neutron and the renaming from Quantum. +# Various utilities for dealing with Neutron and the renaming from Quantum. import six from subprocess import check_output @@ -251,7 +251,7 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None): def network_manager(): ''' Deals with the renaming of Quantum to Neutron in H and any situations - that require compatability (eg, deploying H with network-manager=quantum, + that require compatibility (eg, deploying H with network-manager=quantum, upgrading from G). 
''' release = os_release('nova-common') diff --git a/hooks/charmhelpers/contrib/openstack/policyd.py b/hooks/charmhelpers/contrib/openstack/policyd.py index f2bb21e9..6fa06f26 100644 --- a/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/hooks/charmhelpers/contrib/openstack/policyd.py @@ -1,4 +1,4 @@ -# Copyright 2019 Canonical Ltd +# Copyright 2019-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -59,7 +59,7 @@ provided: The functions should be called from the install and upgrade hooks in the charm. The `maybe_do_policyd_overrides_on_config_changed` function is designed to be called on the config-changed hook, in that it does an additional check to -ensure that an already overriden policy.d in an upgrade or install hooks isn't +ensure that an already overridden policy.d in an upgrade or install hooks isn't repeated. In order the *enable* this functionality, the charm's install, config_changed, @@ -334,7 +334,7 @@ def maybe_do_policyd_overrides(openstack_release, restart_handler() -@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead") +@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead") def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs): """This function is designed to be called from the config changed hook. diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index d36af2aa..875e1393 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -1,10 +1,22 @@ global - log /var/lib/haproxy/dev/log local0 - log /var/lib/haproxy/dev/log local1 notice + # NOTE: on startup haproxy chroots to /var/lib/haproxy. + # + # Unfortunately the program will open some files prior to the call to + # chroot never to reopen them, and some after. 
So looking at the on-disk + # layout of haproxy resources you will find some resources relative to / + # such as the admin socket, and some relative to /var/lib/haproxy such as + # the log socket. + # + # The logging socket is (re-)opened after the chroot and must be relative + # to /var/lib/haproxy. + log /dev/log local0 + log /dev/log local1 notice maxconn 20000 user haproxy group haproxy spread-checks 0 + # The admin socket is opened prior to the chroot never to be reopened, so + # it lives outside the chroot directory in the filesystem. stats socket /var/run/haproxy/admin.sock mode 600 level admin stats timeout 2m diff --git a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf index 23b62a38..b9ca3963 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf +++ b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf @@ -15,7 +15,7 @@ Listen {{ public_port }} {% if port -%} WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \ - display-name=%{GROUP} + display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8 WSGIProcessGroup {{ service_name }} WSGIScriptAlias / {{ script }} WSGIApplicationGroup %{GLOBAL} @@ -41,7 +41,7 @@ Listen {{ public_port }} {% if admin_port -%} WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ - display-name=%{GROUP} + display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8 WSGIProcessGroup {{ service_name }}-admin WSGIScriptAlias / {{ admin_script }} WSGIApplicationGroup %{GLOBAL} @@ -67,7 +67,7 @@ Listen {{ public_port }} {% if public_port -%} WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ - display-name=%{GROUP} + display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8 WSGIProcessGroup 
{{ service_name }}-public WSGIScriptAlias / {{ public_script }} WSGIApplicationGroup %{GLOBAL} diff --git a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf index 23b62a38..b9ca3963 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf +++ b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf @@ -15,7 +15,7 @@ Listen {{ public_port }} {% if port -%} WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \ - display-name=%{GROUP} + display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8 WSGIProcessGroup {{ service_name }} WSGIScriptAlias / {{ script }} WSGIApplicationGroup %{GLOBAL} @@ -41,7 +41,7 @@ Listen {{ public_port }} {% if admin_port -%} WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ - display-name=%{GROUP} + display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8 WSGIProcessGroup {{ service_name }}-admin WSGIScriptAlias / {{ admin_script }} WSGIApplicationGroup %{GLOBAL} @@ -67,7 +67,7 @@ Listen {{ public_port }} {% if public_port -%} WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ - display-name=%{GROUP} + display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8 WSGIProcessGroup {{ service_name }}-public WSGIScriptAlias / {{ public_script }} WSGIApplicationGroup %{GLOBAL} diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 1656bd43..d5d301e6 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -106,6 +106,8 @@ from charmhelpers.fetch import ( filter_installed_packages, filter_missing_packages, ubuntu_apt_pkg as apt, + OPENSTACK_RELEASES, + UBUNTU_OPENSTACK_RELEASE, ) from charmhelpers.fetch.snap import ( @@ -132,54 +134,9 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') -OPENSTACK_RELEASES = ( - 'diablo', - 'essex', - 'folsom', - 'grizzly', - 'havana', - 'icehouse', - 'juno', - 'kilo', - 'liberty', - 'mitaka', - 'newton', - 'ocata', - 'pike', - 'queens', - 'rocky', - 'stein', - 'train', - 'ussuri', - 'victoria', - 'wallaby', -) - -UBUNTU_OPENSTACK_RELEASE = OrderedDict([ - ('oneiric', 'diablo'), - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), - ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ('disco', 'stein'), - ('eoan', 'train'), - ('focal', 'ussuri'), - ('groovy', 'victoria'), - ('hirsute', 'wallaby'), -]) - - OPENSTACK_CODENAMES = OrderedDict([ + # NOTE(lourot): 'yyyy.i' isn't actually mapping with any real version + # number. This just means the i-th version of the year yyyy. ('2011.2', 'diablo'), ('2012.1', 'essex'), ('2012.2', 'folsom'), @@ -200,6 +157,8 @@ OPENSTACK_CODENAMES = OrderedDict([ ('2020.1', 'ussuri'), ('2020.2', 'victoria'), ('2021.1', 'wallaby'), + ('2021.2', 'xena'), + ('2022.1', 'yoga'), ]) # The ugly duckling - must list releases oldest to newest @@ -701,7 +660,7 @@ def import_key(keyid): def get_source_and_pgp_key(source_and_key): """Look for a pgp key ID or ascii-armor key in the given input. 
- :param source_and_key: Sting, "source_spec|keyid" where '|keyid' is + :param source_and_key: String, "source_spec|keyid" where '|keyid' is optional. :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id if there was no '|' in the source_and_key string. @@ -721,7 +680,7 @@ def configure_installation_source(source_plus_key): The functionality is provided by charmhelpers.fetch.add_source() The difference between the two functions is that add_source() signature requires the key to be passed directly, whereas this function passes an - optional key by appending '|' to the end of the source specificiation + optional key by appending '|' to the end of the source specification 'source'. Another difference from add_source() is that the function calls sys.exit(1) @@ -808,7 +767,7 @@ def get_endpoint_notifications(service_names, rel_name='identity-service'): def endpoint_changed(service_name, rel_name='identity-service'): - """Whether a new notification has been recieved for an endpoint. + """Whether a new notification has been received for an endpoint. :param service_name: Service name eg nova, neutron, placement etc :type service_name: str @@ -834,7 +793,7 @@ def endpoint_changed(service_name, rel_name='identity-service'): def save_endpoint_changed_triggers(service_names, rel_name='identity-service'): - """Save the enpoint triggers in db so it can be tracked if they changed. + """Save the endpoint triggers in db so it can be tracked if they changed. :param service_names: List of service name. 
:type service_name: List @@ -1502,9 +1461,9 @@ def remote_restart(rel_name, remote_service=None): if remote_service: trigger['remote-service'] = remote_service for rid in relation_ids(rel_name): - # This subordinate can be related to two seperate services using + # This subordinate can be related to two separate services using # different subordinate relations so only issue the restart if - # the principle is conencted down the relation we think it is + # the principle is connected down the relation we think it is if related_units(relid=rid): relation_set(relation_id=rid, relation_settings=trigger, @@ -1621,7 +1580,7 @@ def manage_payload_services(action, services=None, charm_func=None): """Run an action against all services. An optional charm_func() can be called. It should raise an Exception to - indicate that the function failed. If it was succesfull it should return + indicate that the function failed. If it was successful it should return None or an optional message. The signature for charm_func is: @@ -1880,7 +1839,7 @@ def pausable_restart_on_change(restart_map, stopstart=False, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] :returns: decorator to use a restart_on_change with pausability :rtype: decorator diff --git a/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/hooks/charmhelpers/contrib/openstack/vaultlocker.py index 4ee6c1db..e5418c39 100644 --- a/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -1,4 +1,4 @@ -# Copyright 2018 Canonical Limited. +# Copyright 2018-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -48,7 +48,7 @@ class VaultKVContext(context.OSContextGenerator): "but it's not available. Is secrets-stroage relation " "made, but encrypt option not set?", level=hookenv.WARNING) - # return an emptry context on hvac import error + # return an empty context on hvac import error return {} ctxt = {} # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323 diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index d1c61754..3eb46d70 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# -# Copyright 2012 Canonical Ltd. -# # This file is sourced from lp:openstack-charm-helpers # # Authors: @@ -605,7 +602,7 @@ class BasePool(object): class Pool(BasePool): - """Compability shim for any descendents external to this library.""" + """Compatibility shim for any descendents external to this library.""" @deprecate( 'The ``Pool`` baseclass has been replaced by ``BasePool`` class.') @@ -1535,7 +1532,7 @@ def map_block_storage(service, pool, image): def filesystem_mounted(fs): - """Determine whether a filesytems is already mounted.""" + """Determine whether a filesystem is already mounted.""" return fs in [f for f, m in mounts()] @@ -1904,7 +1901,7 @@ class CephBrokerRq(object): set the ceph-mon unit handling the broker request will set its default value. 
:type erasure_profile: str - :param allow_ec_overwrites: allow EC pools to be overriden + :param allow_ec_overwrites: allow EC pools to be overridden :type allow_ec_overwrites: bool :raises: AssertionError if provided data is of invalid type/range """ @@ -1949,7 +1946,7 @@ class CephBrokerRq(object): :param lrc_locality: Group the coding and data chunks into sets of size locality (lrc plugin) :type lrc_locality: int - :param durability_estimator: The number of parity chuncks each of which includes + :param durability_estimator: The number of parity chunks each of which includes a data chunk in its calculation range (shec plugin) :type durability_estimator: int :param helper_chunks: The number of helper chunks to use for recovery operations @@ -2327,7 +2324,7 @@ class CephOSDConfContext(CephConfContext): settings are in conf['osd_from_client'] and finally settings which do clash are in conf['osd_from_client_conflict']. Rather than silently drop the conflicting settings they are provided in the context so they can be - rendered commented out to give some visability to the admin. + rendered commented out to give some visibility to the admin. """ def __init__(self, permitted_sections=None): diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py index c8bde692..d0a57211 100644 --- a/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ from subprocess import ( ################################################## def deactivate_lvm_volume_group(block_device): ''' - Deactivate any volume gruop associated with an LVM physical volume. + Deactivate any volume group associated with an LVM physical volume. 
:param block_device: str: Full path to LVM physical volume ''' diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 47eebb51..e94247a2 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2013-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,6 @@ # limitations under the License. "Interactions with the Juju environment" -# Copyright 2013 Canonical Ltd. # # Authors: # Charm Helpers Developers @@ -610,7 +609,7 @@ def expected_related_units(reltype=None): relation_type())) :param reltype: Relation type to list data for, default is to list data for - the realtion type we are currently executing a hook for. + the relation type we are currently executing a hook for. :type reltype: str :returns: iterator :rtype: types.GeneratorType @@ -627,7 +626,7 @@ def expected_related_units(reltype=None): @cached def relation_for_unit(unit=None, rid=None): - """Get the json represenation of a unit's relation""" + """Get the json representation of a unit's relation""" unit = unit or remote_unit() relation = relation_get(unit=unit, rid=rid) for key in relation: @@ -1614,11 +1613,11 @@ def env_proxy_settings(selected_settings=None): def _contains_range(addresses): """Check for cidr or wildcard domain in a string. - Given a string comprising a comma seperated list of ip addresses + Given a string comprising a comma separated list of ip addresses and domain names, determine whether the string contains IP ranges or wildcard domains. - :param addresses: comma seperated list of domains and ip addresses. + :param addresses: comma separated list of domains and ip addresses. 
:type addresses: str """ return ( diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index d25e6c59..994ec8a0 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -217,7 +217,7 @@ def service_resume(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", **kwargs): """Resume a system service. - Reenable starting again at boot. Start the service. + Re-enable starting again at boot. Start the service. :param service_name: the name of the service to resume :param init_dir: the path to the init dir @@ -727,7 +727,7 @@ class restart_on_change(object): :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] """ self.restart_map = restart_map @@ -828,7 +828,7 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] :returns: result of lambda_f() :rtype: ANY @@ -880,7 +880,7 @@ def _post_restart_on_change_helper(checksums, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. 
:type pre_restarts_wait_f: Callable[None, None] """ if restart_functions is None: @@ -914,7 +914,7 @@ def _post_restart_on_change_helper(checksums, def pwgen(length=None): - """Generate a random pasword.""" + """Generate a random password.""" if length is None: # A random length is ok to use a weak PRNG length = random.choice(range(35, 45)) diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py index 5aa4196d..e710c0e0 100644 --- a/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -28,6 +28,7 @@ UBUNTU_RELEASES = ( 'focal', 'groovy', 'hirsute', + 'impish', ) diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py index e8df0452..28c6b3f5 100644 --- a/hooks/charmhelpers/core/strutils.py +++ b/hooks/charmhelpers/core/strutils.py @@ -18,8 +18,11 @@ import six import re +TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} +FALSEY_STRINGS = {'n', 'no', 'false', 'f', 'off'} -def bool_from_string(value): + +def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY_STRINGS, assume_false=False): """Interpret string value as boolean. Returns True if value translates to True otherwise False. @@ -32,9 +35,9 @@ def bool_from_string(value): value = value.strip().lower() - if value in ['y', 'yes', 'true', 't', 'on']: + if value in truthy_strings: return True - elif value in ['n', 'no', 'false', 'f', 'off']: + elif value in falsey_strings or assume_false: return False msg = "Unable to interpret string value '%s' as boolean" % (value) diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py index ab554327..d9b8d0b0 100644 --- a/hooks/charmhelpers/core/unitdata.py +++ b/hooks/charmhelpers/core/unitdata.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -61,7 +61,7 @@ Here's a fully worked integration example using hookenv.Hooks:: 'previous value', prev, 'current value', cur) - # Get some unit specific bookeeping + # Get some unit specific bookkeeping if not db.get('pkg_key'): key = urllib.urlopen('https://example.com/pkg_key').read() db.set('pkg_key', key) @@ -449,7 +449,7 @@ class HookData(object): 'previous value', prev, 'current value', cur) - # Get some unit specific bookeeping + # Get some unit specific bookkeeping if not db.get('pkg_key'): key = urllib.urlopen('https://example.com/pkg_key').read() db.set('pkg_key', key) diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 5b689f5b..9497ee05 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -106,6 +106,8 @@ if __platform__ == "ubuntu": apt_pkg = fetch.ubuntu_apt_pkg get_apt_dpkg_env = fetch.get_apt_dpkg_env get_installed_version = fetch.get_installed_version + OPENSTACK_RELEASES = fetch.OPENSTACK_RELEASES + UBUNTU_OPENSTACK_RELEASE = fetch.UBUNTU_OPENSTACK_RELEASE elif __platform__ == "centos": yum_search = fetch.yum_search @@ -203,7 +205,7 @@ def plugins(fetch_handlers=None): classname) plugin_list.append(handler_class()) except NotImplementedError: - # Skip missing plugins so that they can be ommitted from + # Skip missing plugins so that they can be omitted from # installation if desired log("FetchHandler {} not found, skipping plugin".format( handler_name)) diff --git a/hooks/charmhelpers/fetch/python/packages.py b/hooks/charmhelpers/fetch/python/packages.py index 6e95028b..60048354 100644 --- a/hooks/charmhelpers/fetch/python/packages.py +++ b/hooks/charmhelpers/fetch/python/packages.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # coding: utf-8 -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ __author__ = "Jorge Niedbalski " def pip_execute(*args, **kwargs): - """Overriden pip_execute() to stop sys.path being changed. + """Overridden pip_execute() to stop sys.path being changed. The act of importing main from the pip module seems to cause add wheels from the /usr/share/python-wheels which are installed by various tools. 
@@ -142,8 +142,10 @@ def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" if six.PY2: apt_install('python-virtualenv') + extra_flags = [] else: - apt_install('python3-virtualenv') + apt_install(['python3-virtualenv', 'virtualenv']) + extra_flags = ['--python=python3'] if path: venv_path = path @@ -151,4 +153,4 @@ def pip_create_virtualenv(path=None): venv_path = os.path.join(charm_dir(), 'venv') if not os.path.exists(venv_path): - subprocess.check_call(['virtualenv', venv_path]) + subprocess.check_call(['virtualenv', venv_path] + extra_flags) diff --git a/hooks/charmhelpers/fetch/snap.py b/hooks/charmhelpers/fetch/snap.py index fc70aa94..36d6bce9 100644 --- a/hooks/charmhelpers/fetch/snap.py +++ b/hooks/charmhelpers/fetch/snap.py @@ -1,4 +1,4 @@ -# Copyright 2014-2017 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -65,7 +65,7 @@ def _snap_exec(commands): retry_count += + 1 if retry_count > SNAP_NO_LOCK_RETRY_COUNT: raise CouldNotAcquireLockException( - 'Could not aquire lock after {} attempts' + 'Could not acquire lock after {} attempts' .format(SNAP_NO_LOCK_RETRY_COUNT)) return_code = e.returncode log('Snap failed to acquire lock, trying again in {} seconds.' diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py index 812a11a2..6c7cf6fc 100644 --- a/hooks/charmhelpers/fetch/ubuntu.py +++ b/hooks/charmhelpers/fetch/ubuntu.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -208,12 +208,79 @@ CLOUD_ARCHIVE_POCKETS = { 'wallaby/proposed': 'focal-proposed/wallaby', 'focal-wallaby/proposed': 'focal-proposed/wallaby', 'focal-proposed/wallaby': 'focal-proposed/wallaby', + # Xena + 'xena': 'focal-updates/xena', + 'focal-xena': 'focal-updates/xena', + 'focal-xena/updates': 'focal-updates/xena', + 'focal-updates/xena': 'focal-updates/xena', + 'xena/proposed': 'focal-proposed/xena', + 'focal-xena/proposed': 'focal-proposed/xena', + 'focal-proposed/xena': 'focal-proposed/xena', + # Yoga + 'yoga': 'focal-updates/yoga', + 'focal-yoga': 'focal-updates/yoga', + 'focal-yoga/updates': 'focal-updates/yoga', + 'focal-updates/yoga': 'focal-updates/yoga', + 'yoga/proposed': 'focal-proposed/yoga', + 'focal-yoga/proposed': 'focal-proposed/yoga', + 'focal-proposed/yoga': 'focal-proposed/yoga', } +OPENSTACK_RELEASES = ( + 'diablo', + 'essex', + 'folsom', + 'grizzly', + 'havana', + 'icehouse', + 'juno', + 'kilo', + 'liberty', + 'mitaka', + 'newton', + 'ocata', + 'pike', + 'queens', + 'rocky', + 'stein', + 'train', + 'ussuri', + 'victoria', + 'wallaby', + 'xena', + 'yoga', +) + + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zesty', 'ocata'), + ('artful', 'pike'), + ('bionic', 'queens'), + ('cosmic', 'rocky'), + ('disco', 'stein'), + ('eoan', 'train'), + ('focal', 'ussuri'), + ('groovy', 'victoria'), + ('hirsute', 'wallaby'), + ('impish', 'xena'), +]) + + APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. -CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times. +CMD_RETRY_COUNT = 10 # Retry a failing fatal command X times. 
def filter_installed_packages(packages): @@ -246,9 +313,9 @@ def filter_missing_packages(packages): def apt_cache(*_, **__): """Shim returning an object simulating the apt_pkg Cache. - :param _: Accept arguments for compability, not used. + :param _: Accept arguments for compatibility, not used. :type _: any - :param __: Accept keyword arguments for compability, not used. + :param __: Accept keyword arguments for compatibility, not used. :type __: any :returns:Object used to interrogate the system apt and dpkg databases. :rtype:ubuntu_apt_pkg.Cache @@ -283,7 +350,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False): :param fatal: Whether the command's output should be checked and retried. :type fatal: bool - :param quiet: if True (default), supress log message to stdout/stderr + :param quiet: if True (default), suppress log message to stdout/stderr :type quiet: bool :raises: subprocess.CalledProcessError """ @@ -397,7 +464,7 @@ def import_key(key): A Radix64 format keyid is also supported for backwards compatibility. In this case Ubuntu keyserver will be queried for a key via HTTPS by its keyid. This method - is less preferrable because https proxy servers may + is less preferable because https proxy servers may require traffic decryption which is equivalent to a man-in-the-middle attack (a proxy server impersonates keyserver TLS certificates and has to be explicitly @@ -574,6 +641,10 @@ def add_source(source, key=None, fail_invalid=False): with be used. If staging is NOT used then the cloud archive [3] will be added, and the 'ubuntu-cloud-keyring' package will be added for the current distro. + '': translate to cloud: based on the current + distro version (i.e. for 'ussuri' this will either be 'bionic-ussuri' or + 'distro'. + '/proposed': as above, but for proposed. Otherwise the source is not recognised and this is logged to the juju log. However, no error is raised, unless sys_error_on_exit is True. 
@@ -592,7 +663,7 @@ def add_source(source, key=None, fail_invalid=False): id may also be used, but be aware that only insecure protocols are available to retrieve the actual public key from a public keyserver placing your Juju environment at risk. ppa and cloud archive keys - are securely added automtically, so sould not be provided. + are securely added automatically, so should not be provided. @param fail_invalid: (boolean) if True, then the function raises a SourceConfigError is there is no matching installation source. @@ -600,6 +671,12 @@ def add_source(source, key=None, fail_invalid=False): @raises SourceConfigError() if for cloud:, the is not a valid pocket in CLOUD_ARCHIVE_POCKETS """ + # extract the OpenStack versions from the CLOUD_ARCHIVE_POCKETS; can't use + # the list in contrib.openstack.utils as it might not be included in + # classic charms and would break everything. Having OpenStack specific + # code in this file is a bit of an antipattern, anyway. + os_versions_regex = "({})".format("|".join(OPENSTACK_RELEASES)) + _mapping = OrderedDict([ (r"^distro$", lambda: None), # This is a NOP (r"^(?:proposed|distro-proposed)$", _add_proposed), @@ -609,6 +686,9 @@ def add_source(source, key=None, fail_invalid=False): (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check), (r"^cloud:(.*)$", _add_cloud_pocket), (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check), + (r"^{}\/proposed$".format(os_versions_regex), + _add_bare_openstack_proposed), + (r"^{}$".format(os_versions_regex), _add_bare_openstack), ]) if source is None: source = '' @@ -640,7 +720,7 @@ def _add_proposed(): Uses get_distrib_codename to determine the correct stanza for the deb line. - For intel architecutres PROPOSED_POCKET is used for the release, but for + For Intel architectures PROPOSED_POCKET is used for the release, but for other architectures PROPOSED_PORTS_POCKET is used for the release. 
""" release = get_distrib_codename() @@ -662,7 +742,8 @@ def _add_apt_repository(spec): series = get_distrib_codename() spec = spec.replace('{series}', series) _run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https', 'http'])) + cmd_env=env_proxy_settings(['https', 'http', 'no_proxy']) + ) def _add_cloud_pocket(pocket): @@ -738,6 +819,73 @@ def _verify_is_ubuntu_rel(release, os_release): 'version ({})'.format(release, os_release, ubuntu_rel)) +def _add_bare_openstack(openstack_release): + """Add cloud or distro based on the release given. + + The spec given is, say, 'ussuri', but this could apply cloud:bionic-ussuri + or 'distro' depending on whether the ubuntu release is bionic or focal. + + :param openstack_release: the OpenStack codename to determine the release + for. + :type openstack_release: str + :raises: SourceConfigError + """ + # TODO(ajkavanagh) - surely this means we should be removing cloud archives + # if they exist? + __add_bare_helper(openstack_release, "{}-{}", lambda: None) + + +def _add_bare_openstack_proposed(openstack_release): + """Add cloud of distro but with proposed. + + The spec given is, say, 'ussuri' but this could apply + cloud:bionic-ussuri/proposed or 'distro/proposed' depending on whether the + ubuntu release is bionic or focal. + + :param openstack_release: the OpenStack codename to determine the release + for. + :type openstack_release: str + :raises: SourceConfigError + """ + __add_bare_helper(openstack_release, "{}-{}/proposed", _add_proposed) + + +def __add_bare_helper(openstack_release, pocket_format, final_function): + """Helper for _add_bare_openstack[_proposed] + + The bulk of the work between the two functions is exactly the same except + for the pocket format and the function that is run if it's the distro + version. + + :param openstack_release: the OpenStack codename. e.g. 
ussuri + :type openstack_release: str + :param pocket_format: the pocket formatter string to construct a pocket str + from the openstack_release and the current ubuntu version. + :type pocket_format: str + :param final_function: the function to call if it is the distro version. + :type final_function: Callable + :raises SourceConfigError on error + """ + ubuntu_version = get_distrib_codename() + possible_pocket = pocket_format.format(ubuntu_version, openstack_release) + if possible_pocket in CLOUD_ARCHIVE_POCKETS: + _add_cloud_pocket(possible_pocket) + return + # Otherwise it's almost certainly the distro version; verify that it + # exists. + try: + assert UBUNTU_OPENSTACK_RELEASE[ubuntu_version] == openstack_release + except KeyError: + raise SourceConfigError( + "Invalid ubuntu version {} isn't known to this library" + .format(ubuntu_version)) + except AssertionError: + raise SourceConfigError( + 'Invalid OpenStack release specified: {} for Ubuntu version {}' + .format(openstack_release, ubuntu_version)) + final_function() + + def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_message="", cmd_env=None, quiet=False): """Run a command and retry until success or max_retries is reached. diff --git a/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index a2fbe0e5..436e1776 100644 --- a/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -1,4 +1,4 @@ -# Copyright 2019 Canonical Ltd +# Copyright 2019-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -209,7 +209,7 @@ sys.modules[__name__].config = Config() def init(): - """Compability shim that does nothing.""" + """Compatibility shim that does nothing.""" pass @@ -264,7 +264,7 @@ def version_compare(a, b): else: raise RuntimeError('Unable to compare "{}" and "{}", according to ' 'our logic they are neither greater, equal nor ' - 'less than each other.') + 'less than each other.'.format(a, b)) class PkgVersion(): diff --git a/hooks/charmhelpers/osplatform.py b/hooks/charmhelpers/osplatform.py index 78c81af5..1ace468f 100644 --- a/hooks/charmhelpers/osplatform.py +++ b/hooks/charmhelpers/osplatform.py @@ -28,6 +28,9 @@ def get_platform(): elif "elementary" in current_platform: # ElementaryOS fails to run tests locally without this. return "ubuntu" + elif "Pop!_OS" in current_platform: + # Pop!_OS also fails to run tests locally without this. + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." .format(current_platform)) diff --git a/lib/charms_ceph/crush_utils.py b/lib/charms_ceph/crush_utils.py index 8fe09fa4..37084bf1 100644 --- a/lib/charms_ceph/crush_utils.py +++ b/lib/charms_ceph/crush_utils.py @@ -79,9 +79,9 @@ class Crushmap(object): stdin=crush.stdout) .decode('UTF-8')) except CalledProcessError as e: - log("Error occured while loading and decompiling CRUSH map:" + log("Error occurred while loading and decompiling CRUSH map:" "{}".format(e), ERROR) - raise "Failed to read CRUSH map" + raise def ensure_bucket_is_present(self, bucket_name): if bucket_name not in [bucket.name for bucket in self.buckets()]: @@ -111,7 +111,7 @@ class Crushmap(object): return ceph_output except CalledProcessError as e: log("save error: {}".format(e)) - raise "Failed to save CRUSH map." 
+ raise def build_crushmap(self): """Modifies the current CRUSH map to include the new buckets""" diff --git a/lib/charms_ceph/utils.py b/lib/charms_ceph/utils.py index e5c38793..9b7299dd 100644 --- a/lib/charms_ceph/utils.py +++ b/lib/charms_ceph/utils.py @@ -14,6 +14,7 @@ import collections import glob +import itertools import json import os import pyudev @@ -24,6 +25,7 @@ import subprocess import sys import time import uuid +import functools from contextlib import contextmanager from datetime import datetime @@ -501,30 +503,33 @@ def ceph_user(): class CrushLocation(object): - def __init__(self, - name, - identifier, - host, - rack, - row, - datacenter, - chassis, - root): - self.name = name + def __init__(self, identifier, name, osd="", host="", chassis="", + rack="", row="", pdu="", pod="", room="", + datacenter="", zone="", region="", root=""): self.identifier = identifier + self.name = name + self.osd = osd self.host = host + self.chassis = chassis self.rack = rack self.row = row + self.pdu = pdu + self.pod = pod + self.room = room self.datacenter = datacenter - self.chassis = chassis + self.zone = zone + self.region = region self.root = root def __str__(self): - return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ - "chassis :{} root: {}".format(self.name, self.identifier, - self.host, self.rack, self.row, - self.datacenter, self.chassis, - self.root) + return "name: {} id: {} osd: {} host: {} chassis: {} rack: {} " \ + "row: {} pdu: {} pod: {} room: {} datacenter: {} zone: {} " \ + "region: {} root: {}".format(self.name, self.identifier, + self.osd, self.host, self.chassis, + self.rack, self.row, self.pdu, + self.pod, self.room, + self.datacenter, self.zone, + self.region, self.root) def __eq__(self, other): return not self.name < other.name and not other.name < self.name @@ -571,10 +576,53 @@ def get_osd_weight(osd_id): raise +def _filter_nodes_and_set_attributes(node, node_lookup_map, lookup_type): + """Get all nodes of the desired type, 
with all their attributes. + + These attributes can be direct or inherited from ancestors. + """ + attribute_dict = {node['type']: node['name']} + if node['type'] == lookup_type: + attribute_dict['name'] = node['name'] + attribute_dict['identifier'] = node['id'] + return [attribute_dict] + elif not node.get('children'): + return [attribute_dict] + else: + descendant_attribute_dicts = [ + _filter_nodes_and_set_attributes(node_lookup_map[node_id], + node_lookup_map, lookup_type) + for node_id in node.get('children', []) + ] + return [dict(attribute_dict, **descendant_attribute_dict) + for descendant_attribute_dict + in itertools.chain.from_iterable(descendant_attribute_dicts)] + + +def _flatten_roots(nodes, lookup_type='host'): + """Get a flattened list of nodes of the desired type. + + :param nodes: list of nodes defined as a dictionary of attributes and + children + :type nodes: List[Dict[int, Any]] + :param lookup_type: type of searched node + :type lookup_type: str + :returns: flattened list of nodes + :rtype: List[Dict[str, Any]] + """ + lookup_map = {node['id']: node for node in nodes} + root_attributes_dicts = [_filter_nodes_and_set_attributes(node, lookup_map, + lookup_type) + for node in nodes if node['type'] == 'root'] + # get a flattened list of roots. + return list(itertools.chain.from_iterable(root_attributes_dicts)) + + def get_osd_tree(service): """Returns the current osd map in JSON. :returns: List. + :rtype: List[CrushLocation] :raises: ValueError if the monmap fails to parse. 
Also raises CalledProcessError if our ceph command fails """ @@ -585,35 +633,14 @@ def get_osd_tree(service): .decode('UTF-8')) try: json_tree = json.loads(tree) - crush_list = [] - # Make sure children are present in the json - if not json_tree['nodes']: - return None - host_nodes = [ - node for node in json_tree['nodes'] - if node['type'] == 'host' - ] - for host in host_nodes: - crush_list.append( - CrushLocation( - name=host.get('name'), - identifier=host['id'], - host=host.get('host'), - rack=host.get('rack'), - row=host.get('row'), - datacenter=host.get('datacenter'), - chassis=host.get('chassis'), - root=host.get('root') - ) - ) - return crush_list + roots = _flatten_roots(json_tree["nodes"]) + return [CrushLocation(**host) for host in roots] except ValueError as v: log("Unable to parse ceph tree json: {}. Error: {}".format( tree, v)) raise except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( - e)) + log("ceph osd tree command failed with message: {}".format(e)) raise @@ -669,7 +696,9 @@ def get_local_osd_ids(): dirs = os.listdir(osd_path) for osd_dir in dirs: osd_id = osd_dir.split('-')[1] - if _is_int(osd_id): + if (_is_int(osd_id) and + filesystem_mounted(os.path.join( + os.sep, osd_path, osd_dir))): osd_ids.append(osd_id) except OSError: raise @@ -3271,13 +3300,14 @@ def determine_packages(): def determine_packages_to_remove(): """Determines packages for removal + Note: if in a container, then the CHRONY_PACKAGE is removed. 
+    :returns: list of packages to be removed
+    :rtype: List[str]
     """
     rm_packages = REMOVE_PACKAGES.copy()
     if is_container():
-        install_list = filter_missing_packages(CHRONY_PACKAGE)
-        if not install_list:
-            rm_packages.append(CHRONY_PACKAGE)
+        rm_packages.extend(filter_missing_packages([CHRONY_PACKAGE]))
     return rm_packages
@@ -3376,3 +3406,132 @@ def apply_osd_settings(settings):
                 level=ERROR)
             raise OSDConfigSetError
     return True
+
+
+def enabled_manager_modules():
+    """Return a list of enabled manager modules.
+
+    :rtype: List[str]
+    """
+    cmd = ['ceph', 'mgr', 'module', 'ls']
+    try:
+        modules = subprocess.check_output(cmd).decode('UTF-8')
+    except subprocess.CalledProcessError as e:
+        log("Failed to list ceph modules: {}".format(e), WARNING)
+        return []
+    modules = json.loads(modules)
+    return modules['enabled_modules']
+
+
+def is_mgr_module_enabled(module):
+    """Is a given manager module enabled.
+
+    :param module:
+    :type module: str
+    :returns: Whether the named module is enabled
+    :rtype: bool
+    """
+    return module in enabled_manager_modules()
+
+
+is_dashboard_enabled = functools.partial(is_mgr_module_enabled, 'dashboard')
+
+
+def mgr_enable_module(module):
+    """Enable a Ceph Manager Module.
+
+    :param module: The module name to enable
+    :type module: str
+
+    :raises: subprocess.CalledProcessError
+    """
+    if not is_mgr_module_enabled(module):
+        subprocess.check_call(['ceph', 'mgr', 'module', 'enable', module])
+        return True
+    return False
+
+
+mgr_enable_dashboard = functools.partial(mgr_enable_module, 'dashboard')
+
+
+def mgr_disable_module(module):
+    """Disable a Ceph Manager Module. 
+
+    :param module: The module name to disable
+    :type module: str
+
+    :raises: subprocess.CalledProcessError
+    """
+    if is_mgr_module_enabled(module):
+        subprocess.check_call(['ceph', 'mgr', 'module', 'disable', module])
+        return True
+    return False
+
+
+mgr_disable_dashboard = functools.partial(mgr_disable_module, 'dashboard')
+
+
+def ceph_config_set(name, value, who):
+    """Set a ceph config option
+
+    :param name: key to set
+    :type name: str
+    :param value: value corresponding to key
+    :type value: str
+    :param who: Config area the key is associated with (e.g. 'dashboard')
+    :type who: str
+
+    :raises: subprocess.CalledProcessError
+    """
+    subprocess.check_call(['ceph', 'config', 'set', who, name, value])
+
+
+mgr_config_set = functools.partial(ceph_config_set, who='mgr')
+
+
+def ceph_config_get(name, who):
+    """Retrieve the value of a ceph config option
+
+    :param name: key to lookup
+    :type name: str
+    :param who: Config area the key is associated with (e.g. 'dashboard')
+    :type who: str
+    :returns: Value associated with key
+    :rtype: str
+    :raises: subprocess.CalledProcessError
+    """
+    return subprocess.check_output(
+        ['ceph', 'config', 'get', who, name]).decode('UTF-8')
+
+
+mgr_config_get = functools.partial(ceph_config_get, who='mgr')
+
+
+def _dashboard_set_ssl_artifact(path, artifact_name, hostname=None):
+    """Set SSL dashboard config option.
+
+    :param path: Path to file
+    :type path: str
+    :param artifact_name: Option name for setting the artifact
+    :type artifact_name: str
+    :param hostname: If hostname is set artifact will only be associated with
+                     the dashboard on that host. 
+ :type hostname: str + :raises: subprocess.CalledProcessError + """ + cmd = ['ceph', 'dashboard', artifact_name] + if hostname: + cmd.append(hostname) + cmd.extend(['-i', path]) + log(cmd, level=DEBUG) + subprocess.check_call(cmd) + + +dashboard_set_ssl_certificate = functools.partial( + _dashboard_set_ssl_artifact, + artifact_name='set-ssl-certificate') + + +dashboard_set_ssl_certificate_key = functools.partial( + _dashboard_set_ssl_artifact, + artifact_name='set-ssl-certificate-key') diff --git a/osci.yaml b/osci.yaml index 64b884b0..cc369b42 100644 --- a/osci.yaml +++ b/osci.yaml @@ -3,10 +3,16 @@ - charm-unit-jobs check: jobs: + - vault-impish-xena_rgw: + voting: false + - vault-impish-xena-namespaced: + voting: false - vault-hirsute-wallaby_rgw - vault-hirsute-wallaby-namespaced - - vault-groovy-victoria_rgw - - vault-groovy-victoria-namespaced + - vault-focal-xena_rgw: + voting: false + - vault-focal-xena-namespaced: + voting: false - vault-focal-wallaby_rgw - vault-focal-wallaby-namespaced - vault-focal-victoria_rgw @@ -37,10 +43,22 @@ vars: tox_extra_args: vault:bionic-ussuri - job: - name: vault-hirsute-wallaby_rgw + name: vault-impish-xena_rgw parent: func-target dependencies: &smoke-jobs - vault-bionic-ussuri + vars: + tox_extra_args: vault:impish-xena +- job: + name: vault-impish-xena-namespaced + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:impish-xena-namespaced +- job: + name: vault-hirsute-wallaby_rgw + parent: func-target + dependencies: *smoke-jobs vars: tox_extra_args: vault:hirsute-wallaby - job: @@ -50,17 +68,17 @@ vars: tox_extra_args: vault:hirsute-wallaby-namespaced - job: - name: vault-groovy-victoria_rgw + name: vault-focal-xena_rgw parent: func-target dependencies: *smoke-jobs vars: - tox_extra_args: vault:groovy-victoria + tox_extra_args: vault:focal-xena - job: - name: vault-groovy-victoria-namespaced + name: vault-focal-xena-namespaced parent: func-target dependencies: *smoke-jobs vars: - 
tox_extra_args: vault:groovy-victoria-namespaced + tox_extra_args: vault:focal-xena-namespaced - job: name: vault-focal-wallaby_rgw parent: func-target diff --git a/pip.sh b/pip.sh new file mode 100755 index 00000000..9a7e6b09 --- /dev/null +++ b/pip.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# setuptools 58.0 dropped the support for use_2to3=true which is needed to +# install blessings (an indirect dependency of charm-tools). +# +# More details on the beahvior of tox and virtualenv creation can be found at +# https://github.com/tox-dev/tox/issues/448 +# +# This script is wrapper to force the use of the pinned versions early in the +# process when the virtualenv was created and upgraded before installing the +# depedencies declared in the target. +pip install 'pip<20.3' 'setuptools<50.0.0' +pip "$@" diff --git a/tests/bundles/focal-xena-namespaced.yaml b/tests/bundles/focal-xena-namespaced.yaml new file mode 100644 index 00000000..ad973e41 --- /dev/null +++ b/tests/bundles/focal-xena-namespaced.yaml @@ -0,0 +1,117 @@ +options: + source: &source cloud:focal-xena + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + namespace-tenants: True + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/tests/bundles/focal-xena.yaml b/tests/bundles/focal-xena.yaml new file mode 100644 index 00000000..5a590e0c --- /dev/null +++ 
b/tests/bundles/focal-xena.yaml @@ -0,0 +1,116 @@ +options: + source: &source cloud:focal-xena + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git 
a/tests/bundles/impish-xena-namespaced.yaml b/tests/bundles/impish-xena-namespaced.yaml new file mode 100644 index 00000000..a748f555 --- /dev/null +++ b/tests/bundles/impish-xena-namespaced.yaml @@ -0,0 +1,117 @@ +options: + source: &source distro + +series: impish + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + namespace-tenants: True + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 
'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/tests/bundles/impish-xena.yaml b/tests/bundles/impish-xena.yaml new file mode 100644 index 00000000..49d34ea0 --- /dev/null +++ b/tests/bundles/impish-xena.yaml @@ -0,0 +1,116 @@ +options: + source: &source distro + +series: impish + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 
'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/tests/tests.yaml b/tests/tests.yaml index d4145091..c6c3ddeb 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -1,8 +1,8 @@ charm_name: ceph-radosgw gate_bundles: - - vault: groovy-victoria - - vault: groovy-victoria-namespaced + - vault: focal-xena + - vault: focal-xena-namespaced - vault: focal-wallaby - vault: focal-wallaby-namespaced - vault: focal-victoria @@ -33,8 +33,12 @@ dev_bundles: - bionic-rocky-multisite - vault: bionic-rocky - vault: bionic-rocky-namespaced + - vault: groovy-victoria + - vault: groovy-victoria-namespaced - vault: hirsute-wallaby - vault: hirsute-wallaby-namespaced + - vault: impish-xena + - vault: impish-xena-namespaced target_deploy_status: vault: @@ -57,3 +61,7 @@ tests_options: force_deploy: - hirsute-wallaby - hirsute-wallaby-namespaced + - groovy-victoria + - groovy-victoria-namespaced + - impish-xena + - impish-xena-namespaced diff --git a/tox.ini b/tox.ini index 9ba3f9fe..ba4fd5b6 100644 --- a/tox.ini +++ b/tox.ini @@ -22,19 +22,22 @@ skip_missing_interpreters = False # * It is also necessary to pin virtualenv as a newer virtualenv would still # lead to fetching the latest pip in the func* tox targets, see # https://stackoverflow.com/a/38133283 -requires = pip < 20.3 - virtualenv < 20.0 +requires = + pip < 20.3 + virtualenv < 20.0 + setuptools < 50.0.0 + # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.2.0 +minversion = 3.18.0 [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} install_command = - pip install {opts} {packages} + {toxinidir}/pip.sh install {opts} {packages} commands = stestr run --slowest {posargs} 
-whitelist_externals = juju +allowlist_externals = juju passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt diff --git a/unit_tests/test_ceph_radosgw_context.py b/unit_tests/test_ceph_radosgw_context.py index aff7e712..3f0decd2 100644 --- a/unit_tests/test_ceph_radosgw_context.py +++ b/unit_tests/test_ceph_radosgw_context.py @@ -15,7 +15,6 @@ from mock import patch import ceph_radosgw_context as context -import charmhelpers import charmhelpers.contrib.storage.linux.ceph as ceph import charmhelpers.fetch as fetch @@ -69,290 +68,6 @@ class HAProxyContextTests(CharmTestCase): self.assertEqual(expect, haproxy_context()) -class IdentityServiceContextTest(CharmTestCase): - - def setUp(self): - super(IdentityServiceContextTest, self).setUp(context, TO_PATCH) - self.relation_get.side_effect = self.test_relation.get - self.config.side_effect = self.test_config.get - self.maxDiff = None - self.cmp_pkgrevno.return_value = 1 - self.leader_get.return_value = 'False' - - @patch.object(charmhelpers.contrib.openstack.context, - 'filter_installed_packages', return_value=['absent-pkg']) - @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') - @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') - @patch.object(charmhelpers.contrib.openstack.context, 'related_units') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') - @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, - _format_ipv6_addr, _filter_installed_packages, - jewel_installed=False, cmp_pkgrevno_side_effects=None): - self.cmp_pkgrevno.side_effect = (cmp_pkgrevno_side_effects - if cmp_pkgrevno_side_effects - else [-1, 1, -1]) - self.test_config.set('operator-roles', 'Babel') - self.test_config.set('admin-roles', 'Dart') - self.test_config.set('cache-size', '42') - 
self.relation_ids.return_value = ['identity-service:5'] - self.related_units.return_value = ['keystone/0'] - _format_ipv6_addr.return_value = False - _rids.return_value = 'rid1' - _runits.return_value = 'runit' - _ctxt_comp.return_value = True - id_data = { - 'service_port': 9876, - 'service_host': '127.0.0.4', - 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'service_tenant': 'ten', - 'service_username': 'admin', - 'service_password': 'adminpass', - } - _rget.return_value = id_data - ids_ctxt = context.IdentityServiceContext() - expect = { - 'admin_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'admin_password': 'adminpass', - 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'admin_tenant_name': 'ten', - 'admin_user': 'admin', - 'api_version': '2.0', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'auth_protocol': 'http', - 'auth_type': 'keystone', - 'namespace_tenants': False, - 'cache_size': '42', - 'service_host': '127.0.0.4', - 'service_port': 9876, - 'service_protocol': 'http', - } - if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[2] >= 0: - expect['user_roles'] = 'Babel' - expect['admin_roles'] = 'Dart' - else: - expect['user_roles'] = 'Babel,Dart' - if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[1] < 0: - expect['keystone_revocation_parameter_supported'] = True - if jewel_installed: - expect['auth_keystone_v3_supported'] = True - self.assertEqual(expect, ids_ctxt()) - - @patch.object(charmhelpers.contrib.openstack.context, - 'filter_installed_packages', return_value=['absent-pkg']) - @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') - @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') - @patch.object(charmhelpers.contrib.openstack.context, 'related_units') - 
@patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') - @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt_with_namespace(self, _log, _rids, _runits, _rget, - _ctxt_comp, _format_ipv6_addr, - _filter_installed_packages, - jewel_installed=False, - cmp_pkgrevno_side_effects=None): - self.cmp_pkgrevno.side_effect = (cmp_pkgrevno_side_effects - if cmp_pkgrevno_side_effects - else [-1, 1, -1]) - self.test_config.set('operator-roles', 'Babel') - self.test_config.set('admin-roles', 'Dart') - self.test_config.set('cache-size', '42') - self.relation_ids.return_value = ['identity-service:5'] - self.related_units.return_value = ['keystone/0'] - _format_ipv6_addr.return_value = False - _rids.return_value = 'rid1' - _runits.return_value = 'runit' - _ctxt_comp.return_value = True - self.leader_get.return_value = 'True' - id_data = { - 'service_port': 9876, - 'service_host': '127.0.0.4', - 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'service_tenant': 'ten', - 'service_username': 'admin', - 'service_password': 'adminpass', - } - _rget.return_value = id_data - ids_ctxt = context.IdentityServiceContext() - expect = { - 'admin_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'admin_password': 'adminpass', - 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'admin_tenant_name': 'ten', - 'admin_user': 'admin', - 'api_version': '2.0', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'auth_protocol': 'http', - 'auth_type': 'keystone', - 'namespace_tenants': True, - 'cache_size': '42', - 'service_host': '127.0.0.4', - 'service_port': 9876, - 'service_protocol': 'http', - } - if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[2] >= 0: - expect['user_roles'] = 'Babel' - expect['admin_roles'] = 'Dart' - else: - expect['user_roles'] = 'Babel,Dart' - if cmp_pkgrevno_side_effects and 
cmp_pkgrevno_side_effects[1] < 0: - expect['keystone_revocation_parameter_supported'] = True - if jewel_installed: - expect['auth_keystone_v3_supported'] = True - self.assertEqual(expect, ids_ctxt()) - - @patch.object(charmhelpers.contrib.openstack.context, - 'filter_installed_packages', return_value=['absent-pkg']) - @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') - @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') - @patch.object(charmhelpers.contrib.openstack.context, 'related_units') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') - @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt_missing_admin_domain_id( - self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr, - _filter_installed_packages, jewel_installed=False): - self.test_config.set('operator-roles', 'Babel') - self.test_config.set('admin-roles', 'Dart') - self.test_config.set('cache-size', '42') - self.relation_ids.return_value = ['identity-service:5'] - self.related_units.return_value = ['keystone/0'] - _format_ipv6_addr.return_value = False - _rids.return_value = ['rid1'] - _runits.return_value = ['runit'] - _ctxt_comp.return_value = True - self.cmp_pkgrevno.return_value = -1 - if jewel_installed: - self.cmp_pkgrevno.return_value = 0 - id_data = { - 'service_port': 9876, - 'service_host': '127.0.0.4', - 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'service_tenant': 'ten', - 'service_username': 'admin', - 'service_password': 'adminpass', - } - _rget.return_value = id_data - ids_ctxt = context.IdentityServiceContext() - expect = { - 'admin_password': 'adminpass', - 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'admin_tenant_name': 'ten', - 'admin_user': 'admin', - 'api_version': '2.0', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 
'auth_protocol': 'http', - 'auth_type': 'keystone', - 'namespace_tenants': False, - 'cache_size': '42', - 'keystone_revocation_parameter_supported': True, - 'service_host': '127.0.0.4', - 'service_port': 9876, - 'service_protocol': 'http', - 'user_roles': 'Babel,Dart', - } - if jewel_installed: - expect['auth_keystone_v3_supported'] = True - self.assertEqual(expect, ids_ctxt()) - - @patch.object(charmhelpers.contrib.openstack.context, - 'filter_installed_packages', return_value=['absent-pkg']) - @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') - @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') - @patch.object(charmhelpers.contrib.openstack.context, 'related_units') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') - @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt_v3( - self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr, - _filter_installed_packages, jewel_installed=False): - self.test_config.set('operator-roles', 'Babel') - self.test_config.set('admin-roles', 'Dart') - self.test_config.set('cache-size', '42') - self.relation_ids.return_value = ['identity-service:5'] - self.related_units.return_value = ['keystone/0'] - _format_ipv6_addr.return_value = False - _rids.return_value = ['rid1'] - _runits.return_value = ['runit'] - _ctxt_comp.return_value = True - self.cmp_pkgrevno.return_value = -1 - if jewel_installed: - self.cmp_pkgrevno.return_value = 0 - id_data = { - 'service_port': 9876, - 'service_host': '127.0.0.4', - 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'service_domain': 'service_domain', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'service_tenant': 'ten', - 'service_username': 'admin', - 'service_password': 'adminpass', - 'api_version': '3', - } - _rget.return_value = id_data - ids_ctxt = 
context.IdentityServiceContext() - expect = { - 'admin_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'admin_domain_name': 'service_domain', - 'admin_password': 'adminpass', - 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'admin_tenant_name': 'ten', - 'admin_user': 'admin', - 'api_version': '3', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'auth_protocol': 'http', - 'auth_type': 'keystone', - 'namespace_tenants': False, - 'cache_size': '42', - 'keystone_revocation_parameter_supported': True, - 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'service_host': '127.0.0.4', - 'service_port': 9876, - 'service_project_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'service_protocol': 'http', - 'user_roles': 'Babel,Dart', - } - if jewel_installed: - expect['auth_keystone_v3_supported'] = True - self.assertEqual(expect, ids_ctxt()) - - def test_ids_ctxt_jewel(self): - self.test_ids_ctxt(jewel_installed=True, - cmp_pkgrevno_side_effects=[0, 1, -1]) - - def test_ids_ctxt_luminous(self): - self.test_ids_ctxt(jewel_installed=True, - cmp_pkgrevno_side_effects=[1, 1, 0]) - - def test_ids_ctxt_octopus(self): - self.test_ids_ctxt(jewel_installed=True, - cmp_pkgrevno_side_effects=[1, -1, 0]) - - @patch.object(charmhelpers.contrib.openstack.context, - 'filter_installed_packages', return_value=['absent-pkg']) - @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') - @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt_no_rels(self, _log, _rids, _filter_installed_packages): - _rids.return_value = [] - ids_ctxt = context.IdentityServiceContext() - self.assertEqual(ids_ctxt(), None) - - class MonContextTest(CharmTestCase): def setUp(self):