From d9a91c30f2b239ce2f4f53675c03f0a678d812bf Mon Sep 17 00:00:00 2001 From: Guillaume Boutry Date: Fri, 5 May 2023 13:34:24 +0200 Subject: [PATCH] Bootstrap Barbican K8S charm --- .flake8 | 9 + .gitignore | 11 + .jujuignore | 3 + .stestr.conf | 3 + CONTRIBUTING.md | 33 + LICENSE | 202 +++++++ README.md | 69 +++ charmcraft.yaml | 22 + config.yaml | 9 + fetch-libs.sh | 8 + .../v0/database_requires.py | 502 ++++++++++++++++ .../keystone_k8s/v1/identity_service.py | 518 ++++++++++++++++ .../v1/kubernetes_service_patch.py | 341 +++++++++++ lib/charms/rabbitmq_k8s/v0/rabbitmq.py | 286 +++++++++ lib/charms/traefik_k8s/v1/ingress.py | 562 ++++++++++++++++++ metadata.yaml | 53 ++ pyproject.toml | 39 ++ requirements.txt | 14 + src/charm.py | 248 ++++++++ src/templates/barbican.conf | 24 + src/templates/parts/section-identity | 16 + src/templates/wsgi-barbican-api.conf | 51 ++ test-requirements.txt | 14 + tests/bundles/smoke.yaml | 70 +++ tests/config.yaml | 1 + tests/unit/__init__.py | 15 + tests/unit/test_barbican_charm.py | 116 ++++ tox.ini | 157 +++++ 28 files changed, 3396 insertions(+) create mode 100644 .flake8 create mode 100644 .gitignore create mode 100644 .jujuignore create mode 100644 .stestr.conf create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 README.md create mode 100644 charmcraft.yaml create mode 100644 config.yaml create mode 100755 fetch-libs.sh create mode 100644 lib/charms/data_platform_libs/v0/database_requires.py create mode 100644 lib/charms/keystone_k8s/v1/identity_service.py create mode 100644 lib/charms/observability_libs/v1/kubernetes_service_patch.py create mode 100644 lib/charms/rabbitmq_k8s/v0/rabbitmq.py create mode 100644 lib/charms/traefik_k8s/v1/ingress.py create mode 100644 metadata.yaml create mode 100644 pyproject.toml create mode 100644 requirements.txt create mode 100755 src/charm.py create mode 100644 src/templates/barbican.conf create mode 100644 src/templates/parts/section-identity create mode 100644 src/templates/wsgi-barbican-api.conf create mode 100644 test-requirements.txt create mode 100644 tests/bundles/smoke.yaml create mode 120000 tests/config.yaml create mode 100644 tests/unit/__init__.py create mode 100644 tests/unit/test_barbican_charm.py create mode 100644 tox.ini diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..8ef84fc --- /dev/null +++ b/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..73f116c --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ +venv/ +build/ +*.charm +*.swp + +.coverage +__pycache__/ +*.py[cod] +.tox +.stestr/ +tempest.log diff --git a/.jujuignore b/.jujuignore new file mode 100644 index 0000000..6ccd559 --- /dev/null +++ b/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/.stestr.conf b/.stestr.conf new file mode 100644 index 0000000..e4750de --- /dev/null +++ b/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./tests/unit +top_dir=./tests diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..2969183 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,33 @@ +# Contributing + +To make contributions to this charm, you'll need a working [development setup](https://juju.is/docs/sdk/dev-setup). 
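The linked guide is the authoritative reference; as a rough sketch (assuming snap-based installs of the tooling), the prerequisites can be set up with something like:

```shell
# Assumed prerequisite tooling; see the development setup guide above for specifics.
sudo snap install charmcraft --classic   # builds the charm
sudo snap install juju                   # deploys and manages it
python3 -m pip install tox               # drives the test environments below
```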
+ +You can use the environments created by `tox` for development: + +```shell +tox --notest -e unit +source .tox/unit/bin/activate +``` + +## Testing + +This project uses `tox` for managing test environments. There are some pre-configured environments +that can be used for linting and formatting code when you're preparing contributions to the charm: + +```shell +tox -e fmt # update your code according to linting rules +tox -e lint # code style +tox -e unit # unit tests +tox -e integration # integration tests +tox # runs 'lint' and 'unit' environments +``` + +## Build the charm + +Build the charm in this git repository using: + +```shell +charmcraft pack +``` + + + +[contributors-guide]: https://opendev.org/openstack/charm-barbican-k8s/src/branch/main/CONTRIBUTING.md +[juju-docs-actions]: https://jaas.ai/docs/actions +[juju-docs-config-apps]: https://juju.is/docs/configuring-applications +[lp-bugs-charm-barbican-k8s]: https://bugs.launchpad.net/charm-barbican-k8s/+filebug diff --git a/charmcraft.yaml b/charmcraft.yaml new file mode 100644 index 0000000..64837c8 --- /dev/null +++ b/charmcraft.yaml @@ -0,0 +1,22 @@ +# This file configures Charmcraft. +# See https://juju.is/docs/sdk/charmcraft-config for guidance. + +type: charm +bases: + - build-on: + - name: ubuntu + channel: "22.04" + run-on: + - name: ubuntu + channel: "22.04" +parts: + charm: + build-packages: + - git + - libffi-dev + - libssl-dev + charm-binary-python-packages: + - cryptography + - jsonschema + - jinja2 + - git+https://opendev.org/openstack/charm-ops-sunbeam#egg=ops_sunbeam diff --git a/config.yaml b/config.yaml new file mode 100644 index 0000000..1259ca4 --- /dev/null +++ b/config.yaml @@ -0,0 +1,9 @@ +options: + debug: + default: False + description: Enable debug logging. + type: boolean + region: + default: RegionOne + description: Space delimited list of OpenStack regions + type: string diff --git a/fetch-libs.sh b/fetch-libs.sh new file mode 100755 index 0000000..57af6ac --- /dev/null +++ b/fetch-libs.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +echo "INFO: Fetching libs from charmhub." +charmcraft fetch-lib charms.data_platform_libs.v0.database_requires +charmcraft fetch-lib charms.keystone_k8s.v1.identity_service +charmcraft fetch-lib charms.rabbitmq_k8s.v0.rabbitmq +charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch +charmcraft fetch-lib charms.traefik_k8s.v1.ingress diff --git a/lib/charms/data_platform_libs/v0/database_requires.py b/lib/charms/data_platform_libs/v0/database_requires.py new file mode 100644 index 0000000..6f425e7 --- /dev/null +++ b/lib/charms/data_platform_libs/v0/database_requires.py @@ -0,0 +1,502 @@ +# Copyright 2022 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""[DEPRECATED] Relation 'requires' side abstraction for database relation. + +This library is a uniform interface to a selection of common database +metadata, with added custom events that add convenience to database management, +and methods to consume the application related data. 
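One of those consumption methods, `fetch_relation_data()` (defined later in this file), can be used outside an event callback. A minimal, hedged sketch, assuming `self.database` is the `DatabaseRequires` instance created as in the example that follows:

```python
# Hypothetical helper on an application charm; not part of this library.
def _current_db_credentials(self) -> dict:
    """Return the first relation databag that already carries credentials, else {}."""
    # fetch_relation_data() returns {relation_id: {key: value, ...}}, mirroring the
    # remote application databag (username, password, endpoints, ...).
    for _relation_id, data in self.database.fetch_relation_data().items():
        if "username" in data and "password" in data:
            return data
    return {}
```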
+ +Following an example of using the DatabaseCreatedEvent, in the context of the +application charm code: + +```python + +from charms.data_platform_libs.v0.database_requires import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Charm events defined in the database requires charm library. + self.database = DatabaseRequires(self, relation_name="database", database_name="database") + self.framework.observe(self.database.on.database_created, self._on_database_created) + + def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + + # Start application with rendered configuration + self._start_application(config_file) + + # Set active status + self.unit.status = ActiveStatus("received database credentials") +``` + +As shown above, the library provides some custom events to handle specific situations, +which are listed below: + +— database_created: event emitted when the requested database is created. +— endpoints_changed: event emitted when the read/write endpoints of the database have changed. +— read_only_endpoints_changed: event emitted when the read-only endpoints of the database + have changed. Event is not triggered if read/write endpoints changed too. + +If it is needed to connect multiple database clusters to the same relation endpoint +the application charm can implement the same code as if it would connect to only +one database cluster (like the above code example). + +To differentiate multiple clusters connected to the same relation endpoint +the application charm can use the name of the remote application: + +```python + +def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Get the remote app name of the cluster that triggered this event + cluster = event.relation.app.name +``` + +It is also possible to provide an alias for each different database cluster/relation. + +So, it is possible to differentiate the clusters in two ways. +The first is to use the remote application name, i.e., `event.relation.app.name`, as above. + +The second way is to use different event handlers to handle each cluster events. +The implementation would be something like the following code: + +```python + +from charms.data_platform_libs.v0.database_requires import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Define the cluster aliases and one handler for each cluster database created event. + self.database = DatabaseRequires( + self, + relation_name="database", + database_name="database", + relations_aliases = ["cluster1", "cluster2"], + ) + self.framework.observe( + self.database.on.cluster1_database_created, self._on_cluster1_database_created + ) + self.framework.observe( + self.database.on.cluster2_database_created, self._on_cluster2_database_created + ) + + def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster1 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... 
+ + def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster2 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + +``` +""" + +import json +import logging +from collections import namedtuple +from datetime import datetime +from typing import List, Optional + +from ops.charm import ( + CharmEvents, + RelationChangedEvent, + RelationEvent, + RelationJoinedEvent, +) +from ops.framework import EventSource, Object +from ops.model import Relation + +# The unique Charmhub library identifier, never change it +LIBID = "0241e088ffa9440fb4e3126349b2fb62" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version. +LIBPATCH = 5 + +logger = logging.getLogger(__name__) + + +class DatabaseEvent(RelationEvent): + """Base class for database events.""" + + @property + def endpoints(self) -> Optional[str]: + """Returns a comma separated list of read/write endpoints.""" + return self.relation.data[self.relation.app].get("endpoints") + + @property + def password(self) -> Optional[str]: + """Returns the password for the created user.""" + return self.relation.data[self.relation.app].get("password") + + @property + def read_only_endpoints(self) -> Optional[str]: + """Returns a comma separated list of read only endpoints.""" + return self.relation.data[self.relation.app].get("read-only-endpoints") + + @property + def replset(self) -> Optional[str]: + """Returns the replicaset name. + + MongoDB only. + """ + return self.relation.data[self.relation.app].get("replset") + + @property + def tls(self) -> Optional[str]: + """Returns whether TLS is configured.""" + return self.relation.data[self.relation.app].get("tls") + + @property + def tls_ca(self) -> Optional[str]: + """Returns TLS CA.""" + return self.relation.data[self.relation.app].get("tls-ca") + + @property + def uris(self) -> Optional[str]: + """Returns the connection URIs. + + MongoDB, Redis, OpenSearch and Kafka only. + """ + return self.relation.data[self.relation.app].get("uris") + + @property + def username(self) -> Optional[str]: + """Returns the created username.""" + return self.relation.data[self.relation.app].get("username") + + @property + def version(self) -> Optional[str]: + """Returns the version of the database. + + Version as informed by the database daemon. + """ + return self.relation.data[self.relation.app].get("version") + + +class DatabaseCreatedEvent(DatabaseEvent): + """Event emitted when a new database is created for use on this relation.""" + + +class DatabaseEndpointsChangedEvent(DatabaseEvent): + """Event emitted when the read/write endpoints are changed.""" + + +class DatabaseReadOnlyEndpointsChangedEvent(DatabaseEvent): + """Event emitted when the read only endpoints are changed.""" + + +class DatabaseEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. + """ + + database_created = EventSource(DatabaseCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent) + + +Diff = namedtuple("Diff", "added changed deleted") +Diff.__doc__ = """ +A tuple for storing the diff between two data mappings. + +— added — keys that were added. 
+— changed — keys that still exist but have new values. +— deleted — keys that were deleted. +""" + + +class DatabaseRequires(Object): + """Requires-side of the database relation.""" + + on = DatabaseEvents() + + def __init__( + self, + charm, + relation_name: str, + database_name: str, + extra_user_roles: str = None, + relations_aliases: List[str] = None, + ): + """Manager of database client relations.""" + super().__init__(charm, relation_name) + self.charm = charm + self.database = database_name + self.extra_user_roles = extra_user_roles + self.local_app = self.charm.model.app + self.local_unit = self.charm.unit + self.relation_name = relation_name + self.relations_aliases = relations_aliases + self.framework.observe( + self.charm.on[relation_name].relation_joined, self._on_relation_joined_event + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, self._on_relation_changed_event + ) + + # Define custom event names for each alias. + if relations_aliases: + # Ensure the number of aliases does not exceed the maximum + # of connections allowed in the specific relation. + relation_connection_limit = self.charm.meta.requires[relation_name].limit + if len(relations_aliases) != relation_connection_limit: + raise ValueError( + f"The number of aliases must match the maximum number of connections allowed in the relation. " + f"Expected {relation_connection_limit}, got {len(relations_aliases)}" + ) + + for relation_alias in relations_aliases: + self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent) + self.on.define_event( + f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent + ) + self.on.define_event( + f"{relation_alias}_read_only_endpoints_changed", + DatabaseReadOnlyEndpointsChangedEvent, + ) + + def _assign_relation_alias(self, relation_id: int) -> None: + """Assigns an alias to a relation. + + This function writes in the unit data bag. + + Args: + relation_id: the identifier for a particular relation. + """ + # If no aliases were provided, return immediately. + if not self.relations_aliases: + return + + # Return if an alias was already assigned to this relation + # (like when there are more than one unit joining the relation). + if ( + self.charm.model.get_relation(self.relation_name, relation_id) + .data[self.local_unit] + .get("alias") + ): + return + + # Retrieve the available aliases (the ones that weren't assigned to any relation). + available_aliases = self.relations_aliases[:] + for relation in self.charm.model.relations[self.relation_name]: + alias = relation.data[self.local_unit].get("alias") + if alias: + logger.debug("Alias %s was already assigned to relation %d", alias, relation.id) + available_aliases.remove(alias) + + # Set the alias in the unit relation databag of the specific relation. + relation = self.charm.model.get_relation(self.relation_name, relation_id) + relation.data[self.local_unit].update({"alias": available_aliases[0]}) + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + # Retrieve the old data from the data key in the local unit relation databag. + old_data = json.loads(event.relation.data[self.local_unit].get("data", "{}")) + # Retrieve the new data from the event relation databag. 
+ new_data = { + key: value for key, value in event.relation.data[event.app].items() if key != "data" + } + + # These are the keys that were added to the databag and triggered this event. + added = new_data.keys() - old_data.keys() + # These are the keys that were removed from the databag and triggered this event. + deleted = old_data.keys() - new_data.keys() + # These are the keys that already existed in the databag, + # but had their values changed. + changed = { + key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key] + } + + # TODO: evaluate the possibility of losing the diff if some error + # happens in the charm before the diff is completely checked (DPE-412). + # Convert the new_data to a serializable format and save it for a next diff check. + event.relation.data[self.local_unit].update({"data": json.dumps(new_data)}) + + # Return the diff with all possible changes. + return Diff(added, changed, deleted) + + def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None: + """Emit an aliased event to a particular relation if it has an alias. + + Args: + event: the relation changed event that was received. + event_name: the name of the event to emit. + """ + alias = self._get_relation_alias(event.relation.id) + if alias: + getattr(self.on, f"{alias}_{event_name}").emit( + event.relation, app=event.app, unit=event.unit + ) + + def _get_relation_alias(self, relation_id: int) -> Optional[str]: + """Returns the relation alias. + + Args: + relation_id: the identifier for a particular relation. + + Returns: + the relation alias or None if the relation was not found. + """ + for relation in self.charm.model.relations[self.relation_name]: + if relation.id == relation_id: + return relation.data[self.local_unit].get("alias") + return None + + def fetch_relation_data(self) -> dict: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation ID). + """ + data = {} + for relation in self.relations: + data[relation.id] = { + key: value for key, value in relation.data[relation.app].items() if key != "data" + } + return data + + def _update_relation_data(self, relation_id: int, data: dict) -> None: + """Updates a set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + data: dict containing the key-value pairs + that should be updated in the relation. + """ + if self.local_unit.is_leader(): + relation = self.charm.model.get_relation(self.relation_name, relation_id) + relation.data[self.local_app].update(data) + + def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None: + """Event emitted when the application joins the database relation.""" + # If relations aliases were provided, assign one to the relation. + self._assign_relation_alias(event.relation.id) + + # Sets both database and extra user roles in the relation + # if the roles are provided. Otherwise, sets only the database. 
+ if self.extra_user_roles: + self._update_relation_data( + event.relation.id, + { + "database": self.database, + "extra-user-roles": self.extra_user_roles, + }, + ) + else: + self._update_relation_data(event.relation.id, {"database": self.database}) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the database relation has changed.""" + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Check if the database is created + # (the database charm shared the credentials). + if "username" in diff.added and "password" in diff.added: + # Emit the default event (the one without an alias). + logger.info("database created at %s", datetime.now()) + self.on.database_created.emit(event.relation, app=event.app, unit=event.unit) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "database_created") + + # To avoid unnecessary application restarts do not trigger + # “endpoints_changed“ event if “database_created“ is triggered. + return + + # Emit an endpoints changed event if the database + # added or changed this info in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("endpoints changed on %s", datetime.now()) + self.on.endpoints_changed.emit(event.relation, app=event.app, unit=event.unit) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "endpoints_changed") + + # To avoid unnecessary application restarts do not trigger + # “read_only_endpoints_changed“ event if “endpoints_changed“ is triggered. + return + + # Emit a read only endpoints changed event if the database + # added or changed this info in the relation databag. + if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("read-only-endpoints changed on %s", datetime.now()) + self.on.read_only_endpoints_changed.emit( + event.relation, app=event.app, unit=event.unit + ) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "read_only_endpoints_changed") + + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return list(self.charm.model.relations[self.relation_name]) diff --git a/lib/charms/keystone_k8s/v1/identity_service.py b/lib/charms/keystone_k8s/v1/identity_service.py new file mode 100644 index 0000000..3555662 --- /dev/null +++ b/lib/charms/keystone_k8s/v1/identity_service.py @@ -0,0 +1,518 @@ +"""IdentityServiceProvides and Requires module. + + +This library contains the Requires and Provides classes for handling +the identity_service interface. 
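Once the relation is `ready`, the requirer side exposes the remote Keystone data through the read-only properties defined further down in this file. A minimal, hedged sketch of consuming a few of them in a `ready` handler, assuming `self.identity_service` is an `IdentityServiceRequires` instance as constructed in the fuller example below:

```python
# Hypothetical handler; the properties map to remote application databag keys.
def _on_identity_service_ready(self, event):
    ids = self.identity_service
    auth_url = f"{ids.internal_protocol}://{ids.internal_host}:{ids.internal_port}"
    username = ids.service_user_name   # read from the service-credentials secret
    password = ids.service_password    # likewise; None if the secret is unavailable
    self._configure_keystone_auth(auth_url, username, password)  # hypothetical charm method
```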
+ +Import `IdentityServiceRequires` in your charm, with the charm object and the +relation name: + - self + - "identity_service" + +Also provide additional parameters to the charm object: + - service + - internal_url + - public_url + - admin_url + - region + - username + - vhost + +Two events are also available to respond to: + - connected + - ready + - goneaway + +A basic example showing the usage of this relation follows: + +``` +from charms.keystone_k8s.v1.identity_service import IdentityServiceRequires + +class IdentityServiceClientCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + # IdentityService Requires + self.identity_service = IdentityServiceRequires( + self, "identity_service", + service = "my-service" + internal_url = "http://internal-url" + public_url = "http://public-url" + admin_url = "http://admin-url" + region = "region" + ) + self.framework.observe( + self.identity_service.on.connected, self._on_identity_service_connected) + self.framework.observe( + self.identity_service.on.ready, self._on_identity_service_ready) + self.framework.observe( + self.identity_service.on.goneaway, self._on_identity_service_goneaway) + + def _on_identity_service_connected(self, event): + '''React to the IdentityService connected event. + + This event happens when n IdentityService relation is added to the + model before credentials etc have been provided. + ''' + # Do something before the relation is complete + pass + + def _on_identity_service_ready(self, event): + '''React to the IdentityService ready event. + + The IdentityService interface will use the provided config for the + request to the identity server. + ''' + # IdentityService Relation is ready. Do something with the completed relation. + pass + + def _on_identity_service_goneaway(self, event): + '''React to the IdentityService goneaway event. + + This event happens when an IdentityService relation is removed. + ''' + # IdentityService Relation has goneaway. 
shutdown services or suchlike + pass +``` +""" + +import json +import logging + +from ops.framework import ( + StoredState, + EventBase, + ObjectEvents, + EventSource, + Object, +) +from ops.model import ( + Relation, + SecretNotFoundError, +) + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "0fa7fe7236c14c6e9624acf232b9a3b0" + +# Increment this major API version when introducing breaking changes +LIBAPI = 1 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 0 + + +logger = logging.getLogger(__name__) + + +class IdentityServiceConnectedEvent(EventBase): + """IdentityService connected Event.""" + + pass + + +class IdentityServiceReadyEvent(EventBase): + """IdentityService ready for use Event.""" + + pass + + +class IdentityServiceGoneAwayEvent(EventBase): + """IdentityService relation has gone-away Event""" + + pass + + +class IdentityServiceServerEvents(ObjectEvents): + """Events class for `on`""" + + connected = EventSource(IdentityServiceConnectedEvent) + ready = EventSource(IdentityServiceReadyEvent) + goneaway = EventSource(IdentityServiceGoneAwayEvent) + + +class IdentityServiceRequires(Object): + """ + IdentityServiceRequires class + """ + + on = IdentityServiceServerEvents() + _stored = StoredState() + + def __init__(self, charm, relation_name: str, service_endpoints: dict, + region: str): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.service_endpoints = service_endpoints + self.region = region + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_identity_service_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_identity_service_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_departed, + self._on_identity_service_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_identity_service_relation_broken, + ) + + def _on_identity_service_relation_joined(self, event): + """IdentityService relation joined.""" + logging.debug("IdentityService on_joined") + self.on.connected.emit() + self.register_services( + self.service_endpoints, + self.region) + + def _on_identity_service_relation_changed(self, event): + """IdentityService relation changed.""" + logging.debug("IdentityService on_changed") + try: + self.service_password + self.on.ready.emit() + except (AttributeError, KeyError): + pass + + def _on_identity_service_relation_broken(self, event): + """IdentityService relation broken.""" + logging.debug("IdentityService on_broken") + self.on.goneaway.emit() + + @property + def _identity_service_rel(self) -> Relation: + """The IdentityService relation.""" + return self.framework.model.get_relation(self.relation_name) + + def get_remote_app_data(self, key: str) -> str: + """Return the value for the given key from remote app data.""" + data = self._identity_service_rel.data[self._identity_service_rel.app] + return data.get(key) + + @property + def api_version(self) -> str: + """Return the api_version.""" + return self.get_remote_app_data('api-version') + + @property + def auth_host(self) -> str: + """Return the auth_host.""" + return self.get_remote_app_data('auth-host') + + @property + def auth_port(self) -> str: + """Return the auth_port.""" + return self.get_remote_app_data('auth-port') + + @property + def 
auth_protocol(self) -> str: + """Return the auth_protocol.""" + return self.get_remote_app_data('auth-protocol') + + @property + def internal_host(self) -> str: + """Return the internal_host.""" + return self.get_remote_app_data('internal-host') + + @property + def internal_port(self) -> str: + """Return the internal_port.""" + return self.get_remote_app_data('internal-port') + + @property + def internal_protocol(self) -> str: + """Return the internal_protocol.""" + return self.get_remote_app_data('internal-protocol') + + @property + def admin_domain_name(self) -> str: + """Return the admin_domain_name.""" + return self.get_remote_app_data('admin-domain-name') + + @property + def admin_domain_id(self) -> str: + """Return the admin_domain_id.""" + return self.get_remote_app_data('admin-domain-id') + + @property + def admin_project_name(self) -> str: + """Return the admin_project_name.""" + return self.get_remote_app_data('admin-project-name') + + @property + def admin_project_id(self) -> str: + """Return the admin_project_id.""" + return self.get_remote_app_data('admin-project-id') + + @property + def admin_user_name(self) -> str: + """Return the admin_user_name.""" + return self.get_remote_app_data('admin-user-name') + + @property + def admin_user_id(self) -> str: + """Return the admin_user_id.""" + return self.get_remote_app_data('admin-user-id') + + @property + def service_domain_name(self) -> str: + """Return the service_domain_name.""" + return self.get_remote_app_data('service-domain-name') + + @property + def service_domain_id(self) -> str: + """Return the service_domain_id.""" + return self.get_remote_app_data('service-domain-id') + + @property + def service_host(self) -> str: + """Return the service_host.""" + return self.get_remote_app_data('service-host') + + @property + def service_credentials(self) -> str: + """Return the service_credentials secret.""" + return self.get_remote_app_data('service-credentials') + + @property + def service_password(self) -> str: + """Return the service_password.""" + credentials_id = self.get_remote_app_data('service-credentials') + if not credentials_id: + return None + + try: + credentials = self.charm.model.get_secret(id=credentials_id) + return credentials.get_content().get("password") + except SecretNotFoundError: + logger.warning(f"Secret {credentials_id} not found") + return None + + @property + def service_port(self) -> str: + """Return the service_port.""" + return self.get_remote_app_data('service-port') + + @property + def service_protocol(self) -> str: + """Return the service_protocol.""" + return self.get_remote_app_data('service-protocol') + + @property + def service_project_name(self) -> str: + """Return the service_project_name.""" + return self.get_remote_app_data('service-project-name') + + @property + def service_project_id(self) -> str: + """Return the service_project_id.""" + return self.get_remote_app_data('service-project-id') + + @property + def service_user_name(self) -> str: + """Return the service_user_name.""" + credentials_id = self.get_remote_app_data('service-credentials') + if not credentials_id: + return None + + try: + credentials = self.charm.model.get_secret(id=credentials_id) + return credentials.get_content().get("username") + except SecretNotFoundError: + logger.warning(f"Secret {credentials_id} not found") + return None + + @property + def service_user_id(self) -> str: + """Return the service_user_id.""" + return self.get_remote_app_data('service-user-id') + + @property + def internal_auth_url(self) -> str: + 
"""Return the internal_auth_url.""" + return self.get_remote_app_data('internal-auth-url') + + @property + def admin_auth_url(self) -> str: + """Return the admin_auth_url.""" + return self.get_remote_app_data('admin-auth-url') + + @property + def public_auth_url(self) -> str: + """Return the public_auth_url.""" + return self.get_remote_app_data('public-auth-url') + + def register_services(self, service_endpoints: dict, + region: str) -> None: + """Request access to the IdentityService server.""" + if self.model.unit.is_leader(): + logging.debug("Requesting service registration") + app_data = self._identity_service_rel.data[self.charm.app] + app_data["service-endpoints"] = json.dumps( + service_endpoints, sort_keys=True + ) + app_data["region"] = region + + +class HasIdentityServiceClientsEvent(EventBase): + """Has IdentityServiceClients Event.""" + + pass + + +class ReadyIdentityServiceClientsEvent(EventBase): + """IdentityServiceClients Ready Event.""" + + def __init__(self, handle, relation_id, relation_name, service_endpoints, + region, client_app_name): + super().__init__(handle) + self.relation_id = relation_id + self.relation_name = relation_name + self.service_endpoints = service_endpoints + self.region = region + self.client_app_name = client_app_name + + def snapshot(self): + return { + "relation_id": self.relation_id, + "relation_name": self.relation_name, + "service_endpoints": self.service_endpoints, + "client_app_name": self.client_app_name, + "region": self.region} + + def restore(self, snapshot): + super().restore(snapshot) + self.relation_id = snapshot["relation_id"] + self.relation_name = snapshot["relation_name"] + self.service_endpoints = snapshot["service_endpoints"] + self.region = snapshot["region"] + self.client_app_name = snapshot["client_app_name"] + + +class IdentityServiceClientEvents(ObjectEvents): + """Events class for `on`""" + + has_identity_service_clients = EventSource(HasIdentityServiceClientsEvent) + ready_identity_service_clients = EventSource(ReadyIdentityServiceClientsEvent) + + +class IdentityServiceProvides(Object): + """ + IdentityServiceProvides class + """ + + on = IdentityServiceClientEvents() + _stored = StoredState() + + def __init__(self, charm, relation_name): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_identity_service_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_identity_service_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_identity_service_relation_broken, + ) + + def _on_identity_service_relation_joined(self, event): + """Handle IdentityService joined.""" + logging.debug("IdentityService on_joined") + self.on.has_identity_service_clients.emit() + + def _on_identity_service_relation_changed(self, event): + """Handle IdentityService changed.""" + logging.debug("IdentityService on_changed") + REQUIRED_KEYS = [ + 'service-endpoints', + 'region'] + + values = [ + event.relation.data[event.relation.app].get(k) + for k in REQUIRED_KEYS + ] + # Validate data on the relation + if all(values): + service_eps = json.loads( + event.relation.data[event.relation.app]['service-endpoints']) + self.on.ready_identity_service_clients.emit( + event.relation.id, + event.relation.name, + service_eps, + event.relation.data[event.relation.app]['region'], + event.relation.app.name) + + def 
_on_identity_service_relation_broken(self, event): + """Handle IdentityService broken.""" + logging.debug("IdentityServiceProvides on_departed") + # TODO clear data on the relation + + def set_identity_service_credentials(self, relation_name: int, + relation_id: str, + api_version: str, + auth_host: str, + auth_port: str, + auth_protocol: str, + internal_host: str, + internal_port: str, + internal_protocol: str, + service_host: str, + service_port: str, + service_protocol: str, + admin_domain: str, + admin_project: str, + admin_user: str, + service_domain: str, + service_project: str, + service_user: str, + internal_auth_url: str, + admin_auth_url: str, + public_auth_url: str, + service_credentials: str): + logging.debug("Setting identity_service connection information.") + _identity_service_rel = None + for relation in self.framework.model.relations[relation_name]: + if relation.id == relation_id: + _identity_service_rel = relation + if not _identity_service_rel: + # Relation has disappeared so skip send of data + return + app_data = _identity_service_rel.data[self.charm.app] + app_data["api-version"] = api_version + app_data["auth-host"] = auth_host + app_data["auth-port"] = str(auth_port) + app_data["auth-protocol"] = auth_protocol + app_data["internal-host"] = internal_host + app_data["internal-port"] = str(internal_port) + app_data["internal-protocol"] = internal_protocol + app_data["service-host"] = service_host + app_data["service-port"] = str(service_port) + app_data["service-protocol"] = service_protocol + app_data["admin-domain-name"] = admin_domain.name + app_data["admin-domain-id"] = admin_domain.id + app_data["admin-project-name"] = admin_project.name + app_data["admin-project-id"] = admin_project.id + app_data["admin-user-name"] = admin_user.name + app_data["admin-user-id"] = admin_user.id + app_data["service-domain-name"] = service_domain.name + app_data["service-domain-id"] = service_domain.id + app_data["service-project-name"] = service_project.name + app_data["service-project-id"] = service_project.id + app_data["service-user-id"] = service_user.id + app_data["internal-auth-url"] = internal_auth_url + app_data["admin-auth-url"] = admin_auth_url + app_data["public-auth-url"] = public_auth_url + app_data["service-credentials"] = service_credentials diff --git a/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/lib/charms/observability_libs/v1/kubernetes_service_patch.py new file mode 100644 index 0000000..64dd13c --- /dev/null +++ b/lib/charms/observability_libs/v1/kubernetes_service_patch.py @@ -0,0 +1,341 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + +"""# KubernetesServicePatch Library. + +This library is designed to enable developers to more simply patch the Kubernetes Service created +by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a +service named after the application in the namespace (named after the Juju model). This service by +default contains a "placeholder" port, which is 65536/TCP. + +When modifying the default set of resources managed by Juju, one must consider the lifecycle of the +charm. In this case, any modifications to the default service (created during deployment), will be +overwritten during a charm upgrade. + +When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm` +events which applies the patch to the cluster. This should ensure that the service ports are +correct throughout the charm's life. 
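Whether the patch took effect can also be checked directly against the cluster; a small sketch, with the model and application names left as placeholders:

```shell
# The service is named after the application, in the namespace named after the Juju model.
kubectl -n <model-name> get service <app-name> -o jsonpath='{.spec.ports}'
```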
+ +The constructor simply takes a reference to the parent charm, and a list of +[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the +service. For information regarding the `lightkube` `ServicePort` model, please visit the +`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport). + +Optionally, a name of the service (in case service name needs to be patched as well), labels, +selectors, and annotations can be provided as keyword arguments. + +## Getting Started + +To get started using the library, you just need to fetch the library using `charmcraft`. **Note +that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.** + +```shell +cd some-charm +charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch +cat << EOF >> requirements.txt +lightkube +lightkube-models +EOF +``` + +Then, to initialise the library: + +For `ClusterIP` services: + +```python +# ... +from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch +from lightkube.models.core_v1 import ServicePort + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + port = ServicePort(443, name=f"{self.app.name}") + self.service_patcher = KubernetesServicePatch(self, [port]) + # ... +``` + +For `LoadBalancer`/`NodePort` services: + +```python +# ... +from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch +from lightkube.models.core_v1 import ServicePort + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666) + self.service_patcher = KubernetesServicePatch( + self, [port], "LoadBalancer" + ) + # ... +``` + +Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"` + +```python +# ... +from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch +from lightkube.models.core_v1 import ServicePort + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP") + udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP") + sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP") + self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp]) + # ... +``` + +Bound with custom events by providing `refresh_event` argument: +For example, you would like to have a configurable port in your charm and want to apply +service patch every time charm config is changed. + +```python +from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch +from lightkube.models.core_v1 import ServicePort + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + port = ServicePort(int(self.config["charm-config-port"]), name=f"{self.app.name}") + self.service_patcher = KubernetesServicePatch( + self, + [port], + refresh_event=self.on.config_changed + ) + # ... +``` + +Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library +does not try to make any API calls, or open any files during testing that are unlikely to be +present, and could break your tests. The easiest way to do this is during your test `setUp`: + +```python +# ... + +@patch("charm.KubernetesServicePatch", lambda x, y: None) +def setUp(self, *unused): + self.harness = Harness(SomeCharm) + # ... 
+``` +""" + +import logging +from types import MethodType +from typing import List, Literal, Optional, Union + +from lightkube import ApiError, Client +from lightkube.core import exceptions +from lightkube.models.core_v1 import ServicePort, ServiceSpec +from lightkube.models.meta_v1 import ObjectMeta +from lightkube.resources.core_v1 import Service +from lightkube.types import PatchType +from ops.charm import CharmBase +from ops.framework import BoundEvent, Object + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "0042f86d0a874435adef581806cddbbb" + +# Increment this major API version when introducing breaking changes +LIBAPI = 1 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 7 + +ServiceType = Literal["ClusterIP", "LoadBalancer"] + + +class KubernetesServicePatch(Object): + """A utility for patching the Kubernetes service set up by Juju.""" + + def __init__( + self, + charm: CharmBase, + ports: List[ServicePort], + service_name: Optional[str] = None, + service_type: ServiceType = "ClusterIP", + additional_labels: Optional[dict] = None, + additional_selectors: Optional[dict] = None, + additional_annotations: Optional[dict] = None, + *, + refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, + ): + """Constructor for KubernetesServicePatch. + + Args: + charm: the charm that is instantiating the library. + ports: a list of ServicePorts + service_name: allows setting custom name to the patched service. If none given, + application name will be used. + service_type: desired type of K8s service. Default value is in line with ServiceSpec's + default value. + additional_labels: Labels to be added to the kubernetes service (by default only + "app.kubernetes.io/name" is set to the service name) + additional_selectors: Selectors to be added to the kubernetes service (by default only + "app.kubernetes.io/name" is set to the service name) + additional_annotations: Annotations to be added to the kubernetes service. + refresh_event: an optional bound event or list of bound events which + will be observed to re-apply the patch (e.g. on port change). + The `install` and `upgrade-charm` events would be observed regardless. + """ + super().__init__(charm, "kubernetes-service-patch") + self.charm = charm + self.service_name = service_name if service_name else self._app + self.service = self._service_object( + ports, + service_name, + service_type, + additional_labels, + additional_selectors, + additional_annotations, + ) + + # Make mypy type checking happy that self._patch is a method + assert isinstance(self._patch, MethodType) + # Ensure this patch is applied during the 'install' and 'upgrade-charm' events + self.framework.observe(charm.on.install, self._patch) + self.framework.observe(charm.on.upgrade_charm, self._patch) + self.framework.observe(charm.on.update_status, self._patch) + + # apply user defined events + if refresh_event: + if not isinstance(refresh_event, list): + refresh_event = [refresh_event] + + for evt in refresh_event: + self.framework.observe(evt, self._patch) + + def _service_object( + self, + ports: List[ServicePort], + service_name: Optional[str] = None, + service_type: ServiceType = "ClusterIP", + additional_labels: Optional[dict] = None, + additional_selectors: Optional[dict] = None, + additional_annotations: Optional[dict] = None, + ) -> Service: + """Creates a valid Service representation. 
+ + Args: + ports: a list of ServicePorts + service_name: allows setting custom name to the patched service. If none given, + application name will be used. + service_type: desired type of K8s service. Default value is in line with ServiceSpec's + default value. + additional_labels: Labels to be added to the kubernetes service (by default only + "app.kubernetes.io/name" is set to the service name) + additional_selectors: Selectors to be added to the kubernetes service (by default only + "app.kubernetes.io/name" is set to the service name) + additional_annotations: Annotations to be added to the kubernetes service. + + Returns: + Service: A valid representation of a Kubernetes Service with the correct ports. + """ + if not service_name: + service_name = self._app + labels = {"app.kubernetes.io/name": self._app} + if additional_labels: + labels.update(additional_labels) + selector = {"app.kubernetes.io/name": self._app} + if additional_selectors: + selector.update(additional_selectors) + return Service( + apiVersion="v1", + kind="Service", + metadata=ObjectMeta( + namespace=self._namespace, + name=service_name, + labels=labels, + annotations=additional_annotations, # type: ignore[arg-type] + ), + spec=ServiceSpec( + selector=selector, + ports=ports, + type=service_type, + ), + ) + + def _patch(self, _) -> None: + """Patch the Kubernetes service created by Juju to map the correct port. + + Raises: + PatchFailed: if patching fails due to lack of permissions, or otherwise. + """ + try: + client = Client() + except exceptions.ConfigError as e: + logger.warning("Error creating k8s client: %s", e) + return + + try: + if self._is_patched(client): + return + if self.service_name != self._app: + self._delete_and_create_service(client) + client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE) + except ApiError as e: + if e.status.code == 403: + logger.error("Kubernetes service patch failed: `juju trust` this application.") + else: + logger.error("Kubernetes service patch failed: %s", str(e)) + else: + logger.info("Kubernetes service '%s' patched successfully", self._app) + + def _delete_and_create_service(self, client: Client): + service = client.get(Service, self._app, namespace=self._namespace) + service.metadata.name = self.service_name # type: ignore[attr-defined] + service.metadata.resourceVersion = service.metadata.uid = None # type: ignore[attr-defined] # noqa: E501 + client.delete(Service, self._app, namespace=self._namespace) + client.create(service) + + def is_patched(self) -> bool: + """Reports if the service patch has been applied. + + Returns: + bool: A boolean indicating if the service patch has been applied. 
+ """ + client = Client() + return self._is_patched(client) + + def _is_patched(self, client: Client) -> bool: + # Get the relevant service from the cluster + try: + service = client.get(Service, name=self.service_name, namespace=self._namespace) + except ApiError as e: + if e.status.code == 404 and self.service_name != self._app: + return False + logger.error("Kubernetes service get failed: %s", str(e)) + raise + + # Construct a list of expected ports, should the patch be applied + expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports] + # Construct a list in the same manner, using the fetched service + fetched_ports = [ + (p.port, p.targetPort) for p in service.spec.ports # type: ignore[attr-defined] + ] # noqa: E501 + return expected_ports == fetched_ports + + @property + def _app(self) -> str: + """Name of the current Juju application. + + Returns: + str: A string containing the name of the current Juju application. + """ + return self.charm.app.name + + @property + def _namespace(self) -> str: + """The Kubernetes namespace we're running in. + + Returns: + str: A string containing the name of the current Kubernetes namespace. + """ + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: + return f.read().strip() diff --git a/lib/charms/rabbitmq_k8s/v0/rabbitmq.py b/lib/charms/rabbitmq_k8s/v0/rabbitmq.py new file mode 100644 index 0000000..c7df240 --- /dev/null +++ b/lib/charms/rabbitmq_k8s/v0/rabbitmq.py @@ -0,0 +1,286 @@ +"""RabbitMQProvides and Requires module. + +This library contains the Requires and Provides classes for handling +the rabbitmq interface. + +Import `RabbitMQRequires` in your charm, with the charm object and the +relation name: + - self + - "amqp" + +Also provide two additional parameters to the charm object: + - username + - vhost + +Two events are also available to respond to: + - connected + - ready + - goneaway + +A basic example showing the usage of this relation follows: + +``` +from charms.rabbitmq_k8s.v0.rabbitmq import RabbitMQRequires + +class RabbitMQClientCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + # RabbitMQ Requires + self.amqp = RabbitMQRequires( + self, "amqp", + username="myusername", + vhost="vhostname" + ) + self.framework.observe( + self.amqp.on.connected, self._on_amqp_connected) + self.framework.observe( + self.amqp.on.ready, self._on_amqp_ready) + self.framework.observe( + self.amqp.on.goneaway, self._on_amqp_goneaway) + + def _on_amqp_connected(self, event): + '''React to the RabbitMQ connected event. + + This event happens when n RabbitMQ relation is added to the + model before credentials etc have been provided. + ''' + # Do something before the relation is complete + pass + + def _on_amqp_ready(self, event): + '''React to the RabbitMQ ready event. + + The RabbitMQ interface will use the provided username and vhost for the + request to the rabbitmq server. + ''' + # RabbitMQ Relation is ready. Do something with the completed relation. + pass + + def _on_amqp_goneaway(self, event): + '''React to the RabbitMQ goneaway event. + + This event happens when an RabbitMQ relation is removed. + ''' + # RabbitMQ Relation has goneaway. 
shutdown services or suchlike + pass +``` +""" + +# The unique Charmhub library identifier, never change it +LIBID = "45622352791142fd9cf87232e3bd6f2a" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 1 + +import logging + +from ops.framework import ( + StoredState, + EventBase, + ObjectEvents, + EventSource, + Object, +) + +from ops.model import Relation + +from typing import List + +logger = logging.getLogger(__name__) + + +class RabbitMQConnectedEvent(EventBase): + """RabbitMQ connected Event.""" + + pass + + +class RabbitMQReadyEvent(EventBase): + """RabbitMQ ready for use Event.""" + + pass + + +class RabbitMQGoneAwayEvent(EventBase): + """RabbitMQ relation has gone-away Event""" + + pass + + +class RabbitMQServerEvents(ObjectEvents): + """Events class for `on`""" + + connected = EventSource(RabbitMQConnectedEvent) + ready = EventSource(RabbitMQReadyEvent) + goneaway = EventSource(RabbitMQGoneAwayEvent) + + +class RabbitMQRequires(Object): + """ + RabbitMQRequires class + """ + + on = RabbitMQServerEvents() + + def __init__(self, charm, relation_name: str, username: str, vhost: str): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.username = username + self.vhost = vhost + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_amqp_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_amqp_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_departed, + self._on_amqp_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_amqp_relation_broken, + ) + + def _on_amqp_relation_joined(self, event): + """RabbitMQ relation joined.""" + logging.debug("RabbitMQRabbitMQRequires on_joined") + self.on.connected.emit() + self.request_access(self.username, self.vhost) + + def _on_amqp_relation_changed(self, event): + """RabbitMQ relation changed.""" + logging.debug("RabbitMQRabbitMQRequires on_changed/departed") + if self.password: + self.on.ready.emit() + + def _on_amqp_relation_broken(self, event): + """RabbitMQ relation broken.""" + logging.debug("RabbitMQRabbitMQRequires on_broken") + self.on.goneaway.emit() + + @property + def _amqp_rel(self) -> Relation: + """The RabbitMQ relation.""" + return self.framework.model.get_relation(self.relation_name) + + @property + def password(self) -> str: + """Return the RabbitMQ password from the server side of the relation.""" + return self._amqp_rel.data[self._amqp_rel.app].get("password") + + @property + def hostname(self) -> str: + """Return the hostname from the RabbitMQ relation""" + return self._amqp_rel.data[self._amqp_rel.app].get("hostname") + + @property + def ssl_port(self) -> str: + """Return the SSL port from the RabbitMQ relation""" + return self._amqp_rel.data[self._amqp_rel.app].get("ssl_port") + + @property + def ssl_ca(self) -> str: + """Return the SSL port from the RabbitMQ relation""" + return self._amqp_rel.data[self._amqp_rel.app].get("ssl_ca") + + @property + def hostnames(self) -> List[str]: + """Return a list of remote RMQ hosts from the RabbitMQ relation""" + _hosts = [] + for unit in self._amqp_rel.units: + _hosts.append(self._amqp_rel.data[unit].get("ingress-address")) + return _hosts + + def request_access(self, username: str, vhost: 
str) -> None: + """Request access to the RabbitMQ server.""" + if self.model.unit.is_leader(): + logging.debug("Requesting RabbitMQ user and vhost") + self._amqp_rel.data[self.charm.app]["username"] = username + self._amqp_rel.data[self.charm.app]["vhost"] = vhost + + +class HasRabbitMQClientsEvent(EventBase): + """Has RabbitMQClients Event.""" + + pass + + +class ReadyRabbitMQClientsEvent(EventBase): + """RabbitMQClients Ready Event.""" + + pass + + +class RabbitMQClientEvents(ObjectEvents): + """Events class for `on`""" + + has_amqp_clients = EventSource(HasRabbitMQClientsEvent) + ready_amqp_clients = EventSource(ReadyRabbitMQClientsEvent) + + +class RabbitMQProvides(Object): + """ + RabbitMQProvides class + """ + + on = RabbitMQClientEvents() + + def __init__(self, charm, relation_name, callback): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.callback = callback + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_amqp_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_amqp_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_amqp_relation_broken, + ) + + def _on_amqp_relation_joined(self, event): + """Handle RabbitMQ joined.""" + logging.debug("RabbitMQRabbitMQProvides on_joined data={}" + .format(event.relation.data[event.relation.app])) + self.on.has_amqp_clients.emit() + + def _on_amqp_relation_changed(self, event): + """Handle RabbitMQ changed.""" + logging.debug("RabbitMQRabbitMQProvides on_changed data={}" + .format(event.relation.data[event.relation.app])) + # Validate data on the relation + if self.username(event) and self.vhost(event): + self.on.ready_amqp_clients.emit() + if self.charm.unit.is_leader(): + self.callback(event, self.username(event), self.vhost(event)) + else: + logging.warning("Received RabbitMQ changed event without the " + "expected keys ('username', 'vhost') in the " + "application data bag. Incompatible charm in " + "other end of relation?") + + def _on_amqp_relation_broken(self, event): + """Handle RabbitMQ broken.""" + logging.debug("RabbitMQRabbitMQProvides on_departed") + # TODO clear data on the relation + + def username(self, event): + """Return the RabbitMQ username from the client side of the relation.""" + return event.relation.data[event.relation.app].get("username") + + def vhost(self, event): + """Return the RabbitMQ vhost from the client side of the relation.""" + return event.relation.data[event.relation.app].get("vhost") diff --git a/lib/charms/traefik_k8s/v1/ingress.py b/lib/charms/traefik_k8s/v1/ingress.py new file mode 100644 index 0000000..b31a24d --- /dev/null +++ b/lib/charms/traefik_k8s/v1/ingress.py @@ -0,0 +1,562 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. + +r"""# Interface Library for ingress. + +This library wraps relation endpoints using the `ingress` interface +and provides a Python API for both requesting and providing per-application +ingress, with load-balancing occurring across all units. + +## Getting Started + +To get started using the library, you just need to fetch the library using `charmcraft`. 
+ +```shell +cd some-charm +charmcraft fetch-lib charms.traefik_k8s.v1.ingress +``` + +In the `metadata.yaml` of the charm, add the following: + +```yaml +requires: + ingress: + interface: ingress + limit: 1 +``` + +Then, to initialise the library: + +```python +from charms.traefik_k8s.v1.ingress import (IngressPerAppRequirer, + IngressPerAppReadyEvent, IngressPerAppRevokedEvent) + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + self.ingress = IngressPerAppRequirer(self, port=80) + # The following event is triggered when the ingress URL to be used + # by this deployment of the `SomeCharm` is ready (or changes). + self.framework.observe( + self.ingress.on.ready, self._on_ingress_ready + ) + self.framework.observe( + self.ingress.on.revoked, self._on_ingress_revoked + ) + + def _on_ingress_ready(self, event: IngressPerAppReadyEvent): + logger.info("This app's ingress URL: %s", event.url) + + def _on_ingress_revoked(self, event: IngressPerAppRevokedEvent): + logger.info("This app no longer has ingress") +""" + +import logging +import socket +import typing +from typing import Any, Dict, Optional, Tuple, Union + +import yaml +from ops.charm import CharmBase, RelationBrokenEvent, RelationEvent +from ops.framework import EventSource, Object, ObjectEvents, StoredState +from ops.model import ModelError, Relation + +# The unique Charmhub library identifier, never change it +LIBID = "e6de2a5cd5b34422a204668f3b8f90d2" + +# Increment this major API version when introducing breaking changes +LIBAPI = 1 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 12 + +DEFAULT_RELATION_NAME = "ingress" +RELATION_INTERFACE = "ingress" + +log = logging.getLogger(__name__) + +try: + import jsonschema + + DO_VALIDATION = True +except ModuleNotFoundError: + log.warning( + "The `ingress` library needs the `jsonschema` package to be able " + "to do runtime data validation; without it, it will still work but validation " + "will be disabled. \n" + "It is recommended to add `jsonschema` to the 'requirements.txt' of your charm, " + "which will enable this feature." + ) + DO_VALIDATION = False + +INGRESS_REQUIRES_APP_SCHEMA = { + "type": "object", + "properties": { + "model": {"type": "string"}, + "name": {"type": "string"}, + "host": {"type": "string"}, + "port": {"type": "string"}, + "strip-prefix": {"type": "string"}, + }, + "required": ["model", "name", "host", "port"], +} + +INGRESS_PROVIDES_APP_SCHEMA = { + "type": "object", + "properties": { + "ingress": {"type": "object", "properties": {"url": {"type": "string"}}}, + }, + "required": ["ingress"], +} + +try: + from typing import TypedDict +except ImportError: + from typing_extensions import TypedDict # py35 compat + +# Model of the data a unit implementing the requirer will need to provide. +RequirerData = TypedDict( + "RequirerData", + {"model": str, "name": str, "host": str, "port": int, "strip-prefix": bool}, + total=False, +) +# Provider ingress data model. +ProviderIngressData = TypedDict("ProviderIngressData", {"url": str}) +# Provider application databag model. +ProviderApplicationData = TypedDict("ProviderApplicationData", {"ingress": ProviderIngressData}) # type: ignore + + +def _validate_data(data, schema): + """Checks whether `data` matches `schema`. + + Will raise DataValidationError if the data is not valid, else return None. 
+ """ + if not DO_VALIDATION: + return + try: + jsonschema.validate(instance=data, schema=schema) + except jsonschema.ValidationError as e: + raise DataValidationError(data, schema) from e + + +class DataValidationError(RuntimeError): + """Raised when data validation fails on IPU relation data.""" + + +class _IngressPerAppBase(Object): + """Base class for IngressPerUnit interface classes.""" + + def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME): + super().__init__(charm, relation_name) + + self.charm: CharmBase = charm + self.relation_name = relation_name + self.app = self.charm.app + self.unit = self.charm.unit + + observe = self.framework.observe + rel_events = charm.on[relation_name] + observe(rel_events.relation_created, self._handle_relation) + observe(rel_events.relation_joined, self._handle_relation) + observe(rel_events.relation_changed, self._handle_relation) + observe(rel_events.relation_broken, self._handle_relation_broken) + observe(charm.on.leader_elected, self._handle_upgrade_or_leader) # type: ignore + observe(charm.on.upgrade_charm, self._handle_upgrade_or_leader) # type: ignore + + @property + def relations(self): + """The list of Relation instances associated with this endpoint.""" + return list(self.charm.model.relations[self.relation_name]) + + def _handle_relation(self, event): + """Subclasses should implement this method to handle a relation update.""" + pass + + def _handle_relation_broken(self, event): + """Subclasses should implement this method to handle a relation breaking.""" + pass + + def _handle_upgrade_or_leader(self, event): + """Subclasses should implement this method to handle upgrades or leadership change.""" + pass + + +class _IPAEvent(RelationEvent): + __args__ = () # type: Tuple[str, ...] 
+ __optional_kwargs__ = {} # type: Dict[str, Any] + + @classmethod + def __attrs__(cls): + return cls.__args__ + tuple(cls.__optional_kwargs__.keys()) + + def __init__(self, handle, relation, *args, **kwargs): + super().__init__(handle, relation) + + if not len(self.__args__) == len(args): + raise TypeError("expected {} args, got {}".format(len(self.__args__), len(args))) + + for attr, obj in zip(self.__args__, args): + setattr(self, attr, obj) + for attr, default in self.__optional_kwargs__.items(): + obj = kwargs.get(attr, default) + setattr(self, attr, obj) + + def snapshot(self): + dct = super().snapshot() + for attr in self.__attrs__(): + obj = getattr(self, attr) + try: + dct[attr] = obj + except ValueError as e: + raise ValueError( + "cannot automagically serialize {}: " + "override this method and do it " + "manually.".format(obj) + ) from e + + return dct + + def restore(self, snapshot) -> None: + super().restore(snapshot) + for attr, obj in snapshot.items(): + setattr(self, attr, obj) + + +class IngressPerAppDataProvidedEvent(_IPAEvent): + """Event representing that ingress data has been provided for an app.""" + + __args__ = ("name", "model", "port", "host", "strip_prefix") + + if typing.TYPE_CHECKING: + name = None # type: Optional[str] + model = None # type: Optional[str] + port = None # type: Optional[str] + host = None # type: Optional[str] + strip_prefix = False # type: bool + + +class IngressPerAppDataRemovedEvent(RelationEvent): + """Event representing that ingress data has been removed for an app.""" + + +class IngressPerAppProviderEvents(ObjectEvents): + """Container for IPA Provider events.""" + + data_provided = EventSource(IngressPerAppDataProvidedEvent) + data_removed = EventSource(IngressPerAppDataRemovedEvent) + + +class IngressPerAppProvider(_IngressPerAppBase): + """Implementation of the provider of ingress.""" + + on = IngressPerAppProviderEvents() # type: ignore + + def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME): + """Constructor for IngressPerAppProvider. + + Args: + charm: The charm that is instantiating the instance. + relation_name: The name of the relation endpoint to bind to + (defaults to "ingress"). + """ + super().__init__(charm, relation_name) + + def _handle_relation(self, event): + # created, joined or changed: if remote side has sent the required data: + # notify listeners. + if self.is_ready(event.relation): + data = self._get_requirer_data(event.relation) + self.on.data_provided.emit( # type: ignore + event.relation, + data["name"], + data["model"], + data["port"], + data["host"], + data.get("strip-prefix", False), + ) + + def _handle_relation_broken(self, event): + self.on.data_removed.emit(event.relation) # type: ignore + + def wipe_ingress_data(self, relation: Relation): + """Clear ingress data from relation.""" + assert self.unit.is_leader(), "only leaders can do this" + try: + relation.data + except ModelError as e: + log.warning( + "error {} accessing relation data for {!r}. " + "Probably a ghost of a dead relation is still " + "lingering around.".format(e, relation.name) + ) + return + del relation.data[self.app]["ingress"] + + def _get_requirer_data(self, relation: Relation) -> RequirerData: # type: ignore + """Fetch and validate the requirer's app databag. + + For convenience, we convert 'port' to integer. + """ + if not relation.app or not relation.app.name: # type: ignore + # Handle edge case where remote app name can be missing, e.g., + # relation_broken events. 
+ # FIXME https://github.com/canonical/traefik-k8s-operator/issues/34 + return {} + + databag = relation.data[relation.app] + remote_data = {} # type: Dict[str, Union[int, str]] + for k in ("port", "host", "model", "name", "mode", "strip-prefix"): + v = databag.get(k) + if v is not None: + remote_data[k] = v + _validate_data(remote_data, INGRESS_REQUIRES_APP_SCHEMA) + remote_data["port"] = int(remote_data["port"]) + remote_data["strip-prefix"] = bool(remote_data.get("strip-prefix", False)) + return typing.cast(RequirerData, remote_data) + + def get_data(self, relation: Relation) -> RequirerData: # type: ignore + """Fetch the remote app's databag, i.e. the requirer data.""" + return self._get_requirer_data(relation) + + def is_ready(self, relation: Optional[Relation] = None): + """The Provider is ready if the requirer has sent valid data.""" + if not relation: + return any(map(self.is_ready, self.relations)) + + try: + return bool(self._get_requirer_data(relation)) + except DataValidationError as e: + log.warning("Requirer not ready; validation error encountered: %s" % str(e)) + return False + + def _provided_url(self, relation: Relation) -> ProviderIngressData: # type: ignore + """Fetch and validate this app databag; return the ingress url.""" + if not relation.app or not relation.app.name or not self.unit.is_leader(): # type: ignore + # Handle edge case where remote app name can be missing, e.g., + # relation_broken events. + # Also, only leader units can read own app databags. + # FIXME https://github.com/canonical/traefik-k8s-operator/issues/34 + return typing.cast(ProviderIngressData, {}) # noqa + + # fetch the provider's app databag + raw_data = relation.data[self.app].get("ingress") + if not raw_data: + raise RuntimeError("This application did not `publish_url` yet.") + + ingress: ProviderIngressData = yaml.safe_load(raw_data) + _validate_data({"ingress": ingress}, INGRESS_PROVIDES_APP_SCHEMA) + return ingress + + def publish_url(self, relation: Relation, url: str): + """Publish to the app databag the ingress url.""" + ingress = {"url": url} + ingress_data = {"ingress": ingress} + _validate_data(ingress_data, INGRESS_PROVIDES_APP_SCHEMA) + relation.data[self.app]["ingress"] = yaml.safe_dump(ingress) + + @property + def proxied_endpoints(self): + """Returns the ingress settings provided to applications by this IngressPerAppProvider. 
+ + For example, when this IngressPerAppProvider has provided the + `http://foo.bar/my-model.my-app` URL to the my-app application, the returned dictionary + will be: + + ``` + { + "my-app": { + "url": "http://foo.bar/my-model.my-app" + } + } + ``` + """ + results = {} + + for ingress_relation in self.relations: + assert ( + ingress_relation.app + ), "no app in relation (shouldn't happen)" # for type checker + results[ingress_relation.app.name] = self._provided_url(ingress_relation) + + return results + + +class IngressPerAppReadyEvent(_IPAEvent): + """Event representing that ingress for an app is ready.""" + + __args__ = ("url",) + if typing.TYPE_CHECKING: + url = None # type: Optional[str] + + +class IngressPerAppRevokedEvent(RelationEvent): + """Event representing that ingress for an app has been revoked.""" + + +class IngressPerAppRequirerEvents(ObjectEvents): + """Container for IPA Requirer events.""" + + ready = EventSource(IngressPerAppReadyEvent) + revoked = EventSource(IngressPerAppRevokedEvent) + + +class IngressPerAppRequirer(_IngressPerAppBase): + """Implementation of the requirer of the ingress relation.""" + + on = IngressPerAppRequirerEvents() # type: ignore + + # used to prevent spurious urls to be sent out if the event we're currently + # handling is a relation-broken one. + _stored = StoredState() + + def __init__( + self, + charm: CharmBase, + relation_name: str = DEFAULT_RELATION_NAME, + *, + host: Optional[str] = None, + port: Optional[int] = None, + strip_prefix: bool = False, + ): + """Constructor for IngressRequirer. + + The request args can be used to specify the ingress properties when the + instance is created. If any are set, at least `port` is required, and + they will be sent to the ingress provider as soon as it is available. + All request args must be given as keyword args. + + Args: + charm: the charm that is instantiating the library. + relation_name: the name of the relation endpoint to bind to (defaults to `ingress`); + relation must be of interface type `ingress` and have "limit: 1") + host: Hostname to be used by the ingress provider to address the requiring + application; if unspecified, the default Kubernetes service name will be used. + strip_prefix: configure Traefik to strip the path prefix. + + Request Args: + port: the port of the service + """ + super().__init__(charm, relation_name) + self.charm: CharmBase = charm + self.relation_name = relation_name + self._strip_prefix = strip_prefix + + self._stored.set_default(current_url=None) # type: ignore + + # if instantiated with a port, and we are related, then + # we immediately publish our ingress data to speed up the process. 
+ if port: + self._auto_data = host, port + else: + self._auto_data = None + + def _handle_relation(self, event): + # created, joined or changed: if we have auto data: publish it + self._publish_auto_data(event.relation) + + if self.is_ready(): + # Avoid spurious events, emit only when there is a NEW URL available + new_url = ( + None + if isinstance(event, RelationBrokenEvent) + else self._get_url_from_relation_data() + ) + if self._stored.current_url != new_url: # type: ignore + self._stored.current_url = new_url # type: ignore + self.on.ready.emit(event.relation, new_url) # type: ignore + + def _handle_relation_broken(self, event): + self._stored.current_url = None # type: ignore + self.on.revoked.emit(event.relation) # type: ignore + + def _handle_upgrade_or_leader(self, event): + """On upgrade/leadership change: ensure we publish the data we have.""" + for relation in self.relations: + self._publish_auto_data(relation) + + def is_ready(self): + """The Requirer is ready if the Provider has sent valid data.""" + try: + return bool(self._get_url_from_relation_data()) + except DataValidationError as e: + log.warning("Requirer not ready; validation error encountered: %s" % str(e)) + return False + + def _publish_auto_data(self, relation: Relation): + if self._auto_data and self.unit.is_leader(): + host, port = self._auto_data + self.provide_ingress_requirements(host=host, port=port) + + def provide_ingress_requirements(self, *, host: Optional[str] = None, port: int): + """Publishes the data that Traefik needs to provide ingress. + + NB only the leader unit is supposed to do this. + + Args: + host: Hostname to be used by the ingress provider to address the + requirer unit; if unspecified, FQDN will be used instead + port: the port of the service (required) + """ + # get only the leader to publish the data since we only + # require one unit to publish it -- it will not differ between units, + # unlike in ingress-per-unit. + assert self.unit.is_leader(), "only leaders should do this." + assert self.relation, "no relation" + + if not host: + host = socket.getfqdn() + + data = { + "model": self.model.name, + "name": self.app.name, + "host": host, + "port": str(port), + } + + if self._strip_prefix: + data["strip-prefix"] = "true" + + _validate_data(data, INGRESS_REQUIRES_APP_SCHEMA) + self.relation.data[self.app].update(data) + + @property + def relation(self): + """The established Relation instance, or None.""" + return self.relations[0] if self.relations else None + + def _get_url_from_relation_data(self) -> Optional[str]: + """The full ingress URL to reach the current unit. + + Returns None if the URL isn't available yet. + """ + relation = self.relation + if not relation or not relation.app: + return None + + # fetch the provider's app databag + try: + raw = relation.data.get(relation.app, {}).get("ingress") + except ModelError as e: + log.debug( + f"Error {e} attempting to read remote app data; " + f"probably we are in a relation_departed hook" + ) + return None + + if not raw: + return None + + ingress: ProviderIngressData = yaml.safe_load(raw) + _validate_data({"ingress": ingress}, INGRESS_PROVIDES_APP_SCHEMA) + return ingress["url"] + + @property + def url(self) -> Optional[str]: + """The full ingress URL to reach the current unit. + + Returns None if the URL isn't available yet. 
+ """ + data = self._stored.current_url or self._get_url_from_relation_data() # type: ignore + assert isinstance(data, (str, type(None))) # for static checker + return data diff --git a/metadata.yaml b/metadata.yaml new file mode 100644 index 0000000..a1f89f0 --- /dev/null +++ b/metadata.yaml @@ -0,0 +1,53 @@ +name: charm-barbican-k8s +display-name: Barbican +summary: A very short one-line summary of the charm. +description: | + A single sentence that says what the charm is, concisely and memorably. + A paragraph of one to three short sentences, that describe what the charm does. + A third paragraph that explains what need the charm meets. + Finally, a paragraph that describes whom the charm is useful for. +maintainer: Openstack Charmers +source: https://opendev.org/openstack/charm-barbican-k8s +issues: https://bugs.launchpad.net/charm-barbican-k8s + +bases: + - name: ubuntu + channel: 22.04/stable + +requires: + ingress-internal: + interface: ingress + limit: 1 + optional: true + ingress-public: + interface: ingress + limit: 1 + database: + interface: mysql_client + limit: 1 + identity-service: + interface: keystone + amqp: + interface: rabbitmq + +peers: + peers: + interface: barbican-peer + +containers: + barbican-api: + resource: barbican-api-image + barbican-worker: + resource: barbican-worker-image + +resources: + barbican-api-image: + type: oci-image + description: OCI image for OpenStack Barbican API + # kolla/ubuntu-binary-barbican-api:yoga + upstream-source: docker.io/kolla/ubuntu-binary-barbican-api@sha256:8428bb4b6289075832286e098febcc2ff9954df97c279cf8f18c798d188c8e3e + barbican-worker-image: + type: oci-image + description: OCI image for OpenStack Barbican worker + # kolla/ubuntu-binary-barbican-worker:yoga + upstream-source: docker.io/kolla/ubuntu-binary-barbican-worker@sha256:4c9d8252bbb8b99d729b28eb586d9c3c08c3a882b076682bae83a0e01600b5f8 diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..2896bc0 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,39 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. + +# Testing tools configuration +[tool.coverage.run] +branch = true + +[tool.coverage.report] +show_missing = true + +[tool.pytest.ini_options] +minversion = "6.0" +log_cli_level = "INFO" + +# Formatting tools configuration +[tool.black] +line-length = 79 + +[tool.isort] +profile = "black" +multi_line_output = 3 +force_grid_wrap = true + +# Linting tools configuration +[tool.flake8] +max-line-length = 79 +max-doc-length = 99 +max-complexity = 10 +exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"] +select = ["E", "W", "F", "C", "N", "R", "D", "H"] +# Ignore W503, E501 because using black creates errors with this +# Ignore D107 Missing docstring in __init__ +ignore = ["W503", "E501", "D107", "E402"] +per-file-ignores = [] +docstring-convention = "google" +# Check for properly formatted copyright header in each file +copyright-check = "True" +copyright-author = "Canonical Ltd." +copyright-regexp = "Copyright\\s\\d{4}([-,]\\d{4})*\\s+%(author)s" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..2ab26f7 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,14 @@ +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. 
See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# + +cryptography +jinja2 +jsonschema +lightkube +lightkube-models +ops +pwgen +git+https://opendev.org/openstack/charm-ops-sunbeam#egg=ops_sunbeam diff --git a/src/charm.py b/src/charm.py new file mode 100755 index 0000000..e33196e --- /dev/null +++ b/src/charm.py @@ -0,0 +1,248 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Barbican Operator Charm. + +This charm provide Barbican services as part of an OpenStack deployment +""" +import logging +from typing import ( + List, +) + +import ops_sunbeam.charm as sunbeam_charm +import ops_sunbeam.config_contexts as sunbeam_ctxts +import ops_sunbeam.container_handlers as sunbeam_chandlers +import ops_sunbeam.core as sunbeam_core +import ops_sunbeam.job_ctrl as sunbeam_job_ctrl +from ops import ( + framework, + model, + pebble, +) +from ops.main import ( + main, +) + +logger = logging.getLogger(__name__) + +BARBICAN_API_CONTAINER = "barbican-api" +BARBICAN_WORKER_CONTAINER = "barbican-worker" + + +class WSGIBarbicanAdminConfigContext(sunbeam_ctxts.ConfigContext): + """Configuration context for WSGI configuration.""" + + def context(self) -> dict: + """WSGI configuration options.""" + log_svc_name = self.charm.service_name.replace("-", "_") + return { + "name": self.charm.service_name, + "public_port": 9312, + "user": self.charm.service_user, + "group": self.charm.service_group, + "wsgi_admin_script": "/usr/bin/barbican-wsgi-api", + "wsgi_public_script": "/usr/bin/barbican-wsgi-api", + "error_log": f"/var/log/apache2/{log_svc_name}_error.log", + "custom_log": f"/var/log/apache2/{log_svc_name}_access.log", + } + + +class BarbicanWorkerPebbleHandler(sunbeam_chandlers.ServicePebbleHandler): + """Pebble handler for Barbican worker.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.enable_service_check = True + + def get_layer(self) -> dict: + """Barbican worker service layer. + + :returns: pebble layer configuration for worker service + :rtype: dict + """ + return { + "summary": "barbican worker layer", + "description": "pebble configuration for barbican worker", + "services": { + "barbican-worker": { + "override": "replace", + "summary": "Barbican Worker", + "command": "barbican-worker", + "startup": "enabled", + } + }, + } + + def get_healthcheck_layer(self) -> dict: + """Health check pebble layer. 
+ + :returns: pebble health check layer configuration for scheduler service + :rtype: dict + """ + return { + "checks": { + "online": { + "override": "replace", + "level": "ready", + "exec": {"command": "service barbican-worker status"}, + }, + } + } + + def default_container_configs( + self, + ) -> List[sunbeam_core.ContainerConfigFile]: + """Container configurations for handler.""" + return [ + sunbeam_core.ContainerConfigFile( + "/etc/barbican/barbican.conf", "barbican", "barbican" + ) + ] + + @property + def service_ready(self) -> bool: + """Determine whether the service the container provides is running.""" + if self.enable_service_check: + logging.debug("Service checks enabled for barbican worker") + return super().service_ready + else: + logging.debug("Service checks disabled for barbican worker") + return self.pebble_ready + + +class BarbicanOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm): + """Charm the service.""" + + _state = framework.StoredState() + service_name = "barbican-api" + wsgi_admin_script = "/usr/bin/barbican-wsgi-api" + wsgi_public_script = "/usr/bin/barbican-wsgi-api" + mandatory_relations = { + "database", + "amqp", + "identity-service", + "ingress-public", + } + + db_sync_cmds = [ + ["sudo", "-u", "barbican", "barbican-manage", "db", "upgrade"] + ] + + def configure_unit(self, event: framework.EventBase) -> None: + """Run configuration on this unit.""" + self.disable_barbican_config() + self.check_leader_ready() + self.check_relation_handlers_ready() + self.init_container_services() + self.check_pebble_handlers_ready() + self.run_db_sync() + self._state.unit_bootstrapped = True + + @property + def config_contexts(self) -> List[sunbeam_ctxts.ConfigContext]: + """Generate list of configuration adapters for the charm.""" + _cadapters = super().config_contexts + _cadapters.extend( + [ + WSGIBarbicanAdminConfigContext( + self, + "wsgi_barbican_admin", + ) + ] + ) + return _cadapters + + @sunbeam_job_ctrl.run_once_per_unit("a2disconf") + def disable_barbican_config(self): + """Disable default barbican config.""" + container = self.unit.get_container(BARBICAN_API_CONTAINER) + try: + process = container.exec( + ["a2disconf", "barbican-api"], timeout=5 * 60 + ) + out, warnings = process.wait_output() + if warnings: + for line in warnings.splitlines(): + logger.warning("a2disconf warn: %s", line.strip()) + logging.debug(f"Output from a2disconf: \n{out}") + except pebble.ExecError: + logger.exception("Failed to disable barbican-api conf in apache") + self.status = model.ErrorStatus( + "Failed to disable barbican-api conf in apache" + ) + + def get_pebble_handlers( + self, + ) -> List[sunbeam_chandlers.ServicePebbleHandler]: + """Pebble handlers for operator.""" + pebble_handlers = super().get_pebble_handlers() + pebble_handlers.extend( + [ + BarbicanWorkerPebbleHandler( + self, + BARBICAN_WORKER_CONTAINER, + "barbican-worker", + [], + self.template_dir, + self.configure_charm, + ), + ] + ) + return pebble_handlers + + @property + def service_conf(self) -> str: + """Service default configuration file.""" + return "/etc/barbican/barbican.conf" + + @property + def service_user(self) -> str: + """Service user file and directory ownership.""" + return "barbican" + + @property + def service_group(self) -> str: + """Service group file and directory ownership.""" + return "barbican" + + @property + def service_endpoints(self): + """Service endpoints configuration.""" + return [ + { + "service_name": "barbican", + "type": "key-manager", + "description": "OpenStack Barbican API", 
+ "internal_url": f"{self.internal_url}", + "public_url": f"{self.public_url}", + "admin_url": f"{self.admin_url}", + } + ] + + @property + def default_public_ingress_port(self): + """Default port.""" + return 9311 + + @property + def healthcheck_http_url(self) -> str: + """Healthcheck HTTP URL for the service.""" + # / returns a 300 return code, which is not understood by Pebble as OK + return super().healthcheck_http_url + "?build" + + +if __name__ == "__main__": + main(BarbicanOperatorCharm) diff --git a/src/templates/barbican.conf b/src/templates/barbican.conf new file mode 100644 index 0000000..3325500 --- /dev/null +++ b/src/templates/barbican.conf @@ -0,0 +1,24 @@ +[DEFAULT] +log_dir = /var/log/barbican +lock_path = /var/lock/barbican +state_path = /var/lib/barbican + +host_href = "" +transport_url = {{ amqp.transport_url }} + +sql_connection = {{ database.connection }} +db_auto_create = false + +[keystone_authtoken] +{% include "parts/section-identity" %} + +[secretstore] +namespace = barbican.crypto.plugin +enabled_secretstore_plugins = store_crypto + +[crypto] +enabled_crypto_plugins = simple_crypto + +[simple_crypto_plugin] +# the kek should be a 32-byte value which is base64 encoded +kek = 'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY=' diff --git a/src/templates/parts/section-identity b/src/templates/parts/section-identity new file mode 100644 index 0000000..9672c97 --- /dev/null +++ b/src/templates/parts/section-identity @@ -0,0 +1,16 @@ +{% if identity_service.internal_auth_url -%} +www_authenticate_uri = {{ identity_service.internal_auth_url }} +auth_url = {{ identity_service.internal_auth_url }} +{% elif identity_service.internal_host -%} +www_authenticate_uri = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }} +auth_url = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }} +{% endif -%} +auth_type = password +project_domain_name = {{ identity_service.service_domain_name }} +user_domain_name = {{ identity_service.service_domain_name }} +project_name = {{ identity_service.service_project_name }} +username = {{ identity_service.service_user_name }} +password = {{ identity_service.service_password }} + +# XXX Region should come from the id relation here +region_name = {{ options.region }} diff --git a/src/templates/wsgi-barbican-api.conf b/src/templates/wsgi-barbican-api.conf new file mode 100644 index 0000000..9070332 --- /dev/null +++ b/src/templates/wsgi-barbican-api.conf @@ -0,0 +1,51 @@ +Listen {{ wsgi_config.public_port }} +Listen {{ wsgi_barbican_admin.public_port }} + + WSGIDaemonProcess barbican-api processes=4 threads=1 user={{ wsgi_config.user }} group={{ wsgi_config.group }} \ + display-name=%{GROUP} + WSGIProcessGroup barbican-api + {% if ingress_public and ingress_public.ingress_path -%} + WSGIScriptAlias {{ ingress_public.ingress_path }} {{ wsgi_config.wsgi_public_script }} + {% endif -%} + WSGIScriptAlias / {{ wsgi_config.wsgi_public_script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + = 2.4> + ErrorLogFormat "%{cu}t %M" + + ErrorLog {{ wsgi_config.error_log }} + CustomLog {{ wsgi_config.custom_log }} combined + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + + + + WSGIDaemonProcess barbican-admin-api processes=4 threads=1 user={{ wsgi_barbican_admin.user }} group={{ wsgi_barbican_admin.group }} \ + display-name=%{GROUP} + WSGIProcessGroup barbican-admin-api + WSGIScriptAlias / {{ 
wsgi_barbican_admin.wsgi_public_script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + = 2.4> + ErrorLogFormat "%{cu}t %M" + + ErrorLog {{ wsgi_barbican_admin.error_log }} + CustomLog {{ wsgi_barbican_admin.custom_log }} combined + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + + diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 0000000..0b8ca0c --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,14 @@ +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# + +coverage +mock +flake8 +stestr +git+https://github.com/openstack-charmers/zaza.git@libjuju-3.1#egg=zaza +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack +git+https://opendev.org/openstack/tempest.git#egg=tempest +ops diff --git a/tests/bundles/smoke.yaml b/tests/bundles/smoke.yaml new file mode 100644 index 0000000..3941668 --- /dev/null +++ b/tests/bundles/smoke.yaml @@ -0,0 +1,70 @@ +bundle: kubernetes +applications: + + mysql: + charm: ch:mysql-k8s + channel: 8.0/stable + scale: 1 + trust: false + + # Currently traefik is required for networking things. + # If this isn't present, the units will hang at "installing agent". + traefik: + charm: ch:traefik-k8s + channel: 1.0/stable + scale: 1 + trust: true + + traefik-public: + charm: ch:traefik-k8s + channel: 1.0/stable + scale: 1 + trust: true + options: + kubernetes-service-annotations: metallb.universe.tf/address-pool=public + + # required for barbican + rabbitmq: + charm: ch:rabbitmq-k8s + channel: 3.9/edge + scale: 1 + trust: true + + keystone: + charm: ch:keystone-k8s + channel: yoga/edge + scale: 1 + trust: true + options: + admin-role: admin + storage: + fernet-keys: 5M + credential-keys: 5M + + barbican: + charm: ../../barbican-k8s.charm + scale: 1 + trust: true + resources: + barbican-api-image: kolla/ubuntu-binary-barbican-api:yoga + barbican-worker-image: kolla/ubuntu-binary-barbican-worker:yoga + +relations: +- - traefik:ingress + - keystone:ingress-internal +- - traefik-public:ingress + - keystone:ingress-public + +- - mysql:database + - keystone:database + +- - mysql:database + - barbican:database +- - rabbitmq:amqp + - barbican:amqp +- - keystone:identity-service + - barbican:identity-service +- - traefik:ingress + - barbican:ingress-internal +- - traefik-public:ingress + - barbican:ingress-public diff --git a/tests/config.yaml b/tests/config.yaml new file mode 120000 index 0000000..e84e89a --- /dev/null +++ b/tests/config.yaml @@ -0,0 +1 @@ +../config.yaml \ No newline at end of file diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 0000000..e058d64 --- /dev/null +++ b/tests/unit/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2023 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for Barbican operator.""" diff --git a/tests/unit/test_barbican_charm.py b/tests/unit/test_barbican_charm.py new file mode 100644 index 0000000..f26d09a --- /dev/null +++ b/tests/unit/test_barbican_charm.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 + +# Copyright 2023 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for Barbican operator.""" + +import mock +import ops_sunbeam.test_utils as test_utils + +import charm + + +class _BarbicanTestOperatorCharm(charm.BarbicanOperatorCharm): + """Test Operator Charm for Barbican Operator.""" + + def __init__(self, framework): + self.seen_events = [] + super().__init__(framework) + + def _log_event(self, event): + self.seen_events.append(type(event).__name__) + + def configure_charm(self, event): + super().configure_charm(event) + self._log_event(event) + + @property + def public_ingress_address(self): + return "barbican.juju" + + +class TestBarbicanOperatorCharm(test_utils.CharmTestCase): + """Unit tests for Barbican Operator.""" + + PATCHES = [] + + @mock.patch( + "charms.observability_libs.v1.kubernetes_service_patch." + "KubernetesServicePatch" + ) + def setUp(self, mock_patch): + """Set up environment for unit test.""" + super().setUp(charm, self.PATCHES) + self.harness = test_utils.get_harness( + _BarbicanTestOperatorCharm, container_calls=self.container_calls + ) + + # clean up events that were dynamically defined, + # otherwise we get issues because they'll be redefined, + # which is not allowed. 
+ from charms.data_platform_libs.v0.database_requires import ( + DatabaseEvents, + ) + + for attr in ( + "database_database_created", + "database_endpoints_changed", + "database_read_only_endpoints_changed", + ): + try: + delattr(DatabaseEvents, attr) + except AttributeError: + pass + + self.addCleanup(self.harness.cleanup) + self.harness.begin() + + def test_pebble_ready_handler(self): + """Test pebble ready handler.""" + self.assertEqual(self.harness.charm.seen_events, []) + test_utils.set_all_pebbles_ready(self.harness) + self.assertEqual(len(self.harness.charm.seen_events), 2) + + def test_all_relations(self): + """Test all integrations for operator.""" + self.harness.set_leader() + test_utils.set_all_pebbles_ready(self.harness) + # this adds all the default/common relations + test_utils.add_all_relations(self.harness) + test_utils.add_complete_ingress_relation(self.harness) + + setup_cmds = [ + ["a2ensite", "wsgi-barbican-api"], + ["sudo", "-u", "barbican", "barbican-manage", "db", "upgrade"], + ] + for cmd in setup_cmds: + self.assertIn(cmd, self.container_calls.execute["barbican-api"]) + config_files = [ + "/etc/apache2/sites-available/wsgi-barbican-api.conf", + "/etc/barbican/barbican.conf", + ] + for f in config_files: + self.check_file("barbican-api", f) + + +def add_db_relation(harness, name) -> str: + """Add db relation.""" + rel_id = harness.add_relation(name, "mysql") + harness.add_relation_unit(rel_id, "mysql/0") + harness.add_relation_unit(rel_id, "mysql/0") + harness.update_relation_data( + rel_id, "mysql/0", {"ingress-address": "10.0.0.3"} + ) + return rel_id diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..a24b369 --- /dev/null +++ b/tox.ini @@ -0,0 +1,157 @@ +# Source charm: ./tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. 
See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools + +[tox] +skipsdist = True +envlist = pep8,py3 +sitepackages = False +skip_missing_interpreters = False +minversion = 3.18.0 + +[vars] +src_path = {toxinidir}/src/ +tst_path = {toxinidir}/tests/ +lib_path = {toxinidir}/lib/ +pyproject_toml = {toxinidir}/pyproject.toml +all_path = {[vars]src_path} {[vars]tst_path} + +[testenv] +basepython = python3 +setenv = + PYTHONPATH = {toxinidir}:{[vars]lib_path}:{[vars]src_path} +passenv = + HOME + PYTHONPATH +install_command = + pip install {opts} {packages} +commands = stestr run --slowest {posargs} +allowlist_externals = + git + charmcraft + {toxinidir}/fetch-libs.sh + {toxinidir}/rename.sh +deps = + -r{toxinidir}/test-requirements.txt + +[testenv:fmt] +description = Apply coding style standards to code +deps = + black + isort +commands = + isort {[vars]all_path} --skip-glob {[vars]lib_path} --skip {toxinidir}/.tox + black --config {[vars]pyproject_toml} {[vars]all_path} --exclude {[vars]lib_path} + +[testenv:build] +basepython = python3 +deps = +commands = + charmcraft -v pack + {toxinidir}/rename.sh + +[testenv:fetch] +basepython = python3 +deps = +commands = + {toxinidir}/fetch-libs.sh + +[testenv:py3] +basepython = python3 +deps = + {[testenv]deps} + -r{toxinidir}/requirements.txt + +[testenv:py38] +basepython = python3.8 +deps = {[testenv:py3]deps} + +[testenv:py39] +basepython = python3.9 +deps = {[testenv:py3]deps} + +[testenv:py310] +basepython = python3.10 +deps = {[testenv:py3]deps} + +[testenv:cover] +basepython = python3 +deps = {[testenv:py3]deps} +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run --slowest {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[testenv:pep8] +description = Alias for lint +deps = {[testenv:lint]deps} +commands = {[testenv:lint]commands} + +[testenv:lint] +description = Check code against coding style standards +deps = + black + flake8<6 + flake8-docstrings + flake8-copyright + flake8-builtins + pyproject-flake8 + pep8-naming + isort + codespell +commands = + codespell {[vars]all_path} + # pflake8 wrapper supports config from pyproject.toml + pflake8 --exclude {[vars]lib_path} --config {toxinidir}/pyproject.toml {[vars]all_path} + isort --check-only --diff {[vars]all_path} --skip-glob {[vars]lib_path} + black --config {[vars]pyproject_toml} --check --diff {[vars]all_path} --exclude {[vars]lib_path} + +[testenv:func-noop] +basepython = python3 +commands = + functest-run-suite --help + +[testenv:func] +basepython = python3 +commands = + functest-run-suite --keep-model + +[testenv:func-smoke] +basepython = python3 +setenv = + TEST_MODEL_SETTINGS = automatically-retry-hooks=true + TEST_MAX_RESOLVE_COUNT = 5 +commands = + functest-run-suite --keep-model --smoke + +[testenv:func-dev] +basepython = python3 +commands = + functest-run-suite --keep-model --dev + +[testenv:func-target] +basepython = python3 +commands = + functest-run-suite --keep-model --bundle {posargs} + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . +omit = + .tox/* + tests/* + src/templates/* + +[flake8] +ignore=E226,W504
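
A rough local workflow for exercising this bootstrap, sketched from the tox environments and the smoke bundle added above. It assumes a Juju model on Kubernetes with the supporting applications (mysql, rabbitmq, keystone, traefik) deployed and related as in tests/bundles/smoke.yaml, and that the `rename.sh` helper referenced in tox.ini renames the packed charm to `barbican-k8s.charm` (that helper is not part of this patch):

```shell
# Pull the charm libraries listed in fetch-libs.sh, then pack the charm.
tox -e fetch
tox -e build

# Deploy the freshly packed charm with the Yoga Kolla images used by the
# smoke bundle.
juju deploy ./barbican-k8s.charm barbican --trust \
    --resource barbican-api-image=kolla/ubuntu-binary-barbican-api:yoga \
    --resource barbican-worker-image=kolla/ubuntu-binary-barbican-worker:yoga

# Or run the unit and smoke suites directly.
tox -e py3
tox -e func-smoke
```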