Merge remote-tracking branch 'ops-sunbeam/main' into rename
29  ops-sunbeam/.github/workflows/tox.yaml  vendored  Normal file
@@ -0,0 +1,29 @@
name: Python package

on:
  - push
  - pull_request

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.8"
          - "3.9"
          - "3.10"
    steps:
      - uses: actions/checkout@v1
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install tox tox-gh-actions
      - name: Lint with tox
        run: tox -e pep8
      - name: Test with tox
        run: tox -e py${{ matrix.python-version }}
12  ops-sunbeam/.gitignore  vendored  Normal file
@@ -0,0 +1,12 @@
venv/
build/
.idea/
*.charm
.tox
venv
.coverage
__pycache__/
*.py[cod]
**.swp
.stestr/
lib/charms/*
5  ops-sunbeam/.gitreview  Normal file
@@ -0,0 +1,5 @@
[gerrit]
host=review.opendev.org
port=29418
project=openstack/charm-ops-sunbeam.git
defaultbranch=main
3  ops-sunbeam/.stestr.conf  Normal file
@@ -0,0 +1,3 @@
[DEFAULT]
test_path=./tests/unit_tests
top_dir=./
4  ops-sunbeam/.zuul.yaml  Normal file
@@ -0,0 +1,4 @@
- project:
    templates:
      - openstack-python3-charm-jobs
      - openstack-cover-jobs
27  ops-sunbeam/README.rst  Normal file
@@ -0,0 +1,27 @@
=============================================
Sunbeam OpenStack libraries and documentation
=============================================

Tutorials
---------

* `Deploying Sunbeam Charms <doc/deploy-sunbeam-charms.rst>`_
* `Writing an OpenStack API charm with Sunbeam <doc/writing-OS-API-charm.rst>`_

How-Tos
-------

* `How-To write a pebble handler <doc/howto-pebble-handler.rst>`_
* `How-To write a relation handler <doc/howto-relation-handler.rst>`_
* `How-To write a charm context <doc/howto-config-context.rst>`_
* `How-To expose services outside of K8S <doc/howto-expose-services.rst>`_

Reference
---------

Concepts
--------

`Sunbeam Concepts <doc/concepts.rst>`_
1  ops-sunbeam/cookie-requirements.txt  Normal file
@@ -0,0 +1 @@
cookiecutter
192  ops-sunbeam/doc/bundles/full.yaml  Normal file
@@ -0,0 +1,192 @@
bundle: kubernetes
applications:
  traefik:
    charm: ch:traefik-k8s
    channel: edge
    scale: 1
    trust: true
  traefik-public:
    charm: ch:traefik-k8s
    channel: edge
    scale: 1
    trust: true
    options:
      kubernetes-service-annotations: metallb.universe.tf/address-pool=public
  mysql:
    charm: ch:mysql-k8s
    channel: edge
    scale: 1
    trust: false
  rabbitmq:
    charm: ch:rabbitmq-k8s
    channel: edge
    scale: 1
    trust: true
  keystone:
    charm: ch:keystone-k8s
    channel: edge
    scale: 1
    trust: true
    options:
      admin-role: admin
    storage:
      fernet-keys: 5M
      credential-keys: 5M
  glance:
    charm: ch:glance-k8s
    channel: edge
    scale: 1
    trust: true
    storage:
      local-repository: 5G
  nova:
    charm: ch:nova-k8s
    channel: edge
    scale: 1
    trust: true
  placement:
    charm: ch:placement-k8s
    channel: edge
    scale: 1
    trust: true
  neutron:
    charm: ch:neutron-k8s
    channel: edge
    scale: 1
    trust: true
    options:
      os-public-hostname:
  ovn-central:
    charm: ch:ovn-central-k8s
    channel: edge
    scale: 1
    trust: true
  nova-compute:
    charm: ch:sunbeam-nova-compute-operator
    channel: edge
    scale: 1
    trust: true
  vault:
    charm: ch:icey-vault-k8s
    channel: stable
    scale: 1
    resources:
      vault-image: vault
  horizon:
    charm: ch:horizon-k8s
    channel: edge
    scale: 1
    trust: true
  cinder:
    charm: ch:cinder-k8s
    channel: edge
    scale: 1
    trust: true
  ovn-relay:
    charm: ch:ovn-relay-k8s
    channel: edge
    scale: 1
    trust: true
  cinder-ceph:
    charm: ch:cinder-ceph-k8s
    channel: edge
    scale: 1
    trust: true

relations:
- - mysql:database
  - keystone:database
- - traefik:ingress
  - keystone:ingress-internal
- - traefik-public:ingress
  - keystone:ingress-public

- - mysql:database
  - glance:database
- - rabbitmq:amqp
  - glance:amqp
- - keystone:identity-service
  - glance:identity-service
- - traefik:ingress
  - glance:ingress-internal
- - traefik-public:ingress
  - glance:ingress-public

- - mysql:database
  - nova:database
- - mysql:database
  - nova:api-database
- - mysql:database
  - nova:cell-database
- - rabbitmq:amqp
  - nova:amqp
- - keystone:identity-service
  - nova:identity-service
- - traefik:ingress
  - nova:ingress-internal
- - traefik-public:ingress
  - nova:ingress-public

- - mysql:database
  - placement:database
- - keystone:identity-service
  - placement:identity-service
- - traefik:ingress
  - placement:ingress-internal
- - traefik-public:ingress
  - placement:ingress-public

- - mysql:database
  - neutron:database
- - rabbitmq:amqp
  - neutron:amqp
- - keystone:identity-service
  - neutron:identity-service
- - traefik:ingress
  - neutron:ingress-internal
- - traefik-public:ingress
  - neutron:ingress-public
- - vault:insecure-certificates
  - neutron:certificates
- - neutron:ovsdb-cms
  - ovn-central:ovsdb-cms

- - vault:insecure-certificates
  - ovn-central:certificates

- - rabbitmq:amqp
  - nova-compute:amqp
- - keystone:identity-credentials
  - nova-compute:cloud-credentials
- - nova:cloud-compute
  - nova-compute:cloud-compute

- - mysql:database
  - horizon:database
- - keystone:identity-credentials
  - horizon:identity-credentials
- - traefik:ingress
  - horizon:ingress-internal
- - traefik-public:ingress
  - horizon:ingress-public

- - mysql:database
  - cinder:database
- - rabbitmq:amqp
  - cinder:amqp
- - keystone:identity-service
  - cinder:identity-service
- - traefik:ingress
  - cinder:ingress-internal
- - traefik-public:ingress
  - cinder:ingress-public

- - vault:insecure-certificates
  - ovn-relay:certificates

- - mysql:database
  - cinder-ceph:database
- - rabbitmq:amqp
  - cinder-ceph:amqp
# cinder-ceph must also be related to ceph-mon, but this is a cross-model relation, and untested.
# This will most likely involve https://github.com/canonical/microceph in the future.
143  ops-sunbeam/doc/bundles/minimal.yaml  Normal file
@@ -0,0 +1,143 @@
bundle: kubernetes

applications:
  traefik:
    charm: ch:traefik-k8s
    channel: edge
    scale: 1
    trust: true
  traefik-public:
    charm: ch:traefik-k8s
    channel: edge
    scale: 1
    trust: true
    options:
      kubernetes-service-annotations: metallb.universe.tf/address-pool=public
  mysql:
    charm: ch:mysql-k8s
    channel: edge
    scale: 1
    trust: false
  rabbitmq:
    charm: ch:rabbitmq-k8s
    channel: edge
    scale: 1
    trust: true
  keystone:
    charm: ch:keystone-k8s
    channel: edge
    scale: 1
    trust: true
    options:
      admin-role: admin
    storage:
      fernet-keys: 5M
      credential-keys: 5M
  glance:
    charm: ch:glance-k8s
    channel: edge
    scale: 1
    trust: true
    storage:
      local-repository: 5G
  nova:
    charm: ch:nova-k8s
    channel: edge
    scale: 1
    trust: true
  placement:
    charm: ch:placement-k8s
    channel: edge
    scale: 1
    trust: true
  neutron:
    charm: ch:neutron-k8s
    channel: edge
    scale: 1
    trust: true
    options:
      os-public-hostname:
  ovn-central:
    charm: ch:ovn-central-k8s
    channel: edge
    scale: 1
    trust: true
  nova-compute:
    charm: ch:sunbeam-nova-compute-operator
    channel: edge
    scale: 1
    trust: true
  vault:
    charm: ch:icey-vault-k8s
    channel: stable
    scale: 1
    resources:
      vault-image: vault

relations:
- - mysql:database
  - keystone:database
- - traefik:ingress
  - keystone:ingress-internal
- - traefik-public:ingress
  - keystone:ingress-public

- - mysql:database
  - glance:database
- - rabbitmq:amqp
  - glance:amqp
- - keystone:identity-service
  - glance:identity-service
- - traefik:ingress
  - glance:ingress-internal
- - traefik-public:ingress
  - glance:ingress-public

- - mysql:database
  - nova:database
- - mysql:database
  - nova:api-database
- - mysql:database
  - nova:cell-database
- - rabbitmq:amqp
  - nova:amqp
- - keystone:identity-service
  - nova:identity-service
- - traefik:ingress
  - nova:ingress-internal
- - traefik-public:ingress
  - nova:ingress-public

- - mysql:database
  - placement:database
- - keystone:identity-service
  - placement:identity-service
- - traefik:ingress
  - placement:ingress-internal
- - traefik-public:ingress
  - placement:ingress-public

- - mysql:database
  - neutron:database
- - rabbitmq:amqp
  - neutron:amqp
- - keystone:identity-service
  - neutron:identity-service
- - traefik:ingress
  - neutron:ingress-internal
- - traefik-public:ingress
  - neutron:ingress-public
- - vault:insecure-certificates
  - neutron:certificates
- - neutron:ovsdb-cms
  - ovn-central:ovsdb-cms

- - vault:insecure-certificates
  - ovn-central:certificates

- - rabbitmq:amqp
  - nova-compute:amqp
- - keystone:identity-credentials
  - nova-compute:cloud-credentials
- - nova:cloud-compute
  - nova-compute:cloud-compute
127  ops-sunbeam/doc/concepts.rst  Normal file
@@ -0,0 +1,127 @@
===================================
Sunbeam OpenStack OPS Charm Anatomy
===================================

Overview
--------

Sunbeam OpenStack is designed to help with writing charms that use the
`Charmed Operator Framework <https://juju.is/docs/sdk>`__ and are
deployed on Kubernetes. For the rest of this document, whenever a charm is
referred to, a Charmed Operator framework charm running on Kubernetes is
implied.

In general a charm interacts with relations, renders configuration files and
manages services. Sunbeam Ops gives a charm a consistent way of doing this by
implementing Container handlers and Relation handlers.

Relation Handlers
-----------------

The job of a relation handler is to sit between a charm and an interface. This
allows the charm to have a consistent way of interacting with an interface
even if the charm's interfaces vary widely in the way they are implemented. For
example, the handlers have a `ready` property which indicates whether all
required data has been received. They also have a `context` method which
takes any data from the interface and creates a dictionary with this data
and any additional derived settings.

The relation handlers also set up event observers, allowing them to execute any
common procedures when events are raised by the interface. When the charm
initialises the interface it provides a callback function. The handler method
set by the observer first processes the event and then calls the charm's
callback method, passing the event as an argument.
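
As a rough sketch (the class and data names here are invented for illustration;
the relation handler how-to shows a complete, working example), a handler has
approximately this shape:

.. code:: python

    # Sketch only: the approximate shape of a relation handler.
    class ExampleRelationHandler(RelationHandler):
        """Handler for an example relation."""

        @property
        def ready(self) -> bool:
            """Report whether all required data has arrived."""
            try:
                return bool(self.interface.example_data())
            except (AttributeError, KeyError):
                return False

        def context(self) -> dict:
            """Interface data plus derived settings, for use in templates."""
            if not self.ready:
                return {}
            return {"example_setting": self.interface.example_data()}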

Required Side Relation Handlers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The handler should be initialised with any information that will need to be
sent to the provider charm. Ideally the relation and the handler should not
interact directly with the instance of the charm class other than to run the
callback method. A required side relation handler should pass the charm's
`configure_charm` method as the callback method.

Provider Side Relation Handlers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

These are likely to be lightweight as their main purpose is probably to
process incoming requests from other charms. The charm should provide a
callback method which can process these incoming requests.

Container Handlers
------------------

The job of a container handler is to sit between a charm and a pebble
container. This is particularly useful when a set of charms use very
similar containers, such as a container that provides a WSGI service via
Apache.

The Container handler manages writing configuration files to the container
and restarting services. The charm can also query the handler to find the
state of the container, configuration within the container and the status
of services within the container.

When a Container handler is initialised the charm passes it a list of
`ContainerConfigFile` objects. These objects instruct the handler which
containers a configuration file should be pushed to, the path to the
configuration file and the permissions the file should have. The charm
instructs the handler to write the configuration files by calling the
`init_service` method along with an `OPSCharmContexts` object.
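
As a brief, illustrative sketch (the path and ownership values are invented;
the pebble handler how-to shows this in context), the list a charm passes to a
handler looks roughly like this:

.. code:: python

    # Sketch only: configuration files to be rendered into a container.
    import ops_sunbeam.core as sunbeam_core

    container_configs = [
        sunbeam_core.ContainerConfigFile(
            '/etc/example/example.conf',  # path inside the container
            'root',                       # owner
            'root'),                      # group
    ]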

Contexts
--------

ASO supports two different types of context: `ConfigContext` objects and
contexts from relation handlers. These are all collected together in a single
`OPSCharmContexts`. The contexts from relation handlers are in a namespace
corresponding to the relation name. `ConfigContext` objects are in a namespace
explicitly named when the `ConfigContext` is created.

Relation Handler Context
~~~~~~~~~~~~~~~~~~~~~~~~

This context is provided by `RelationHandler.context()`. It includes
all properties from the underlying interface and additional derived settings
added by the handler.

Configuration Context
~~~~~~~~~~~~~~~~~~~~~

These contexts do not relate directly to relations and are mainly a method of
sharing common transformations of charm configuration options into configuration
file entries. For example, a WSGI configuration context might take a charm
configuration option, inspect the runtime environment and from the two derive
a third setting which is needed in a configuration file.

Interfaces
----------

An interface should live directly in a charm and be shared via `charmcraft`;
the only exception to this is the peer relation. ASO provides a base peer
interface and peer interface handler. This exposes methods which allow the lead
unit of an application to share data with its peers. It also allows a leader to
inform its peers when it is ready.

Templating
----------

Currently templates should be placed in `src/templates/`. If the charm is an
OpenStack charm the template file can be placed in the subdirectory relating to
the relevant OpenStack release and the correct template will be selected.

Charms
------

ASO currently provides two base classes to choose from when writing a charm.
The first is `OSBaseOperatorCharm` and the second, which is derived from the
first, is `OSBaseOperatorAPICharm`.

The base classes set up a default set of relation handlers (based on which
relations are present in the charm metadata) and default container handlers.
These can easily be overridden by the charm if needed. The callback function
passed to the relation handlers is `configure_charm`. The `configure_charm`
method calculates whether the charm has all the prerequisites needed to render
configuration and start container services.

The `OSBaseOperatorAPICharm` class assumes that a WSGI service is being
configured and so adds the required container handler and configuration needed
for this.
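
As a hedged illustration of how small a concrete charm can be on top of these
base classes (the class and service names below are invented, and a real API
charm must define further properties, such as the WSGI details covered in the
tutorial):

.. code:: python

    # Sketch only: a minimal charm built on the Sunbeam base classes.
    import ops_sunbeam.charm as sunbeam_charm
    from ops.main import main


    class ExampleOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
        """Charm an example API service."""

        # Name of the service this charm manages (illustrative).
        service_name = "example-api"


    if __name__ == "__main__":
        main(ExampleOperatorCharm)
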
102  ops-sunbeam/doc/deploy-sunbeam-charms.rst  Normal file
@@ -0,0 +1,102 @@
============================
How-To deploy sunbeam charms
============================

Sunbeam charms require a Juju environment with a registered Kubernetes cloud.

Below are the steps to deploy sunbeam charms on `juju with microk8s cloud`_
on a single node.

Install microk8s
~~~~~~~~~~~~~~~~

1. Install the microk8s snap

Run the commands below to install the microk8s snap:

.. code-block:: bash

   sudo snap install microk8s --classic
   sudo usermod -a -G microk8s $USER
   sudo chown -f -R $USER ~/.kube
   su - $USER
   microk8s status --wait-ready

2. If required, set proxy variables

Change the proxy values as appropriate for the environment.

.. code-block:: bash

   echo "HTTPS_PROXY=http://squid.internal:3128" >> /var/snap/microk8s/current/args/containerd-env
   echo "NO_PROXY=10.0.0.0/8,192.168.0.0/16,127.0.0.0/8,172.16.0.0/16" >> /var/snap/microk8s/current/args/containerd-env
   sudo systemctl restart snap.microk8s.daemon-containerd.service

3. Enable add-ons

In the commands below, change the following:

* ``10.245.160.2`` to point to your DNS server
* ``10.5.100.100-10.5.100.110`` to the IP range to allocate to load balancers

.. code-block:: bash

   microk8s enable dns:10.245.160.2
   microk8s enable hostpath-storage
   microk8s enable metallb:10.5.100.100-10.5.100.110

Install juju
~~~~~~~~~~~~

Run the commands below to install Juju and bootstrap a controller on microk8s:

.. code-block:: bash

   sudo snap install juju --classic
   juju bootstrap --config controller-service-type=loadbalancer microk8s micro

Deploy Sunbeam charms
~~~~~~~~~~~~~~~~~~~~~

See ./reference-bundles.rst for information about the example bundles available here.

To use locally built charms, update the following in the bundle:

* ``charm:`` to point to the locally built charm file
* ``channel:`` should be commented out

Run the commands below to deploy the bundle:

.. code-block:: bash

   juju add-model sunbeam
   juju deploy ./doc/bundles/full.yaml --trust

Check ``juju status`` and wait for all units to become active.

Testing OpenStack Control plane
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. Install the openstackclients snap

.. code-block:: bash

   sudo snap install openstackclients --channel xena/stable

2. Generate and source the openrc file. (This example requires ``jq`` to be installed.)

.. code-block:: bash

   juju run-action --wait keystone/leader get-admin-account --format json | jq -r '.[].results.openrc' > openrc
   source ./openrc

3. Run some openstack commands

.. code-block:: bash

   openstack endpoint list

At this point launching a VM does not work, as the nova-compute charm does not
support bringing up ovn-controller.


.. _`juju with microk8s cloud`: https://juju.is/docs/olm/microk8s
56  ops-sunbeam/doc/howto-config-context.rst  Normal file
@@ -0,0 +1,56 @@
=============================
How-To Write a config context
=============================

A config context is an additional context that is passed to the template
renderer in its own namespace. It is usually useful when some logic
needs to be applied to user supplied charm configuration. The context
has access to the charm object.

Below is an example which applies logic to the charm config as well as
collecting the application name to construct the context.

.. code:: python

    class CinderCephConfigurationContext(ConfigContext):
        """Cinder Ceph configuration context."""

        def context(self) -> dict:
            """Cinder Ceph configuration context."""
            config = self.charm.model.config.get
            data_pool_name = config('rbd-pool-name') or self.charm.app.name
            if config('pool-type') == "erasure-coded":
                pool_name = (
                    config('ec-rbd-metadata-pool') or
                    f"{data_pool_name}-metadata"
                )
            else:
                pool_name = data_pool_name
            backend_name = config('volume-backend-name') or self.charm.app.name
            return {
                'cluster_name': self.charm.app.name,
                'rbd_pool': pool_name,
                'rbd_user': self.charm.app.name,
                'backend_name': backend_name,
                'backend_availability_zone': config('backend-availability-zone'),
            }

Configuring Charm to use custom config context
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The charm can append the new context onto those provided by the base class.

.. code:: python

    import ops_sunbeam.charm as sunbeam_charm

    class MyCharm(sunbeam_charm.OSBaseOperatorAPICharm):
        """Charm the service."""

        @property
        def config_contexts(self) -> List[sunbeam_ctxts.ConfigContext]:
            """Configuration contexts for the operator."""
            contexts = super().config_contexts
            contexts.append(
                sunbeam_ctxts.CinderCephConfigurationContext(self, "cinder_ceph"))
            return contexts
120  ops-sunbeam/doc/howto-expose-services.rst  Normal file
@@ -0,0 +1,120 @@
======================================
How-To expose a service outside of K8S
======================================

++++++++
Overview
++++++++

When Juju deploys an Operator Charm to Kubernetes, a ClusterIP service
entry is created by default for each application to provide
resilient, load balanced access to the services it provides from
within the Kubernetes deployment.

For the majority of OpenStack API services, external ingress access
to the API endpoints is required from outside of Kubernetes - this
is used both by end-users of the cloud and by machine
based charms supporting OpenStack Hypervisors.

Operator charms for API or other web services written using Sunbeam
OpenStack will automatically patch the Juju created service entry to
be of type LoadBalancer, enabling Kubernetes to expose the service to
the outside world using a suitable Load Balancer implementation.

++++++++
MicroK8S
++++++++

For a MicroK8S deployment on bare metal, MetalLB can be enabled to
support this feature:

.. code-block:: none

    microk8s enable metallb

By default MicroK8S will prompt for an IP address pool for MetalLB
to use - this can also be provided in the enable command:

.. code-block:: none

    microk8s enable metallb:10.64.140.43-10.64.140.49

Please refer to the `MicroK8S MetalLB add-on`_ documentation for more
details.

++++++++++++++++++
Charmed Kubernetes
++++++++++++++++++

For a Charmed Kubernetes deployment on bare metal, MetalLB can also be
used for creation of LoadBalancer access to services.

`Operator Charms for MetalLB`_ exist but don't yet support BGP mode for
ECMP (Equal Cost Multi Path) based load balancing by integrating directly
into the network infrastructure hosting the Kubernetes deployment.

For this reason it is recommended to use the upstream manifests for
deployment of MetalLB with a suitable ConfigMap for the BGP network
configuration or Layer 2 configuration depending on the mode of
operation desired:

.. code-block:: none

    kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.3/manifests/namespace.yaml
    kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.3/manifests/metallb.yaml
    # On first install only
    kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"

Example ConfigMap for configuration of MetalLB in BGP mode:

.. code-block:: yaml

    apiVersion: v1
    kind: ConfigMap
    metadata:
      namespace: metallb-system
      name: config
    data:
      config: |
        peers:
        - peer-address: 10.0.0.1
          peer-asn: 64512
          my-asn: 64512
        address-pools:
        - name: default
          protocol: bgp
          addresses:
          - 10.64.140.43-10.64.140.49

IP address pools and BGP peer configuration will be entirely
deployment specific.

++++++++++++++
Service Access
++++++++++++++

Once MetalLB has created a LoadBalancer configuration for a service, its
external IP address will be populated in the service entry. Juju will
automatically pick this address for use as the ingress address for the
service on relations (which is not ideal for service communication
within the Kubernetes deployment).

The IP address can also be discovered using the juju status command -
the Load Balancer external IP will be detailed in the application
information:

.. code-block:: none

    $ juju status cinder
    Model    Controller  Cloud/Region       Version  SLA          Timestamp
    sunbeam  maas-one    k8s-cloud/default  2.9.22   unsupported  11:21:51Z

    App     Version  Status   Scale  Charm                    Store  Channel  Rev  OS          Address    Message
    cinder           waiting      1  sunbeam-cinder-operator  local             0  kubernetes  10.0.0.40  installing agent

    Unit       Workload  Agent  Address      Ports  Message
    cinder/0*  unknown   idle   10.1.73.176

.. LINKS
.. _MicroK8S MetalLB add-on: https://microk8s.io/docs/addon-metallb
.. _Operator Charms for MetalLB: https://ubuntu.com/kubernetes/docs/metallb
130  ops-sunbeam/doc/howto-pebble-handler.rst  Normal file
@@ -0,0 +1,130 @@
=============================
How-To Write a pebble handler
=============================

A pebble handler sits between a charm and a container it manages. A pebble
handler presents the charm with a consistent method of interaction with
the container. For example the charm can query the handler to check config
has been rendered and services started. It can call the `execute` method
to run commands in the container or call `write_config` to render the
defined files into the container.
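
For example (a sketch only: ``handler`` is assumed to be one of the handlers
returned by the charm's ``get_pebble_handlers`` method, and the command shown
is purely illustrative):

.. code:: python

    # Sketch only: charm code querying and driving a pebble handler.
    if handler.ready:
        # Run an ad-hoc command inside the managed container.
        handler.execute(["ls", "/etc/my-service"])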

Common Pebble handler changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

ASO provides pebble handler base classes which provide the starting point
for writing a new handler. If the container runs a service then
`ServicePebbleHandler` should be used. If the container does not provide a
service (perhaps it's just an environment for executing commands that affect
other containers) then `PebbleHandler` should be used.

.. code:: python

    import ops_sunbeam.container_handlers as sunbeam_chandlers

    class MyServicePebbleHandler(sunbeam_chandlers.ServicePebbleHandler):
        """Manage MyService Container."""

The handlers can create directories in the container once pebble is
available.

.. code:: python

    @property
    def directories(self) -> List[sunbeam_chandlers.ContainerDir]:
        """Directories to create in container."""
        return [
            sunbeam_chandlers.ContainerDir(
                '/var/log/my-service',
                'root',
                'root')]

In addition to directories, the handler can list configuration files which need
to be rendered into the container. These will be rendered as templates using
all available contexts.

.. code:: python

    def default_container_configs(
        self
    ) -> List[sunbeam_core.ContainerConfigFile]:
        """Files to render into containers."""
        return [
            sunbeam_core.ContainerConfigFile(
                '/etc/mysvc/mysvc.conf',
                'root',
                'root')]

If a service should be running in the container, the handler specifies the
layer describing the service that will be passed to pebble.

.. code:: python

    def get_layer(self) -> dict:
        """Pebble configuration layer for MyService service."""
        return {
            "summary": "My service",
            "description": "Pebble config layer for MyService",
            "services": {
                'my_svc': {
                    "override": "replace",
                    "summary": "My Super Service",
                    "command": "/usr/bin/my-svc",
                    "startup": "disabled",
                },
            },
        }


Advanced Pebble handler changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

By default the pebble handler is the observer of pebble events. If this
behaviour needs to be altered then the `setup_pebble_handler` method can be
changed.

.. code:: python

    def setup_pebble_handler(self) -> None:
        """Configure handler for pebble ready event."""
        pass

Or perhaps it is ok for the pebble handler to observe the event but a
different reaction is required. In this case the method associated
with the event can be overridden.

.. code:: python

    def _on_service_pebble_ready(
        self, event: ops.charm.PebbleReadyEvent
    ) -> None:
        """Handle pebble ready event."""
        container = event.workload
        container.add_layer(self.service_name, self.get_layer(), combine=True)
        self.execute(["run", "special", "command"])
        logger.debug(f"Plan: {container.get_plan()}")
        self.ready = True
        self.charm.configure_charm(event)

Configuring Charm to use custom pebble handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The charm's `get_pebble_handlers` method dictates which pebble handlers are used.

.. code:: python

    class MyCharmCharm(NeutronOperatorCharm):

        def get_pebble_handlers(self) -> List[sunbeam_chandlers.PebbleHandler]:
            """Pebble handlers for the service."""
            return [
                MyServicePebbleHandler(
                    self,
                    'my-server-container',
                    self.service_name,
                    self.container_configs,
                    self.template_dir,
                    self.openstack_release,
                    self.configure_charm,
                )
            ]
139  ops-sunbeam/doc/howto-relation-handler.rst  Normal file
@@ -0,0 +1,139 @@
===============================
How-To Write a relation handler
===============================

A relation handler gives the charm a consistent method of interacting with
relation interfaces. It can also encapsulate common interface tasks, which
removes the need for duplicate code across multiple charms.

This how-to will walk through the steps to write a database relation handler
for the requires side.

In this database interface the database charm expects the client to provide the name
of the database(s) to be created. To model this, the relation handler will require
the charm to specify the database name(s) when the class is instantiated.

.. code:: python

    class DBHandler(RelationHandler):
        """Handler for DB relations."""

        def __init__(
            self,
            charm: ops.charm.CharmBase,
            relation_name: str,
            callback_f: Callable,
            databases: List[str] = None,
        ) -> None:
            """Run constructor."""
            self.databases = databases
            super().__init__(charm, relation_name, callback_f)

The handler initialises the interface with the database names and also sets up
an observer for relation changed events.

.. code:: python

    def setup_event_handler(self) -> ops.charm.Object:
        """Configure event handlers for a MySQL relation."""
        logger.debug("Setting up DB event handler")
        # Lazy import to ensure this lib is only required if the charm
        # has this relation.
        import charms.sunbeam_mysql_k8s.v0.mysql as mysql
        db = mysql.MySQLConsumer(
            self.charm, self.relation_name, databases=self.databases
        )
        _rname = self.relation_name.replace("-", "_")
        db_relation_event = getattr(
            self.charm.on, f"{_rname}_relation_changed"
        )
        self.framework.observe(db_relation_event, self._on_database_changed)
        return db

The method runs when the changed event is seen and checks whether all required
data has been provided. If it has, it calls back to the charm; if not, no
action is taken.

.. code:: python

    def _on_database_changed(self, event: ops.framework.EventBase) -> None:
        """Handle database change events."""
        databases = self.interface.databases()
        logger.info(f"Received databases: {databases}")
        if not self.ready:
            return
        self.callback_f(event)

    @property
    def ready(self) -> bool:
        """Whether the handler is ready for use."""
        try:
            # Nothing to wait for
            return bool(self.interface.databases())
        except (AttributeError, KeyError):
            return False

The `ready` property is common across all handlers and allows the charm to
check the state of any relation in a consistent way.
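
For example, charm code can guard on it before doing any work (a sketch,
assuming the handler is stored as ``self.db`` as in the charm snippet at the
end of this how-to):

.. code:: python

    # Sketch only: bail out until the relation has provided its data.
    if not self.db.ready:
        return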

The relation handlers also provide a context which can be used when rendering
templates. ASO places each relation context in its own namespace.

.. code:: python

    def context(self) -> dict:
        """Context containing database connection data."""
        try:
            databases = self.interface.databases()
        except (AttributeError, KeyError):
            return {}
        if not databases:
            return {}
        ctxt = {}
        conn_data = {
            "database_host": self.interface.credentials().get("address"),
            "database_password": self.interface.credentials().get("password"),
            "database_user": self.interface.credentials().get("username"),
            "database_type": "mysql+pymysql",
        }

        for db in self.interface.databases():
            ctxt[db] = {"database": db}
            ctxt[db].update(conn_data)
            connection = (
                "{database_type}://{database_user}:{database_password}"
                "@{database_host}/{database}")
            if conn_data.get("database_ssl_ca"):
                connection = connection + "?ssl_ca={database_ssl_ca}"
            if conn_data.get("database_ssl_cert"):
                connection = connection + (
                    "&ssl_cert={database_ssl_cert}"
                    "&ssl_key={database_ssl_key}")
            ctxt[db]["connection"] = str(connection.format(
                **ctxt[db]))
        return ctxt

Configuring Charm to use custom relation handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The base class will add the default relation handlers for any interfaces
which do not yet have a handler. Therefore the custom handler is added to
the list and then passed to the super method. The base charm class will
see a handler already exists for database and not add the default one.

.. code:: python

    class MyCharm(sunbeam_charm.OSBaseOperatorAPICharm):
        """Charm the service."""

        def get_relation_handlers(self, handlers=None) -> List[
                sunbeam_rhandlers.RelationHandler]:
            """Relation handlers for the service."""
            handlers = handlers or []
            if self.can_add_handler("database", handlers):
                self.db = sunbeam_rhandlers.DBHandler(
                    self, "database", self.configure_charm, self.databases
                )
                handlers.append(self.db)
            handlers = super().get_relation_handlers(handlers)
            return handlers
21  ops-sunbeam/doc/reference-bundles.rst  Normal file
@@ -0,0 +1,21 @@
=================
Reference Bundles
=================

There are some official reference bundles in `./bundles/`:

`minimal.yaml`
~~~~~~~~~~~~~~

The baseline "here's a barebones OpenStack" that can be deployed on a k8s cloud.

`full.yaml`
~~~~~~~~~~~

All the things that can be deployed on a k8s cloud.
As-is, `cinder-ceph` will not come up active
because it requires a relation to a `ceph-mon`.
However, this may be replaced with some configuration
to connect to https://github.com/canonical/microceph in the future.
36  ops-sunbeam/doc/troubleshooting-sunbeam-deployment.md  Normal file
@@ -0,0 +1,36 @@
Some miscellaneous debugging notes.

## Stuck on `waiting ... installing agent`

If many units are stuck in waiting status at the installing agent step,
and the traefik charms have the status message "gateway address unavailable",
then check that the k8s undercloud has a form of ingress enabled.

An easy way to enable ingress with microk8s
is to enable metallb and give it a block of IP addresses.
Currently these IP addresses aren't used for anything with sunbeam,
so it doesn't matter what you use.
A simple option is to pick a small range on your current LAN, for example.

## Accessing remote microk8s

If you have microk8s running on a remote server,
and you want to access it from juju and the openstack client locally,
here are some guidelines.

1. Run `microk8s.config` on the remote server.
2. Copy the output to `~/.kube/config` on your local machine (so we now have credentials).
3. Edit `~/.kube/config` and update the server url/ip to point to the remote server.
4. Check firewall rules on the remote server to ensure you'll have access to the k8s and openstack ports.
5. Add a standard k8s cluster to your local juju client: `juju add-k8s my-microk8s`
6. At this point the k8s cloud is registered and you can deploy bundles with juju.
7. To access the sunbeam openstack you will need some kind of routing though.
   `sshuttle` is a useful and simple tool to achieve this. Try `sshuttle -r <remote_server> <subnet_to_forward>`,
   e.g. `sshuttle -r ubuntu@192.168.1.103 10.152.0.0/16`, assuming that 10.152.0.0/16 is the local subnet on the remote server allocated to microk8s pods.

## Cannot launch openstack instances

It's a known issue that currently it's impossible to launch instances with the sunbeam openstack.
Most other things should work though.
157  ops-sunbeam/doc/writing-OS-API-charm.rst  Normal file
@@ -0,0 +1,157 @@
=============
New API Charm
=============

The example below will walk through the creation of a basic API charm for the
OpenStack `Ironic <https://wiki.openstack.org/wiki/Ironic>`__ service designed
to run on kubernetes.

Create the skeleton charm
=========================

Prerequisite
~~~~~~~~~~~~

Build a base generic charm with the `charmcraft` tool.

.. code:: bash

    mkdir charm-ironic-k8s
    cd charm-ironic-k8s
    charmcraft init --author $USER --name ironic-k8s

Add the ASO common files to the new charm. The script will ask a few basic questions:

.. code:: bash

    git clone https://opendev.org/openstack/charm-ops-sunbeam
    cd charm-ops-sunbeam
    ./sunbeam-charm-init.sh ~/charm-ironic-k8s

    This tool is designed to be used after 'charmcraft init' was initially run
    service_name [ironic]: ironic
    charm_name [ironic-k8s]: ironic-k8s
    ingress_port []: 6385
    db_sync_command []: ironic-dbsync --config-file /etc/ironic/ironic.conf create_schema

Fetch the interface libs corresponding to the requires interfaces:

.. code:: bash

    cd charm-ironic-k8s
    charmcraft login --export ~/secrets.auth
    export CHARMCRAFT_AUTH=$(cat ~/secrets.auth)
    charmcraft fetch-lib charms.nginx_ingress_integrator.v0.ingress
    charmcraft fetch-lib charms.data_platform_libs.v0.database_requires
    charmcraft fetch-lib charms.keystone_k8s.v1.identity_service
    charmcraft fetch-lib charms.rabbitmq_k8s.v0.rabbitmq
    charmcraft fetch-lib charms.traefik_k8s.v1.ingress
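
The init script also populates the charm with common files, typically including
a ``src/charm.py`` skeleton, so the charm class itself does not need to be
written from scratch. As a rough, hedged sketch of what that generated class
amounts to (the attribute names and values below are illustrative and may not
match the generated file exactly):

.. code:: python

    # Illustrative sketch only; the real file is generated by
    # sunbeam-charm-init.sh and will differ in detail.
    import ops_sunbeam.charm as sunbeam_charm
    from ops.main import main


    class IronicOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
        """Charm the Ironic API service."""

        # Service managed by the charm and the ingress port answered above.
        service_name = "ironic"
        default_public_ingress_port = 6385


    if __name__ == "__main__":
        main(IronicOperatorCharm)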

Templates
=========

Much of the service configuration is covered by common templates which were copied
into the charm in the previous step. The only additional template for this charm
is for `ironic.conf`. Add the following into `./src/templates/ironic.conf.j2`

.. code::

    [DEFAULT]
    debug = {{ options.debug }}
    auth_strategy=keystone
    transport_url = {{ amqp.transport_url }}

    [keystone_authtoken]
    {% include "parts/identity-data" %}

    [database]
    {% include "parts/database-connection" %}

    [neutron]
    {% include "parts/identity-data" %}

    [glance]
    {% include "parts/identity-data" %}

    [cinder]
    {% include "parts/identity-data" %}

    [service_catalog]
    {% include "parts/identity-data" %}


Make charm deployable
=====================

The next step is to pack the charm into a deployable format.

.. code:: bash

    cd charm-ironic-k8s
    charmcraft pack


Deploy Charm
============

The charm can now be deployed. The Kolla project has images that can be used to
run the service. Juju can pull the image directly from dockerhub.

.. code:: bash

    juju deploy ./ironic-k8s_ubuntu-20.04-amd64.charm --resource ironic-api-image=kolla/ubuntu-binary-ironic-api:yoga ironic
    juju relate ironic mysql
    juju relate ironic keystone
    juju relate ironic rabbitmq
    juju relate ironic:ingress-internal traefik:ingress
    juju relate ironic:ingress-public traefik:ingress

Test Service
============

Check that ``juju status`` shows the charm is active and no error messages are
present. Then check the ironic api service is responding.

.. code:: bash

    $ juju status ironic
    Model  Controller  Cloud/Region        Version  SLA          Timestamp
    ks     micro       microk8s/localhost  2.9.22   unsupported  13:31:41Z

    App     Version  Status  Scale  Charm       Store  Channel  Rev  OS          Address        Message
    ironic           active      1  ironic-k8s  local             0  kubernetes  10.152.183.73

    Unit      Workload  Agent  Address       Ports  Message
    ironic/0* active    idle   10.1.155.106

    $ curl http://10.1.155.106:6385 | jq '.'
    {
      "name": "OpenStack Ironic API",
      "description": "Ironic is an OpenStack project which aims to provision baremetal machines.",
      "default_version": {
        "id": "v1",
        "links": [
          {
            "href": "http://10.1.155.106:6385/v1/",
            "rel": "self"
          }
        ],
        "status": "CURRENT",
        "min_version": "1.1",
        "version": "1.72"
      },
      "versions": [
        {
          "id": "v1",
          "links": [
            {
              "href": "http://10.1.155.106:6385/v1/",
              "rel": "self"
            }
          ],
          "status": "CURRENT",
          "min_version": "1.1",
          "version": "1.72"
        }
      ]
    }
18  ops-sunbeam/fetch-libs.sh  Executable file
@@ -0,0 +1,18 @@
#!/bin/bash

# NOTE: this only fetches libs for use in unit tests here.
# Charms that depend on this library should fetch these libs themselves.

echo "WARNING: Charm interface libs are excluded from ASO python package."
charmcraft fetch-lib charms.nginx_ingress_integrator.v0.ingress
charmcraft fetch-lib charms.data_platform_libs.v0.database_requires
charmcraft fetch-lib charms.keystone_k8s.v1.identity_service
charmcraft fetch-lib charms.keystone_k8s.v0.identity_credentials
charmcraft fetch-lib charms.keystone_k8s.v0.identity_resource
charmcraft fetch-lib charms.rabbitmq_k8s.v0.rabbitmq
charmcraft fetch-lib charms.ovn_central_k8s.v0.ovsdb
charmcraft fetch-lib charms.traefik_k8s.v2.ingress
charmcraft fetch-lib charms.ceilometer_k8s.v0.ceilometer_service
charmcraft fetch-lib charms.cinder_ceph_k8s.v0.ceph_access
echo "Copying libs to unit_test dir"
rsync --recursive --delete lib/ tests/lib/
15  ops-sunbeam/ops_sunbeam/__init__.py  Normal file
@@ -0,0 +1,15 @@
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Library for shared code for ops charms."""
981  ops-sunbeam/ops_sunbeam/charm.py  Normal file
@@ -0,0 +1,981 @@
|
||||
# Copyright 2021 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Base classes for defining a charm using the Operator framework.
|
||||
|
||||
This library provided OSBaseOperatorCharm and OSBaseOperatorAPICharm. The
|
||||
charm classes use ops_sunbeam.relation_handlers.RelationHandler objects
|
||||
to interact with relations. These objects also provide contexts which
|
||||
can be used when defining templates.
|
||||
|
||||
In addition to the Relation handlers the charm class can also use
|
||||
ops_sunbeam.config_contexts.ConfigContext objects which can be
|
||||
used when rendering templates, these are not specific to a relation.
|
||||
|
||||
The charm class interacts with the containers it is managing via
|
||||
ops_sunbeam.container_handlers.PebbleHandler. The PebbleHandler
|
||||
defines the pebble layers, manages pushing configuration to the
|
||||
containers and managing the service running in the container.
|
||||
"""
|
||||
|
||||
import ipaddress
|
||||
import logging
|
||||
import urllib
|
||||
from typing import (
|
||||
List,
|
||||
Mapping,
|
||||
Optional,
|
||||
Set,
|
||||
)
|
||||
|
||||
import ops.charm
|
||||
import ops.framework
|
||||
import ops.model
|
||||
import ops.pebble
|
||||
import ops.storage
|
||||
import tenacity
|
||||
from lightkube import (
|
||||
Client,
|
||||
)
|
||||
from lightkube.resources.core_v1 import (
|
||||
Service,
|
||||
)
|
||||
from ops.charm import (
|
||||
SecretChangedEvent,
|
||||
SecretRemoveEvent,
|
||||
SecretRotateEvent,
|
||||
)
|
||||
from ops.model import (
|
||||
ActiveStatus,
|
||||
MaintenanceStatus,
|
||||
)
|
||||
|
||||
import ops_sunbeam.compound_status as compound_status
|
||||
import ops_sunbeam.config_contexts as sunbeam_config_contexts
|
||||
import ops_sunbeam.container_handlers as sunbeam_chandlers
|
||||
import ops_sunbeam.core as sunbeam_core
|
||||
import ops_sunbeam.guard as sunbeam_guard
|
||||
import ops_sunbeam.job_ctrl as sunbeam_job_ctrl
|
||||
import ops_sunbeam.relation_handlers as sunbeam_rhandlers
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class OSBaseOperatorCharm(ops.charm.CharmBase):
|
||||
"""Base charms for OpenStack operators."""
|
||||
|
||||
_state = ops.framework.StoredState()
|
||||
|
||||
# Holds set of mandatory relations
|
||||
mandatory_relations = set()
|
||||
|
||||
def __init__(self, framework: ops.framework.Framework) -> None:
|
||||
"""Run constructor."""
|
||||
super().__init__(framework)
|
||||
if isinstance(self.framework._storage, ops.storage.JujuStorage):
|
||||
raise ValueError(
|
||||
(
|
||||
"use_juju_for_storage=True is deprecated and not supported "
|
||||
"by ops_sunbeam"
|
||||
)
|
||||
)
|
||||
# unit_bootstrapped is stored in the local unit storage which is lost
|
||||
# when the pod is replaced, so this will revert to False on charm
|
||||
# upgrade or upgrade of the payload container.
|
||||
self._state.set_default(unit_bootstrapped=False)
|
||||
self.status = compound_status.Status("workload", priority=100)
|
||||
self.status_pool = compound_status.StatusPool(self)
|
||||
self.status_pool.add(self.status)
|
||||
self.relation_handlers = self.get_relation_handlers()
|
||||
self.bootstrap_status = compound_status.Status(
|
||||
"bootstrap", priority=90
|
||||
)
|
||||
self.status_pool.add(self.bootstrap_status)
|
||||
if not self.bootstrapped():
|
||||
self.bootstrap_status.set(
|
||||
MaintenanceStatus("Service not bootstrapped")
|
||||
)
|
||||
self.framework.observe(self.on.config_changed, self._on_config_changed)
|
||||
self.framework.observe(self.on.secret_changed, self._on_secret_changed)
|
||||
self.framework.observe(self.on.secret_rotate, self._on_secret_rotate)
|
||||
self.framework.observe(self.on.secret_remove, self._on_secret_remove)
|
||||
|
||||
def can_add_handler(
|
||||
self,
|
||||
relation_name: str,
|
||||
handlers: List[sunbeam_rhandlers.RelationHandler],
|
||||
) -> bool:
|
||||
"""Whether a handler for the given relation can be added."""
|
||||
if relation_name not in self.meta.relations.keys():
|
||||
logging.debug(
|
||||
f"Cannot add handler for relation {relation_name}, relation "
|
||||
"not present in charm metadata"
|
||||
)
|
||||
return False
|
||||
if relation_name in [h.relation_name for h in handlers]:
|
||||
logging.debug(
|
||||
f"Cannot add handler for relation {relation_name}, handler "
|
||||
"already present"
|
||||
)
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_relation_handlers(
|
||||
self, handlers: List[sunbeam_rhandlers.RelationHandler] = None
|
||||
) -> List[sunbeam_rhandlers.RelationHandler]:
|
||||
"""Relation handlers for the service."""
|
||||
handlers = handlers or []
|
||||
if self.can_add_handler("amqp", handlers):
|
||||
self.amqp = sunbeam_rhandlers.RabbitMQHandler(
|
||||
self,
|
||||
"amqp",
|
||||
self.configure_charm,
|
||||
self.config.get("rabbit-user") or self.service_name,
|
||||
self.config.get("rabbit-vhost") or "openstack",
|
||||
"amqp" in self.mandatory_relations,
|
||||
)
|
||||
handlers.append(self.amqp)
|
||||
self.dbs = {}
|
||||
for relation_name, database_name in self.databases.items():
|
||||
if self.can_add_handler(relation_name, handlers):
|
||||
db = sunbeam_rhandlers.DBHandler(
|
||||
self,
|
||||
relation_name,
|
||||
self.configure_charm,
|
||||
database_name,
|
||||
relation_name in self.mandatory_relations,
|
||||
)
|
||||
self.dbs[relation_name] = db
|
||||
handlers.append(db)
|
||||
if self.can_add_handler("peers", handlers):
|
||||
self.peers = sunbeam_rhandlers.BasePeerHandler(
|
||||
self, "peers", self.configure_charm, False
|
||||
)
|
||||
handlers.append(self.peers)
|
||||
if self.can_add_handler("certificates", handlers):
|
||||
self.certs = sunbeam_rhandlers.TlsCertificatesHandler(
|
||||
self,
|
||||
"certificates",
|
||||
self.configure_charm,
|
||||
sans_dns=self.get_sans_dns(),
|
||||
sans_ips=self.get_sans_ips(),
|
||||
mandatory="certificates" in self.mandatory_relations,
|
||||
)
|
||||
handlers.append(self.certs)
|
||||
if self.can_add_handler("identity-credentials", handlers):
|
||||
self.ccreds = sunbeam_rhandlers.IdentityCredentialsRequiresHandler(
|
||||
self,
|
||||
"identity-credentials",
|
||||
self.configure_charm,
|
||||
"identity-credentials" in self.mandatory_relations,
|
||||
)
|
||||
handlers.append(self.ccreds)
|
||||
if self.can_add_handler("ceph-access", handlers):
|
||||
self.ceph_access = sunbeam_rhandlers.CephAccessRequiresHandler(
|
||||
self,
|
||||
"ceph-access",
|
||||
self.configure_charm,
|
||||
"ceph-access" in self.mandatory_relations,
|
||||
)
|
||||
handlers.append(self.ceph_access)
|
||||
return handlers
|
||||
|
||||
def get_sans_ips(self) -> List[str]:
|
||||
"""Return Subject Alternate Names to use in cert for service."""
|
||||
str_ips_sans = [str(s) for s in self._ip_sans()]
|
||||
return list(set(str_ips_sans))
|
||||
|
||||
def get_sans_dns(self) -> List[str]:
|
||||
"""Return Subject Alternate Names to use in cert for service."""
|
||||
return list(set(self.get_domain_name_sans()))
|
||||
|
||||
def _ip_sans(self) -> List[ipaddress.IPv4Address]:
|
||||
"""Get IP addresses for service."""
|
||||
ip_sans = []
|
||||
for relation_name in self.meta.relations.keys():
|
||||
for relation in self.framework.model.relations.get(
|
||||
relation_name, []
|
||||
):
|
||||
binding = self.model.get_binding(relation)
|
||||
ip_sans.append(binding.network.ingress_address)
|
||||
ip_sans.append(binding.network.bind_address)
|
||||
|
||||
for binding_name in ["public"]:
|
||||
try:
|
||||
binding = self.model.get_binding(binding_name)
|
||||
ip_sans.append(binding.network.ingress_address)
|
||||
ip_sans.append(binding.network.bind_address)
|
||||
except ops.model.ModelError:
|
||||
logging.debug(f"No binding found for {binding_name}")
|
||||
return ip_sans
|
||||
|
||||
def get_domain_name_sans(self) -> List[str]:
|
||||
"""Get Domain names for service."""
|
||||
domain_name_sans = []
|
||||
for binding_config in ["admin", "internal", "public"]:
|
||||
hostname = self.config.get(f"os-{binding_config}-hostname")
|
||||
if hostname:
|
||||
domain_name_sans.append(hostname)
|
||||
return domain_name_sans
|
||||
|
||||
def check_leader_ready(self):
|
||||
"""Check the leader is reporting as ready."""
|
||||
if self.supports_peer_relation and not (
|
||||
self.unit.is_leader() or self.is_leader_ready()
|
||||
):
|
||||
raise sunbeam_guard.WaitingExceptionError("Leader not ready")
|
||||
|
||||
def check_relation_handlers_ready(self, event: ops.framework.EventBase):
|
||||
"""Check all relation handlers are ready."""
|
||||
not_ready_relations = self.get_mandatory_relations_not_ready(event)
|
||||
if not_ready_relations:
|
||||
logger.info(f"Relations {not_ready_relations} incomplete")
|
||||
self.stop_services(not_ready_relations)
|
||||
raise sunbeam_guard.WaitingExceptionError(
|
||||
"Not all relations are ready"
|
||||
)
|
||||
|
||||
def update_relations(self):
|
||||
"""Update relation data."""
|
||||
for handler in self.relation_handlers:
|
||||
try:
|
||||
handler.update_relation_data()
|
||||
except NotImplementedError:
|
||||
logging.debug(f"send_requests not implemented for {handler}")
|
||||
|
||||
def configure_unit(self, event: ops.framework.EventBase) -> None:
|
||||
"""Run configuration on this unit."""
|
||||
self.check_leader_ready()
|
||||
self.check_relation_handlers_ready(event)
|
||||
self._state.unit_bootstrapped = True
|
||||
|
||||
def configure_app_leader(self, event):
|
||||
"""Run global app setup.
|
||||
|
||||
These are tasks that should only be run once per application and only
|
||||
the leader runs them.
|
||||
"""
|
||||
self.set_leader_ready()
|
||||
|
||||
def configure_app_non_leader(self, event):
|
||||
"""Setup steps for a non-leader after leader has bootstrapped."""
|
||||
if not self.bootstrapped():
|
||||
raise sunbeam_guard.WaitingExceptionError("Leader not ready")
|
||||
|
||||
def configure_app(self, event):
|
||||
"""Check on (and run if leader) app wide tasks."""
|
||||
if self.unit.is_leader():
|
||||
self.configure_app_leader(event)
|
||||
else:
|
||||
self.configure_app_non_leader(event)
|
||||
|
||||
def post_config_setup(self):
|
||||
"""Configuration steps after services have been setup."""
|
||||
logger.info("Setting active status")
|
||||
self.status.set(ActiveStatus(""))
|
||||
|
||||
def configure_charm(self, event: ops.framework.EventBase) -> None:
|
||||
"""Catchall handler to configure charm services."""
|
||||
with sunbeam_guard.guard(self, "Bootstrapping"):
|
||||
# Publishing relation data may be dependent on something else (like
|
||||
# receiving a piece of data from the leader). To cover that
|
||||
# case, republish relation data if the relation adapter has implemented an
|
||||
# update method.
|
||||
self.update_relations()
|
||||
self.configure_unit(event)
|
||||
self.configure_app(event)
|
||||
self.bootstrap_status.set(ActiveStatus())
|
||||
self.post_config_setup()
|
||||
|
||||
def stop_services(self, relation: Optional[Set[str]] = None) -> None:
|
||||
"""Stop all running services."""
|
||||
# Machine charms should implement this function if required.
|
||||
|
||||
@property
|
||||
def supports_peer_relation(self) -> bool:
|
||||
"""Whether the charm support the peers relation."""
|
||||
return "peers" in self.meta.relations.keys()
|
||||
|
||||
@property
|
||||
def config_contexts(
|
||||
self,
|
||||
) -> List[sunbeam_config_contexts.CharmConfigContext]:
|
||||
"""Return the configuration adapters for the operator."""
|
||||
return [sunbeam_config_contexts.CharmConfigContext(self, "options")]
|
||||
|
||||
@property
|
||||
def _unused_handler_prefix(self) -> str:
|
||||
"""Prefix for handlers."""
|
||||
return self.service_name.replace("-", "_")
|
||||
|
||||
@property
|
||||
def template_dir(self) -> str:
|
||||
"""Directory containing Jinja2 templates."""
|
||||
return "src/templates"
|
||||
|
||||
@property
|
||||
def databases(self) -> Mapping[str, str]:
|
||||
"""Return a mapping of database relation names to database names.
|
||||
|
||||
Use this to define the databases required by an application.
|
||||
|
||||
All entries here
|
||||
that have a corresponding relation defined in metadata
|
||||
will automatically have a DBHandler instance set up for it,
|
||||
and assigned to `charm.dbs[relation_name]`.
|
||||
Entries that don't have a matching relation in metadata
|
||||
will be ignored.
|
||||
Note that the relation interface type is expected to be 'mysql_client'.
|
||||
|
||||
It defaults to loading a relation named "database",
|
||||
with the database named after the service name.
|
||||
"""
|
||||
return {"database": self.service_name.replace("-", "_")}
|
||||
|
||||
def _on_config_changed(self, event: ops.framework.EventBase) -> None:
|
||||
self.configure_charm(event)
|
||||
|
||||
def _on_secret_changed(self, event: SecretChangedEvent) -> None:
|
||||
# By default, read the latest content of the secret;
|
||||
# this allows Juju to trigger the secret-remove
|
||||
# event for the old revision.
|
||||
event.secret.get_content(refresh=True)
|
||||
self.configure_charm(event)
|
||||
|
||||
def _on_secret_rotate(self, event: SecretRotateEvent) -> None:
|
||||
# Placeholder to handle secret rotate event
|
||||
# charms should handle the event if required
|
||||
pass
|
||||
|
||||
def _on_secret_remove(self, event: SecretRemoveEvent) -> None:
|
||||
# Placeholder to handle secret remove event
|
||||
# charms should handle the event if required
|
||||
pass
|
||||
|
||||
def check_broken_relations(
|
||||
self, relations: set, event: ops.framework.EventBase
|
||||
) -> set:
|
||||
"""Return all broken relations on given set of relations."""
|
||||
broken_relations = set()
|
||||
|
||||
# Check for each relation whether the event is a gone-away event.
|
||||
# lazy import the events
|
||||
# Note: Ceph relation not handled as there is no gone away event.
|
||||
for relation in relations:
|
||||
_is_broken = False
|
||||
match relation:
|
||||
case "database" | "api-database" | "cell-database":
|
||||
from ops.charm import (
|
||||
RelationBrokenEvent,
|
||||
)
|
||||
|
||||
if isinstance(event, RelationBrokenEvent):
|
||||
_is_broken = True
|
||||
case "ingress-public" | "ingress-internal":
|
||||
from charms.traefik_k8s.v2.ingress import (
|
||||
IngressPerAppRevokedEvent,
|
||||
)
|
||||
|
||||
if isinstance(event, IngressPerAppRevokedEvent):
|
||||
_is_broken = True
|
||||
case "identity-service":
|
||||
from charms.keystone_k8s.v1.identity_service import (
|
||||
IdentityServiceGoneAwayEvent,
|
||||
)
|
||||
|
||||
if isinstance(event, IdentityServiceGoneAwayEvent):
|
||||
_is_broken = True
|
||||
case "amqp":
|
||||
from charms.rabbitmq_k8s.v0.rabbitmq import (
|
||||
RabbitMQGoneAwayEvent,
|
||||
)
|
||||
|
||||
if isinstance(event, RabbitMQGoneAwayEvent):
|
||||
_is_broken = True
|
||||
case "certificates":
|
||||
from charms.tls_certificates_interface.v1.tls_certificates import (
|
||||
CertificateExpiredEvent,
|
||||
)
|
||||
|
||||
if isinstance(event, CertificateExpiredEvent):
|
||||
_is_broken = True
|
||||
case "ovsdb-cms":
|
||||
from charms.ovn_central_k8s.v0.ovsdb import (
|
||||
OVSDBCMSGoneAwayEvent,
|
||||
)
|
||||
|
||||
if isinstance(event, OVSDBCMSGoneAwayEvent):
|
||||
_is_broken = True
|
||||
case "identity-credentials":
|
||||
from charms.keystone_k8s.v0.identity_credentials import (
|
||||
IdentityCredentialsGoneAwayEvent,
|
||||
)
|
||||
|
||||
if isinstance(event, IdentityCredentialsGoneAwayEvent):
|
||||
_is_broken = True
|
||||
case "identity-ops":
|
||||
from charms.keystone_k8s.v0.identity_resource import (
|
||||
IdentityOpsProviderGoneAwayEvent,
|
||||
)
|
||||
|
||||
if isinstance(event, IdentityOpsProviderGoneAwayEvent):
|
||||
_is_broken = True
|
||||
case "gnocchi-db":
|
||||
from charms.gnocchi_k8s.v0.gnocchi_service import (
|
||||
GnocchiServiceGoneAwayEvent,
|
||||
)
|
||||
|
||||
if isinstance(event, GnocchiServiceGoneAwayEvent):
|
||||
_is_broken = True
|
||||
case "ceph-access":
|
||||
from charms.cinder_ceph_k8s.v0.ceph_access import (
|
||||
CephAccessGoneAwayEvent,
|
||||
)
|
||||
|
||||
if isinstance(event, CephAccessGoneAwayEvent):
|
||||
_is_broken = True
|
||||
case "dns-backend":
|
||||
from charms.designate_bind_k8s.v0.bind_rndc import (
|
||||
BindRndcGoneAwayEvent,
|
||||
)
|
||||
|
||||
if isinstance(event, BindRndcGoneAwayEvent):
|
||||
_is_broken = True
|
||||
|
||||
if _is_broken:
|
||||
broken_relations.add(relation)
|
||||
|
||||
return broken_relations
|
||||
|
||||
def get_mandatory_relations_not_ready(
|
||||
self, event: ops.framework.EventBase
|
||||
) -> Set[str]:
|
||||
"""Get mandatory relations that are not ready for use."""
|
||||
ready_relations = {
|
||||
handler.relation_name
|
||||
for handler in self.relation_handlers
|
||||
if handler.mandatory and handler.ready
|
||||
}
|
||||
|
||||
# The relation data for broken relations is not cleared during
|
||||
# processing of the gone-away event. This is a temporary workaround
|
||||
# to mark broken relations as not ready.
|
||||
# The workaround can be removed once the below bug is resolved
|
||||
# https://bugs.launchpad.net/juju/+bug/2024583
|
||||
# https://github.com/canonical/operator/issues/940
|
||||
broken_relations = self.check_broken_relations(ready_relations, event)
|
||||
ready_relations = ready_relations.difference(broken_relations)
|
||||
|
||||
not_ready_relations = self.mandatory_relations.difference(
|
||||
ready_relations
|
||||
)
|
||||
|
||||
return not_ready_relations
|
||||
|
||||
def contexts(self) -> sunbeam_core.OPSCharmContexts:
|
||||
"""Construct context for rendering templates."""
|
||||
ra = sunbeam_core.OPSCharmContexts(self)
|
||||
for handler in self.relation_handlers:
|
||||
if handler.relation_name not in self.meta.relations.keys():
|
||||
logger.info(
|
||||
f"Dropping handler for relation {handler.relation_name}, "
|
||||
"relation not present in charm metadata"
|
||||
)
|
||||
continue
|
||||
if handler.ready:
|
||||
ra.add_relation_handler(handler)
|
||||
ra.add_config_contexts(self.config_contexts)
|
||||
return ra
|
||||
|
||||
def bootstrapped(self) -> bool:
|
||||
"""Determine whether the service has been bootstrapped."""
|
||||
return self._state.unit_bootstrapped and self.is_leader_ready()
|
||||
|
||||
def leader_set(self, settings: dict = None, **kwargs) -> None:
|
||||
"""Juju set data in peer data bag."""
|
||||
settings = settings or {}
|
||||
settings.update(kwargs)
|
||||
self.peers.set_app_data(settings=settings)
|
||||
|
||||
def leader_get(self, key: str) -> str:
|
||||
"""Retrieve data from the peer relation."""
|
||||
return self.peers.get_app_data(key)
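# Usage sketch (the key and value are assumptions): the leader publishes a
# value to the peer application data bag and any unit reads it back.
#
#     if self.unit.is_leader():
#         self.leader_set({"shared-secret": secret})
#     secret = self.leader_get("shared-secret")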
|
||||
|
||||
def set_leader_ready(self) -> None:
|
||||
"""Tell peers that the leader is ready."""
|
||||
try:
|
||||
self.peers.set_leader_ready()
|
||||
except AttributeError:
|
||||
logging.warning("Cannot set leader ready as peer relation missing")
|
||||
|
||||
def is_leader_ready(self) -> bool:
|
||||
"""Has the lead unit announced that it is ready."""
|
||||
leader_ready = False
|
||||
try:
|
||||
leader_ready = self.peers.is_leader_ready()
|
||||
except AttributeError:
|
||||
logging.warning(
|
||||
"Cannot check leader ready as peer relation missing. "
|
||||
"Assuming it is ready."
|
||||
)
|
||||
leader_ready = True
|
||||
return leader_ready
|
||||
|
||||
|
||||
class OSBaseOperatorCharmK8S(OSBaseOperatorCharm):
|
||||
"""Base charm class for k8s based charms."""
|
||||
|
||||
def __init__(self, framework: ops.framework.Framework) -> None:
|
||||
"""Run constructor."""
|
||||
super().__init__(framework)
|
||||
self.pebble_handlers = self.get_pebble_handlers()
|
||||
|
||||
def get_pebble_handlers(self) -> List[sunbeam_chandlers.PebbleHandler]:
|
||||
"""Pebble handlers for the operator."""
|
||||
return [
|
||||
sunbeam_chandlers.PebbleHandler(
|
||||
self,
|
||||
self.service_name,
|
||||
self.service_name,
|
||||
self.container_configs,
|
||||
self.template_dir,
|
||||
self.configure_charm,
|
||||
)
|
||||
]
|
||||
|
||||
def get_named_pebble_handler(
|
||||
self, container_name: str
|
||||
) -> sunbeam_chandlers.PebbleHandler:
|
||||
"""Get pebble handler matching container_name."""
|
||||
pebble_handlers = [
|
||||
h
|
||||
for h in self.pebble_handlers
|
||||
if h.container_name == container_name
|
||||
]
|
||||
assert len(pebble_handlers) < 2, (
|
||||
"Multiple pebble handlers with the " "same name found."
|
||||
)
|
||||
if pebble_handlers:
|
||||
return pebble_handlers[0]
|
||||
else:
|
||||
return None
|
||||
|
||||
def get_named_pebble_handlers(
|
||||
self, container_names: List[str]
|
||||
) -> List[sunbeam_chandlers.PebbleHandler]:
|
||||
"""Get pebble handlers matching container_names."""
|
||||
return [
|
||||
h
|
||||
for h in self.pebble_handlers
|
||||
if h.container_name in container_names
|
||||
]
|
||||
|
||||
def init_container_services(self):
|
||||
"""Run init on pebble handlers that are ready."""
|
||||
for ph in self.pebble_handlers:
|
||||
if ph.pebble_ready:
|
||||
logging.debug(f"Running init for {ph.service_name}")
|
||||
ph.init_service(self.contexts())
|
||||
else:
|
||||
logging.debug(
|
||||
f"Not running init for {ph.service_name},"
|
||||
" container not ready"
|
||||
)
|
||||
raise sunbeam_guard.WaitingExceptionError(
|
||||
"Payload container not ready"
|
||||
)
|
||||
|
||||
def check_pebble_handlers_ready(self):
|
||||
"""Check pebble handlers are ready."""
|
||||
for ph in self.pebble_handlers:
|
||||
if not ph.service_ready:
|
||||
logging.debug(
|
||||
f"Aborting container {ph.service_name} service not ready"
|
||||
)
|
||||
raise sunbeam_guard.WaitingExceptionError(
|
||||
"Container service not ready"
|
||||
)
|
||||
|
||||
def stop_services(self, relation: Optional[Set[str]] = None) -> None:
|
||||
"""Stop all running services."""
|
||||
for ph in self.pebble_handlers:
|
||||
if ph.pebble_ready:
|
||||
logging.debug(
|
||||
f"Stopping all services in container {ph.container_name}"
|
||||
)
|
||||
ph.stop_all()
|
||||
|
||||
def configure_unit(self, event: ops.framework.EventBase) -> None:
|
||||
"""Run configuration on this unit."""
|
||||
self.check_leader_ready()
|
||||
self.check_relation_handlers_ready(event)
|
||||
self.open_ports()
|
||||
self.init_container_services()
|
||||
self.check_pebble_handlers_ready()
|
||||
self.run_db_sync()
|
||||
self._state.unit_bootstrapped = True
|
||||
|
||||
def add_pebble_health_checks(self):
|
||||
"""Add health checks for services in payload containers."""
|
||||
for ph in self.pebble_handlers:
|
||||
ph.add_healthchecks()
|
||||
|
||||
def post_config_setup(self):
|
||||
"""Configuration steps after services have been setup."""
|
||||
self.add_pebble_health_checks()
|
||||
logger.info("Setting active status")
|
||||
self.status.set(ActiveStatus(""))
|
||||
|
||||
@property
|
||||
def container_configs(self) -> List[sunbeam_core.ContainerConfigFile]:
|
||||
"""Container configuration files for the operator."""
|
||||
return []
|
||||
|
||||
@property
|
||||
def container_names(self) -> List[str]:
|
||||
"""Names of Containers that form part of this service."""
|
||||
return [self.service_name]
|
||||
|
||||
def containers_ready(self) -> bool:
|
||||
"""Determine whether all containers are ready for configuration."""
|
||||
for ph in self.pebble_handlers:
|
||||
if not ph.service_ready:
|
||||
logger.info(f"Container incomplete: {ph.container_name}")
|
||||
return False
|
||||
return True
|
||||
|
||||
@property
|
||||
def db_sync_container_name(self) -> str:
|
||||
"""Name of Containerto run db sync from."""
|
||||
return self.service_name
|
||||
|
||||
@tenacity.retry(
|
||||
stop=tenacity.stop_after_attempt(3),
|
||||
retry=(
|
||||
tenacity.retry_if_exception_type(ops.pebble.ChangeError)
|
||||
| tenacity.retry_if_exception_type(ops.pebble.ExecError)
|
||||
),
|
||||
after=tenacity.after_log(logger, logging.WARNING),
|
||||
wait=tenacity.wait_exponential(multiplier=1, min=10, max=300),
|
||||
)
|
||||
def _retry_db_sync(self, cmd):
|
||||
container = self.unit.get_container(self.db_sync_container_name)
|
||||
logging.debug("Running sync: \n%s", cmd)
|
||||
process = container.exec(cmd, timeout=5 * 60)
|
||||
out, warnings = process.wait_output()
|
||||
if warnings:
|
||||
for line in warnings.splitlines():
|
||||
logger.warning("DB Sync Out: %s", line.strip())
|
||||
logging.debug("Output from database sync: \n%s", out)
|
||||
|
||||
@sunbeam_job_ctrl.run_once_per_unit("db-sync")
|
||||
def run_db_sync(self) -> None:
|
||||
"""Run DB sync to init DB.
|
||||
|
||||
:raises: pebble.ExecError
|
||||
"""
|
||||
if not self.unit.is_leader():
|
||||
logging.info("Not lead unit, skipping DB syncs")
|
||||
return
|
||||
try:
|
||||
if self.db_sync_cmds:
|
||||
logger.info("Syncing database...")
|
||||
for cmd in self.db_sync_cmds:
|
||||
try:
|
||||
self._retry_db_sync(cmd)
|
||||
except tenacity.RetryError:
|
||||
raise sunbeam_guard.BlockedExceptionError(
|
||||
"DB sync failed"
|
||||
)
|
||||
except AttributeError:
|
||||
logger.warning(
|
||||
"Not DB sync ran. Charm does not specify self.db_sync_cmds"
|
||||
)
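# Illustrative sketch (command and service names are assumptions): a charm
# opts into the sync above by defining db_sync_cmds, each entry a command
# list executed in the db_sync_container_name container.
#
#     db_sync_cmds = [
#         ["sudo", "-u", "keystone", "keystone-manage", "db_sync"],
#     ]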
|
||||
|
||||
def open_ports(self):
|
||||
"""Register ports in underlying cloud."""
|
||||
pass
|
||||
|
||||
|
||||
class OSBaseOperatorAPICharm(OSBaseOperatorCharmK8S):
|
||||
"""Base class for OpenStack API operators."""
|
||||
|
||||
mandatory_relations = {"database", "identity-service", "ingress-public"}
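# Subclasses can widen this set, e.g. (illustrative; the extra relation name
# is an assumption):
#
#     mandatory_relations = OSBaseOperatorAPICharm.mandatory_relations | {"amqp"}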
|
||||
|
||||
def __init__(self, framework: ops.framework.Framework) -> None:
|
||||
"""Run constructor."""
|
||||
super().__init__(framework)
|
||||
|
||||
@property
|
||||
def service_endpoints(self) -> List[dict]:
|
||||
"""List of endpoints for this service."""
|
||||
return []
|
||||
|
||||
def get_relation_handlers(
|
||||
self, handlers: List[sunbeam_rhandlers.RelationHandler] = None
|
||||
) -> List[sunbeam_rhandlers.RelationHandler]:
|
||||
"""Relation handlers for the service."""
|
||||
handlers = handlers or []
|
||||
# Note: intentionally including the ingress handler here in order to
|
||||
# be able to link the ingress and identity-service handlers.
|
||||
if self.can_add_handler("ingress-internal", handlers):
|
||||
self.ingress_internal = sunbeam_rhandlers.IngressInternalHandler(
|
||||
self,
|
||||
"ingress-internal",
|
||||
self.service_name,
|
||||
self.default_public_ingress_port,
|
||||
self._ingress_changed,
|
||||
"ingress-internal" in self.mandatory_relations,
|
||||
)
|
||||
handlers.append(self.ingress_internal)
|
||||
if self.can_add_handler("ingress-public", handlers):
|
||||
self.ingress_public = sunbeam_rhandlers.IngressPublicHandler(
|
||||
self,
|
||||
"ingress-public",
|
||||
self.service_name,
|
||||
self.default_public_ingress_port,
|
||||
self._ingress_changed,
|
||||
"ingress-public" in self.mandatory_relations,
|
||||
)
|
||||
handlers.append(self.ingress_public)
|
||||
if self.can_add_handler("identity-service", handlers):
|
||||
self.id_svc = sunbeam_rhandlers.IdentityServiceRequiresHandler(
|
||||
self,
|
||||
"identity-service",
|
||||
self.configure_charm,
|
||||
self.service_endpoints,
|
||||
self.model.config["region"],
|
||||
"identity-service" in self.mandatory_relations,
|
||||
)
|
||||
handlers.append(self.id_svc)
|
||||
return super().get_relation_handlers(handlers)
|
||||
|
||||
def _ingress_changed(self, event: ops.framework.EventBase) -> None:
|
||||
"""Ingress changed callback.
|
||||
|
||||
Invoked when the data on the ingress relation has changed. This will
|
||||
update the relevant endpoints with the identity service, and then
|
||||
call the configure_charm.
|
||||
"""
|
||||
logger.debug("Received an ingress_changed event")
|
||||
try:
|
||||
if self.id_svc.update_service_endpoints:
|
||||
logger.debug(
|
||||
"Updating service endpoints after ingress "
|
||||
"relation changed."
|
||||
)
|
||||
self.id_svc.update_service_endpoints(self.service_endpoints)
|
||||
except (AttributeError, KeyError):
|
||||
pass
|
||||
|
||||
self.configure_charm(event)
|
||||
|
||||
def service_url(self, hostname: str) -> str:
|
||||
"""Service url for accessing this service via the given hostname."""
|
||||
return f"http://{hostname}:{self.default_public_ingress_port}"
|
||||
|
||||
@property
|
||||
def public_ingress_address(self) -> str:
|
||||
"""IP address or hostname for access to this service."""
|
||||
svc_hostname = self.model.config.get("os-public-hostname")
|
||||
if svc_hostname:
|
||||
return svc_hostname
|
||||
|
||||
client = Client()
|
||||
charm_service = client.get(
|
||||
Service, name=self.app.name, namespace=self.model.name
|
||||
)
|
||||
|
||||
status = charm_service.status
|
||||
if status:
|
||||
load_balancer_status = status.loadBalancer
|
||||
if load_balancer_status:
|
||||
ingress_addresses = load_balancer_status.ingress
|
||||
if ingress_addresses:
|
||||
logger.debug(
|
||||
"Found ingress addresses on loadbalancer " "status"
|
||||
)
|
||||
ingress_address = ingress_addresses[0]
|
||||
addr = ingress_address.hostname or ingress_address.ip
|
||||
if addr:
|
||||
logger.debug(
|
||||
"Using ingress address from loadbalancer "
|
||||
f"as {addr}"
|
||||
)
|
||||
return ingress_address.hostname or ingress_address.ip
|
||||
|
||||
hostname = self.model.get_binding(
|
||||
"identity-service"
|
||||
).network.ingress_address
|
||||
return hostname
|
||||
|
||||
@property
|
||||
def public_url(self) -> str:
|
||||
"""Url for accessing the public endpoint for this service."""
|
||||
try:
|
||||
if self.ingress_public.url:
|
||||
logger.debug(
|
||||
"Ingress-public relation found, returning "
|
||||
"ingress-public.url of: %s",
|
||||
self.ingress_public.url,
|
||||
)
|
||||
return self.add_explicit_port(self.ingress_public.url)
|
||||
except (AttributeError, KeyError):
|
||||
pass
|
||||
|
||||
return self.add_explicit_port(
|
||||
self.service_url(self.public_ingress_address)
|
||||
)
|
||||
|
||||
@property
|
||||
def admin_url(self) -> str:
|
||||
"""Url for accessing the admin endpoint for this service."""
|
||||
hostname = self.model.get_binding(
|
||||
"identity-service"
|
||||
).network.ingress_address
|
||||
return self.add_explicit_port(self.service_url(hostname))
|
||||
|
||||
@property
|
||||
def internal_url(self) -> str:
|
||||
"""Url for accessing the internal endpoint for this service."""
|
||||
try:
|
||||
if self.ingress_internal.url:
|
||||
logger.debug(
|
||||
"Ingress-internal relation found, returning "
|
||||
"ingress_internal.url of: %s",
|
||||
self.ingress_internal.url,
|
||||
)
|
||||
return self.add_explicit_port(self.ingress_internal.url)
|
||||
except (AttributeError, KeyError):
|
||||
pass
|
||||
|
||||
hostname = self.model.get_binding(
|
||||
"identity-service"
|
||||
).network.ingress_address
|
||||
return self.add_explicit_port(self.service_url(hostname))
|
||||
|
||||
def get_pebble_handlers(self) -> List[sunbeam_chandlers.PebbleHandler]:
|
||||
"""Pebble handlers for the service."""
|
||||
return [
|
||||
sunbeam_chandlers.WSGIPebbleHandler(
|
||||
self,
|
||||
self.service_name,
|
||||
self.service_name,
|
||||
self.container_configs,
|
||||
self.template_dir,
|
||||
self.configure_charm,
|
||||
f"wsgi-{self.service_name}",
|
||||
)
|
||||
]
|
||||
|
||||
@property
|
||||
def container_configs(self) -> List[sunbeam_core.ContainerConfigFile]:
|
||||
"""Container configuration files for the service."""
|
||||
_cconfigs = super().container_configs
|
||||
_cconfigs.extend(
|
||||
[
|
||||
sunbeam_core.ContainerConfigFile(
|
||||
self.service_conf,
|
||||
self.service_user,
|
||||
self.service_group,
|
||||
)
|
||||
]
|
||||
)
|
||||
return _cconfigs
|
||||
|
||||
@property
|
||||
def service_user(self) -> str:
|
||||
"""Service user file and directory ownership."""
|
||||
return self.service_name
|
||||
|
||||
@property
|
||||
def service_group(self) -> str:
|
||||
"""Service group file and directory ownership."""
|
||||
return self.service_name
|
||||
|
||||
@property
|
||||
def service_conf(self) -> str:
|
||||
"""Service default configuration file."""
|
||||
return f"/etc/{self.service_name}/{self.service_name}.conf"
|
||||
|
||||
@property
|
||||
def config_contexts(self) -> List[sunbeam_config_contexts.ConfigContext]:
|
||||
"""Generate list of configuration adapters for the charm."""
|
||||
_cadapters = super().config_contexts
|
||||
_cadapters.extend(
|
||||
[
|
||||
sunbeam_config_contexts.WSGIWorkerConfigContext(
|
||||
self, "wsgi_config"
|
||||
)
|
||||
]
|
||||
)
|
||||
return _cadapters
|
||||
|
||||
@property
|
||||
def wsgi_container_name(self) -> str:
|
||||
"""Name of the WSGI application container."""
|
||||
return self.service_name
|
||||
|
||||
@property
|
||||
def default_public_ingress_port(self) -> int:
|
||||
"""Port to use for ingress access to service."""
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def db_sync_container_name(self) -> str:
|
||||
"""Name of Containerto run db sync from."""
|
||||
return self.wsgi_container_name
|
||||
|
||||
@property
|
||||
def healthcheck_period(self) -> str:
|
||||
"""Healthcheck period for the service."""
|
||||
return "10s" # Default value in pebble
|
||||
|
||||
@property
|
||||
def healthcheck_http_url(self) -> str:
|
||||
"""Healthcheck HTTP URL for the service."""
|
||||
return f"http://localhost:{self.default_public_ingress_port}/"
|
||||
|
||||
@property
|
||||
def healthcheck_http_timeout(self) -> str:
|
||||
"""Healthcheck HTTP timeout for the service."""
|
||||
return "3s"
|
||||
|
||||
def open_ports(self):
|
||||
"""Register ports in underlying cloud."""
|
||||
self.unit.open_port("tcp", self.default_public_ingress_port)
|
||||
|
||||
def add_explicit_port(self, org_url: str) -> str:
|
||||
"""Update a url to add an explicit port.
|
||||
|
||||
Keystone auth endpoint parsing can give odd results if
|
||||
an explicit port is missing.
|
||||
"""
|
||||
url = urllib.parse.urlparse(org_url)
|
||||
new_netloc = url.netloc
|
||||
if not url.port:
|
||||
if url.scheme == "http":
|
||||
new_netloc = url.netloc + ":80"
|
||||
elif url.scheme == "https":
|
||||
new_netloc = url.netloc + ":443"
|
||||
return urllib.parse.urlunparse(
|
||||
(
|
||||
url.scheme,
|
||||
new_netloc,
|
||||
url.path,
|
||||
url.params,
|
||||
url.query,
|
||||
url.fragment,
|
||||
)
|
||||
)
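# Usage sketch (hostname is an assumption):
#
#     self.add_explicit_port("http://keystone.example.com/v3")
#     # -> "http://keystone.example.com:80/v3"
#     self.add_explicit_port("https://keystone.example.com/v3")
#     # -> "https://keystone.example.com:443/v3"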
|
250
ops-sunbeam/ops_sunbeam/compound_status.py
Normal file
@@ -0,0 +1,250 @@
|
||||
# Copyright 2022 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""A mini library for tracking status messages.
|
||||
|
||||
We want this because keeping track of everything
|
||||
with a single unit.status is too difficult.
|
||||
|
||||
The user will still see a single status and message
|
||||
(one deemed to be the highest priority),
|
||||
but the charm can easily set the status of various
|
||||
aspects of the application without clobbering other parts.
|
||||
"""
|
||||
import json
|
||||
import logging
|
||||
from typing import (
|
||||
Callable,
|
||||
Dict,
|
||||
Optional,
|
||||
Tuple,
|
||||
)
|
||||
|
||||
from ops.charm import (
|
||||
CharmBase,
|
||||
)
|
||||
from ops.framework import (
|
||||
CommitEvent,
|
||||
Handle,
|
||||
Object,
|
||||
StoredStateData,
|
||||
)
|
||||
from ops.model import (
|
||||
ActiveStatus,
|
||||
StatusBase,
|
||||
UnknownStatus,
|
||||
WaitingStatus,
|
||||
)
|
||||
from ops.storage import (
|
||||
NoSnapshotError,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
STATUS_PRIORITIES = {
|
||||
"blocked": 1,
|
||||
"waiting": 2,
|
||||
"maintenance": 3,
|
||||
"active": 4,
|
||||
"unknown": 5,
|
||||
}
|
||||
|
||||
|
||||
class Status:
|
||||
"""An atomic status.
|
||||
|
||||
A wrapper around a StatusBase from ops,
|
||||
that adds a priority, label,
|
||||
and methods for use with a pool of statuses.
|
||||
"""
|
||||
|
||||
def __init__(self, label: str, priority: int = 0) -> None:
|
||||
"""Create a new Status object.
|
||||
|
||||
label: string label
|
||||
priority: integer, higher number is higher priority, default is 0
|
||||
"""
|
||||
self.label: str = label
|
||||
self._priority: int = priority
|
||||
self.never_set = True
|
||||
|
||||
# The actual status of this Status object.
|
||||
# Use `self.set(...)` to update it.
|
||||
self.status: StatusBase = UnknownStatus()
|
||||
|
||||
# if on_update is set,
|
||||
# it will be called as a function with no arguments
|
||||
# whenever the status is set.
|
||||
self.on_update: Optional[Callable[[], None]] = None
|
||||
|
||||
def set(self, status: StatusBase) -> None:
|
||||
"""Set the status.
|
||||
|
||||
Will also run the on_update hook if available
|
||||
(should be set by the pool so the pool knows when it should update).
|
||||
"""
|
||||
self.status = status
|
||||
self.never_set = False
|
||||
if self.on_update is not None:
|
||||
self.on_update()
|
||||
|
||||
def message(self) -> str:
|
||||
"""Get the status message consistently.
|
||||
|
||||
Useful because UnknownStatus has no message attribute.
|
||||
"""
|
||||
if self.status.name == "unknown":
|
||||
return ""
|
||||
return self.status.message
|
||||
|
||||
def priority(self) -> Tuple[int, int]:
|
||||
"""Return a value to use for sorting statuses by priority.
|
||||
|
||||
Used by the pool to retrieve the highest priority status
|
||||
to display to the user.
|
||||
"""
|
||||
return STATUS_PRIORITIES[self.status.name], -self._priority
|
||||
|
||||
def _serialize(self) -> dict:
|
||||
"""Serialize Status for storage."""
|
||||
return {
|
||||
"status": self.status.name,
|
||||
"message": self.message(),
|
||||
}
|
||||
|
||||
|
||||
class StatusPool(Object):
|
||||
"""A pool of Status objects.
|
||||
|
||||
This is implemented as an `Object`,
|
||||
so we can more simply save state between hook executions.
|
||||
"""
|
||||
|
||||
def __init__(self, charm: CharmBase) -> None:
|
||||
"""Init the status pool and restore from stored state if available.
|
||||
|
||||
Note that instantiating more than one StatusPool here is not supported,
|
||||
due to hardcoded framework stored data IDs.
|
||||
If we want that in the future,
|
||||
we'll need to generate a custom deterministic ID.
|
||||
I can't think of any cases where
|
||||
more than one StatusPool is required though...
|
||||
"""
|
||||
super().__init__(charm, "status_pool")
|
||||
self._pool: Dict[str, Status] = {}
|
||||
self._charm = charm
|
||||
|
||||
# Restore info from the charm's state.
|
||||
# We need to do this on init,
|
||||
# so we can retain previous statuses that were set.
|
||||
charm.framework.register_type(
|
||||
StoredStateData, self, StoredStateData.handle_kind
|
||||
)
|
||||
stored_handle = Handle(
|
||||
self, StoredStateData.handle_kind, "_status_pool"
|
||||
)
|
||||
|
||||
try:
|
||||
self._state = charm.framework.load_snapshot(stored_handle)
|
||||
status_state = json.loads(self._state["statuses"])
|
||||
except NoSnapshotError:
|
||||
self._state = StoredStateData(self, "_status_pool")
|
||||
status_state = []
|
||||
self._status_state = status_state
|
||||
|
||||
# 'commit' is an ops framework event
|
||||
# that tells the object to save a snapshot of its state for later.
|
||||
charm.framework.observe(charm.framework.on.commit, self._on_commit)
|
||||
|
||||
def add(self, status: Status) -> None:
|
||||
"""Idempotently add a status object to the pool.
|
||||
|
||||
Reconstitute from saved state if it's a new status.
|
||||
"""
|
||||
if (
|
||||
status.never_set
|
||||
and status.label in self._status_state
|
||||
and status.label not in self._pool
|
||||
):
|
||||
# If this status hasn't been seen or set yet,
|
||||
# and we have saved state for it,
|
||||
# then reconstitute it.
|
||||
# This allows us to retain statuses across hook invocations.
|
||||
saved = self._status_state[status.label]
|
||||
status.status = StatusBase.from_name(
|
||||
saved["status"],
|
||||
saved["message"],
|
||||
)
|
||||
|
||||
self._pool[status.label] = status
|
||||
status.on_update = self.on_update
|
||||
self.on_update()
|
||||
|
||||
def summarise(self) -> str:
|
||||
"""Return a human readable summary of all the statuses in the pool.
|
||||
|
||||
Will be a multi-line string.
|
||||
"""
|
||||
lines = []
|
||||
for status in sorted(self._pool.values(), key=lambda x: x.priority()):
|
||||
lines.append(
|
||||
"{label:>30}: {status:>10} | {message}".format(
|
||||
label=status.label,
|
||||
message=status.message(),
|
||||
status=status.status.name,
|
||||
)
|
||||
)
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
def _on_commit(self, _event: CommitEvent) -> None:
|
||||
"""Store the current state of statuses.
|
||||
|
||||
So we can restore them on the next run of the charm.
|
||||
"""
|
||||
self._state["statuses"] = json.dumps(
|
||||
{
|
||||
status.label: status._serialize()
|
||||
for status in self._pool.values()
|
||||
}
|
||||
)
|
||||
self._charm.framework.save_snapshot(self._state)
|
||||
self._charm.framework._storage.commit()
|
||||
|
||||
def on_update(self) -> None:
|
||||
"""Update the unit status with the current highest priority status.
|
||||
|
||||
Use as a hook to run whenever a status is updated in the pool.
|
||||
"""
|
||||
status = (
|
||||
sorted(self._pool.values(), key=lambda x: x.priority())[0]
|
||||
if self._pool
|
||||
else None
|
||||
)
|
||||
if status is None or status.status.name == "unknown":
|
||||
self._charm.unit.status = WaitingStatus("no status set yet")
|
||||
elif status.status.name == "active" and not status.message():
|
||||
# Avoid status name prefix if everything is active with no message.
|
||||
# If there's a message, then we want the prefix
|
||||
# to help identify where the message originates.
|
||||
self._charm.unit.status = ActiveStatus("")
|
||||
else:
|
||||
message = status.message()
|
||||
self._charm.unit.status = StatusBase.from_name(
|
||||
status.status.name,
|
||||
"({}){}".format(
|
||||
status.label,
|
||||
" " + message if message else "",
|
||||
),
|
||||
)
|
128
ops-sunbeam/ops_sunbeam/config_contexts.py
Normal file
@@ -0,0 +1,128 @@
|
||||
# Copyright 2021 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Base classes for defining a charm using the Operator framework.
|
||||
|
||||
ConfigContext objects can be used when rendering templates. The idea is to
|
||||
create reusable contexts which translate charm config, deployment state etc.
|
||||
These are not specific to a relation.
|
||||
"""
|
||||
|
||||
from __future__ import (
|
||||
annotations,
|
||||
)
|
||||
|
||||
import logging
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import ops_sunbeam.charm
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# XXX Duplicating definition in relation handlers
|
||||
ERASURE_CODED = "erasure-coded"
|
||||
REPLICATED = "replicated"
|
||||
|
||||
|
||||
class ConfigContext:
|
||||
"""Base class used for creating a config context."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
charm: "ops_sunbeam.charm.OSBaseOperatorCharm",
|
||||
namespace: str,
|
||||
) -> None:
|
||||
"""Run constructor."""
|
||||
self.charm = charm
|
||||
self.namespace = namespace
|
||||
for k, v in self.context().items():
|
||||
k = k.replace("-", "_")
|
||||
setattr(self, k, v)
|
||||
|
||||
@property
|
||||
def ready(self) -> bool:
|
||||
"""Whether the context has all the data is needs."""
|
||||
return True
|
||||
|
||||
def context(self) -> dict:
|
||||
"""Context used when rendering templates."""
|
||||
raise NotImplementedError
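# Illustrative subclass sketch (the context name and config key are
# assumptions): a charm-specific context exposes derived values to templates
# under whatever namespace the charm registers it with.
#
#     class DebugConfigContext(ConfigContext):
#         """Expose a debug flag computed from charm config."""
#
#         def context(self) -> dict:
#             return {"debug": bool(self.charm.config.get("debug"))}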
|
||||
|
||||
|
||||
class CharmConfigContext(ConfigContext):
|
||||
"""A context containing all of the charms config options."""
|
||||
|
||||
def context(self) -> dict:
|
||||
"""Charms config options."""
|
||||
return self.charm.config
|
||||
|
||||
|
||||
class WSGIWorkerConfigContext(ConfigContext):
|
||||
"""Configuration context for WSGI configuration."""
|
||||
|
||||
def context(self) -> dict:
|
||||
"""WSGI configuration options."""
|
||||
return {
|
||||
"name": self.charm.service_name,
|
||||
"public_port": self.charm.default_public_ingress_port,
|
||||
"user": self.charm.service_user,
|
||||
"group": self.charm.service_group,
|
||||
"wsgi_admin_script": self.charm.wsgi_admin_script,
|
||||
"wsgi_public_script": self.charm.wsgi_public_script,
|
||||
"error_log": "/dev/stdout",
|
||||
"custom_log": "/dev/stdout",
|
||||
}
|
||||
|
||||
|
||||
class CephConfigurationContext(ConfigContext):
|
||||
"""Ceph configuration context."""
|
||||
|
||||
def context(self) -> dict:
|
||||
"""Ceph configuration context."""
|
||||
config = self.charm.model.config.get
|
||||
ctxt = {}
|
||||
if config("pool-type") and config("pool-type") == "erasure-coded":
|
||||
base_pool_name = config("rbd-pool") or config("rbd-pool-name")
|
||||
if not base_pool_name:
|
||||
base_pool_name = self.charm.app.name
|
||||
ctxt["rbd_default_data_pool"] = base_pool_name
|
||||
return ctxt
|
||||
|
||||
|
||||
class CinderCephConfigurationContext(ConfigContext):
|
||||
"""Cinder Ceph configuration context."""
|
||||
|
||||
def context(self) -> dict:
|
||||
"""Cinder Ceph configuration context."""
|
||||
config = self.charm.model.config.get
|
||||
data_pool_name = config("rbd-pool-name") or self.charm.app.name
|
||||
if config("pool-type") == ERASURE_CODED:
|
||||
pool_name = (
|
||||
config("ec-rbd-metadata-pool") or f"{data_pool_name}-metadata"
|
||||
)
|
||||
else:
|
||||
pool_name = data_pool_name
|
||||
backend_name = config("volume-backend-name") or self.charm.app.name
|
||||
# TODO:
|
||||
# secret_uuid needs to be generated and shared for the app
|
||||
return {
|
||||
"cluster_name": self.charm.app.name,
|
||||
"rbd_pool": pool_name,
|
||||
"rbd_user": self.charm.app.name,
|
||||
"backend_name": backend_name,
|
||||
"backend_availability_zone": config("backend-availability-zone"),
|
||||
}
|
482
ops-sunbeam/ops_sunbeam/container_handlers.py
Normal file
@@ -0,0 +1,482 @@
|
||||
# Copyright 2021 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Base classes for defining Pebble handlers.
|
||||
|
||||
The PebbleHandler defines the pebble layers, manages pushing
|
||||
configuration to the containers, and manages the service running
|
||||
in the container.
|
||||
"""
|
||||
|
||||
import collections
|
||||
import logging
|
||||
from collections.abc import (
|
||||
Callable,
|
||||
)
|
||||
from typing import (
|
||||
List,
|
||||
TypedDict,
|
||||
)
|
||||
|
||||
import ops.charm
|
||||
import ops.pebble
|
||||
from ops.model import (
|
||||
ActiveStatus,
|
||||
BlockedStatus,
|
||||
WaitingStatus,
|
||||
)
|
||||
|
||||
import ops_sunbeam.compound_status as compound_status
|
||||
import ops_sunbeam.core as sunbeam_core
|
||||
import ops_sunbeam.templating as sunbeam_templating
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
ContainerDir = collections.namedtuple(
|
||||
"ContainerDir", ["path", "user", "group"]
|
||||
)
|
||||
|
||||
|
||||
class PebbleHandler(ops.charm.Object):
|
||||
"""Base handler for Pebble based containers."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
charm: ops.charm.CharmBase,
|
||||
container_name: str,
|
||||
service_name: str,
|
||||
container_configs: List[sunbeam_core.ContainerConfigFile],
|
||||
template_dir: str,
|
||||
callback_f: Callable,
|
||||
) -> None:
|
||||
"""Run constructor."""
|
||||
super().__init__(charm, None)
|
||||
self.charm = charm
|
||||
self.container_name = container_name
|
||||
self.service_name = service_name
|
||||
self.container_configs = container_configs
|
||||
self.container_configs.extend(self.default_container_configs())
|
||||
self.template_dir = template_dir
|
||||
self.callback_f = callback_f
|
||||
self.setup_pebble_handler()
|
||||
|
||||
self.status = compound_status.Status("container:" + container_name)
|
||||
self.charm.status_pool.add(self.status)
|
||||
|
||||
self.framework.observe(
|
||||
self.charm.on.update_status, self._on_update_status
|
||||
)
|
||||
|
||||
def setup_pebble_handler(self) -> None:
|
||||
"""Configure handler for pebble ready event."""
|
||||
prefix = self.container_name.replace("-", "_")
|
||||
pebble_ready_event = getattr(self.charm.on, f"{prefix}_pebble_ready")
|
||||
self.framework.observe(
|
||||
pebble_ready_event, self._on_service_pebble_ready
|
||||
)
|
||||
|
||||
def _on_service_pebble_ready(
|
||||
self, event: ops.charm.PebbleReadyEvent
|
||||
) -> None:
|
||||
"""Handle pebble ready event."""
|
||||
container = event.workload
|
||||
container.add_layer(self.service_name, self.get_layer(), combine=True)
|
||||
logger.debug(f"Plan: {container.get_plan()}")
|
||||
self.charm.configure_charm(event)
|
||||
|
||||
def write_config(
|
||||
self, context: sunbeam_core.OPSCharmContexts
|
||||
) -> List[str]:
|
||||
"""Write configuration files into the container.
|
||||
|
||||
Write self.container_configs into the container if their contents
|
||||
have changed.
|
||||
|
||||
:return: List of files that were updated
|
||||
:rtype: List
|
||||
"""
|
||||
files_updated = []
|
||||
container = self.charm.unit.get_container(self.container_name)
|
||||
if container:
|
||||
for config in self.container_configs:
|
||||
changed = sunbeam_templating.sidecar_config_render(
|
||||
container,
|
||||
config,
|
||||
self.template_dir,
|
||||
context,
|
||||
)
|
||||
if changed:
|
||||
files_updated.append(config.path)
|
||||
logger.debug(f"Changes detected in {files_updated}")
|
||||
else:
|
||||
logger.debug("No file changes detected")
|
||||
else:
|
||||
logger.debug("Container not ready")
|
||||
if files_updated:
|
||||
logger.debug(f"Changes detected in {files_updated}")
|
||||
else:
|
||||
logger.debug("No file changes detected")
|
||||
return files_updated
|
||||
|
||||
def get_layer(self) -> dict:
|
||||
"""Pebble configuration layer for the container."""
|
||||
return {}
|
||||
|
||||
def get_healthcheck_layer(self) -> dict:
|
||||
"""Pebble configuration for health check layer for the container."""
|
||||
return {}
|
||||
|
||||
@property
|
||||
def directories(self) -> List[ContainerDir]:
|
||||
"""List of directories to create in container."""
|
||||
return []
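# Illustrative override sketch (path and ownership are assumptions): a handler
# that needs extra directories created in the workload container.
#
#     @property
#     def directories(self) -> List[ContainerDir]:
#         return [
#             ContainerDir("/var/log/my-service", "my-service", "my-service"),
#         ]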
|
||||
|
||||
def setup_dirs(self) -> None:
|
||||
"""Create directories in container."""
|
||||
if self.directories:
|
||||
container = self.charm.unit.get_container(self.container_name)
|
||||
for d in self.directories:
|
||||
logging.debug(f"Creating {d.path}")
|
||||
container.make_dir(
|
||||
d.path, user=d.user, group=d.group, make_parents=True
|
||||
)
|
||||
|
||||
def init_service(self, context: sunbeam_core.OPSCharmContexts) -> None:
|
||||
"""Initialise service ready for use.
|
||||
|
||||
Write configuration files to the container and record
|
||||
that the service is ready for use.
|
||||
"""
|
||||
self.setup_dirs()
|
||||
self.write_config(context)
|
||||
self.status.set(ActiveStatus(""))
|
||||
|
||||
def default_container_configs(
|
||||
self,
|
||||
) -> List[sunbeam_core.ContainerConfigFile]:
|
||||
"""Generate default container configurations.
|
||||
|
||||
These should be used by all inheriting classes and are
|
||||
automatically added to the list of container configurations
|
||||
provided during object instantiation.
|
||||
"""
|
||||
return []
|
||||
|
||||
@property
|
||||
def pebble_ready(self) -> bool:
|
||||
"""Determine if pebble is running and ready for use."""
|
||||
return self.charm.unit.get_container(self.container_name).can_connect()
|
||||
|
||||
@property
|
||||
def service_ready(self) -> bool:
|
||||
"""Determine whether the service the container provides is running."""
|
||||
if not self.pebble_ready:
|
||||
return False
|
||||
container = self.charm.unit.get_container(self.container_name)
|
||||
services = container.get_services()
|
||||
return all([s.is_running() for s in services.values()])
|
||||
|
||||
def execute(
|
||||
self, cmd: List, exception_on_error: bool = False, **kwargs: TypedDict
|
||||
) -> str:
|
||||
"""Execute given command in container managed by this handler.
|
||||
|
||||
:param cmd: command to execute, specified as a list of strings
|
||||
:param exception_on_error: determines whether or not to raise
|
||||
an exception if the command fails. By default, this method
|
||||
will not raise an exception if the command fails. If it is
|
||||
raised, this will raise an ops.pebble.ExecError.
|
||||
:param kwargs: arguments to pass into the ops.model.Container's
|
||||
execute command.
|
||||
"""
|
||||
container = self.charm.unit.get_container(self.container_name)
|
||||
process = container.exec(cmd, **kwargs)
|
||||
try:
|
||||
stdout, _ = process.wait_output()
|
||||
# Not logging the command in case it included a password,
|
||||
# too cautious ?
|
||||
logger.debug("Command complete")
|
||||
if stdout:
|
||||
for line in stdout.splitlines():
|
||||
logger.debug(" %s", line)
|
||||
return stdout
|
||||
except ops.pebble.ExecError as e:
|
||||
logger.error("Exited with code %d. Stderr:", e.exit_code)
|
||||
for line in e.stderr.splitlines():
|
||||
logger.error(" %s", line)
|
||||
if exception_on_error:
|
||||
raise
|
||||
|
||||
def add_healthchecks(self) -> None:
|
||||
"""Add healthcheck layer to the plan."""
|
||||
healthcheck_layer = self.get_healthcheck_layer()
|
||||
if not healthcheck_layer:
|
||||
logger.debug("Healthcheck layer not defined in pebble handler")
|
||||
return
|
||||
|
||||
container = self.charm.unit.get_container(self.container_name)
|
||||
try:
|
||||
plan = container.get_plan()
|
||||
if not plan.checks:
|
||||
logger.debug("Adding healthcheck layer to the plan")
|
||||
container.add_layer(
|
||||
"healthchecks", healthcheck_layer, combine=True
|
||||
)
|
||||
except ops.pebble.ConnectionError as connect_error:
|
||||
logger.error("Not able to add Healthcheck layer")
|
||||
logger.exception(connect_error)
|
||||
|
||||
def _on_update_status(self, event: ops.framework.EventBase) -> None:
|
||||
"""Assess and set status.
|
||||
|
||||
Also takes into account healthchecks.
|
||||
"""
|
||||
if not self.pebble_ready:
|
||||
self.status.set(WaitingStatus("pebble not ready"))
|
||||
return
|
||||
|
||||
if not self.service_ready:
|
||||
self.status.set(WaitingStatus("service not ready"))
|
||||
return
|
||||
|
||||
failed = []
|
||||
container = self.charm.unit.get_container(self.container_name)
|
||||
checks = container.get_checks(level=ops.pebble.CheckLevel.READY)
|
||||
for name, check in checks.items():
|
||||
if check.status != ops.pebble.CheckStatus.UP:
|
||||
failed.append(name)
|
||||
|
||||
# Verify alive checks if ready checks are missing
|
||||
if not checks:
|
||||
checks = container.get_checks(level=ops.pebble.CheckLevel.ALIVE)
|
||||
for name, check in checks.items():
|
||||
if check.status != ops.pebble.CheckStatus.UP:
|
||||
failed.append(name)
|
||||
|
||||
if failed:
|
||||
self.status.set(
|
||||
BlockedStatus(
|
||||
"healthcheck{} failed: {}".format(
|
||||
"s" if len(failed) > 1 else "", ", ".join(failed)
|
||||
)
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
self.status.set(ActiveStatus(""))
|
||||
|
||||
def start_all(self, restart: bool = True) -> None:
|
||||
"""Start services in container.
|
||||
|
||||
:param restart: Whether to stop services before starting them.
|
||||
"""
|
||||
container = self.charm.unit.get_container(self.container_name)
|
||||
if not container.can_connect():
|
||||
logger.debug(
|
||||
f"Container {self.container_name} not ready, deferring restart"
|
||||
)
|
||||
return
|
||||
services = container.get_services()
|
||||
for service_name, service in services.items():
|
||||
if not service.is_running():
|
||||
logger.debug(
|
||||
f"Starting {service_name} in {self.container_name}"
|
||||
)
|
||||
container.start(service_name)
|
||||
continue
|
||||
|
||||
if restart:
|
||||
logger.debug(
|
||||
f"Restarting {service_name} in {self.container_name}"
|
||||
)
|
||||
container.restart(service_name)
|
||||
|
||||
def stop_all(self) -> None:
|
||||
"""Stop services in container."""
|
||||
container = self.charm.unit.get_container(self.container_name)
|
||||
if not container.can_connect():
|
||||
logger.debug(
|
||||
f"Container {self.container_name} not ready, no need to stop"
|
||||
)
|
||||
return
|
||||
|
||||
services = container.get_services()
|
||||
if services:
|
||||
logger.debug("Stopping all services")
|
||||
container.stop(*services.keys())
|
||||
|
||||
|
||||
class ServicePebbleHandler(PebbleHandler):
|
||||
"""Container handler for containers which manage a service."""
|
||||
|
||||
def init_service(self, context: sunbeam_core.OPSCharmContexts) -> None:
|
||||
"""Initialise service ready for use.
|
||||
|
||||
Write configuration files to the container and record
|
||||
that the service is ready for use.
|
||||
"""
|
||||
self.setup_dirs()
|
||||
files_changed = self.write_config(context)
|
||||
if files_changed:
|
||||
self.start_service(restart=True)
|
||||
else:
|
||||
self.start_service(restart=False)
|
||||
self.status.set(ActiveStatus(""))
|
||||
|
||||
def start_service(self, restart: bool = True) -> None:
|
||||
"""Check and start services in container.
|
||||
|
||||
:param restart: Whether to stop services before starting them.
|
||||
"""
|
||||
container = self.charm.unit.get_container(self.container_name)
|
||||
if not container:
|
||||
logger.debug(
|
||||
f"{self.container_name} container is not ready. "
|
||||
"Cannot start service."
|
||||
)
|
||||
return
|
||||
if self.service_name not in container.get_services().keys():
|
||||
container.add_layer(
|
||||
self.service_name, self.get_layer(), combine=True
|
||||
)
|
||||
self.start_all(restart=restart)
|
||||
|
||||
|
||||
class WSGIPebbleHandler(PebbleHandler):
|
||||
"""WSGI oriented handler for a Pebble managed container."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
charm: ops.charm.CharmBase,
|
||||
container_name: str,
|
||||
service_name: str,
|
||||
container_configs: List[sunbeam_core.ContainerConfigFile],
|
||||
template_dir: str,
|
||||
callback_f: Callable,
|
||||
wsgi_service_name: str,
|
||||
) -> None:
|
||||
"""Run constructor."""
|
||||
super().__init__(
|
||||
charm,
|
||||
container_name,
|
||||
service_name,
|
||||
container_configs,
|
||||
template_dir,
|
||||
callback_f,
|
||||
)
|
||||
self.wsgi_service_name = wsgi_service_name
|
||||
|
||||
def start_wsgi(self, restart: bool = True) -> None:
|
||||
"""Check and start services in container.
|
||||
|
||||
:param restart: Whether to stop services before starting them.
|
||||
"""
|
||||
container = self.charm.unit.get_container(self.container_name)
|
||||
if not container:
|
||||
logger.debug(
|
||||
f"{self.container_name} container is not ready. "
|
||||
"Cannot start wgi service."
|
||||
)
|
||||
return
|
||||
if self.wsgi_service_name not in container.get_services().keys():
|
||||
container.add_layer(
|
||||
self.service_name, self.get_layer(), combine=True
|
||||
)
|
||||
self.start_all(restart=restart)
|
||||
|
||||
def start_service(self) -> None:
|
||||
"""Start the service."""
|
||||
self.start_wsgi()
|
||||
|
||||
def get_layer(self) -> dict:
|
||||
"""Apache WSGI service pebble layer.
|
||||
|
||||
:returns: pebble layer configuration for wsgi service
|
||||
"""
|
||||
return {
|
||||
"summary": f"{self.service_name} layer",
|
||||
"description": "pebble config layer for apache wsgi",
|
||||
"services": {
|
||||
f"{self.wsgi_service_name}": {
|
||||
"override": "replace",
|
||||
"summary": f"{self.service_name} wsgi",
|
||||
"command": "/usr/sbin/apache2ctl -DFOREGROUND",
|
||||
"startup": "disabled",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
def get_healthcheck_layer(self) -> dict:
|
||||
"""Apache WSGI health check pebble layer.
|
||||
|
||||
:returns: pebble health check layer configuration for wsgi service
|
||||
"""
|
||||
return {
|
||||
"checks": {
|
||||
"up": {
|
||||
"override": "replace",
|
||||
"level": "alive",
|
||||
"period": "10s",
|
||||
"timeout": "3s",
|
||||
"threshold": 3,
|
||||
"exec": {"command": "service apache2 status"},
|
||||
},
|
||||
"online": {
|
||||
"override": "replace",
|
||||
"level": "ready",
|
||||
"period": self.charm.healthcheck_period,
|
||||
"timeout": self.charm.healthcheck_http_timeout,
|
||||
"http": {"url": self.charm.healthcheck_http_url},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
def init_service(self, context: sunbeam_core.OPSCharmContexts) -> None:
|
||||
"""Enable and start WSGI service."""
|
||||
container = self.charm.unit.get_container(self.container_name)
|
||||
files_changed = self.write_config(context)
|
||||
try:
|
||||
process = container.exec(
|
||||
["a2ensite", self.wsgi_service_name], timeout=5 * 60
|
||||
)
|
||||
out, warnings = process.wait_output()
|
||||
if warnings:
|
||||
for line in warnings.splitlines():
|
||||
logger.warning("a2ensite warn: %s", line.strip())
|
||||
logging.debug(f"Output from a2ensite: \n{out}")
|
||||
except ops.pebble.ExecError:
|
||||
logger.exception(
|
||||
f"Failed to enable {self.wsgi_service_name} site in apache"
|
||||
)
|
||||
# Ignore for now - pebble is raising an 'exited too quickly' error, but it
|
||||
# appears to work properly.
|
||||
files_changed.extend(self.write_config(context))
|
||||
if files_changed:
|
||||
self.start_wsgi(restart=True)
|
||||
else:
|
||||
self.start_wsgi(restart=False)
|
||||
self.status.set(ActiveStatus(""))
|
||||
|
||||
@property
|
||||
def wsgi_conf(self) -> str:
|
||||
"""Location of WSGI config file."""
|
||||
return f"/etc/apache2/sites-available/wsgi-{self.service_name}.conf"
|
||||
|
||||
def default_container_configs(
|
||||
self,
|
||||
) -> List[sunbeam_core.ContainerConfigFile]:
|
||||
"""Container configs for WSGI service."""
|
||||
return [
|
||||
sunbeam_core.ContainerConfigFile(self.wsgi_conf, "root", "root")
|
||||
]
|
87
ops-sunbeam/ops_sunbeam/core.py
Normal file
@@ -0,0 +1,87 @@
|
||||
# Copyright 2021 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Collection of core components."""
|
||||
|
||||
import collections
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Generator,
|
||||
List,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ops_sunbeam.charm import (
|
||||
OSBaseOperatorCharm,
|
||||
)
|
||||
from ops_sunbeam.config_contexts import (
|
||||
ConfigContext,
|
||||
)
|
||||
from ops_sunbeam.relation_handlers import (
|
||||
RelationHandler,
|
||||
)
|
||||
|
||||
ContainerConfigFile = collections.namedtuple(
|
||||
"ContainerConfigFile",
|
||||
["path", "user", "group", "permissions"],
|
||||
defaults=(None,),
|
||||
)
|
||||
|
||||
|
||||
class OPSCharmContexts:
|
||||
"""Set of config contexts and contexts from relation handlers."""
|
||||
|
||||
def __init__(self, charm: "OSBaseOperatorCharm") -> None:
|
||||
"""Run constructor."""
|
||||
self.charm = charm
|
||||
self.namespaces = []
|
||||
|
||||
def add_relation_handler(self, handler: "RelationHandler") -> None:
|
||||
"""Add relation handler."""
|
||||
interface, relation_name = handler.get_interface()
|
||||
_ns = relation_name.replace("-", "_")
|
||||
self.namespaces.append(_ns)
|
||||
ctxt = handler.context()
|
||||
obj_name = "".join([w.capitalize() for w in relation_name.split("-")])
|
||||
obj = collections.namedtuple(obj_name, ctxt.keys())(*ctxt.values())
|
||||
setattr(self, _ns, obj)
|
||||
# Add special sobriquet for peers.
|
||||
if _ns == "peers":
|
||||
self.namespaces.append("leader_db")
|
||||
setattr(self, "leader_db", obj)
|
||||
|
||||
def add_config_contexts(
|
||||
self, config_adapters: List["ConfigContext"]
|
||||
) -> None:
|
||||
"""Add multiple config contexts."""
|
||||
for config_adapter in config_adapters:
|
||||
self.add_config_context(config_adapter, config_adapter.namespace)
|
||||
|
||||
def add_config_context(
|
||||
self, config_adapter: "ConfigContext", namespace: str
|
||||
) -> None:
|
||||
"""Add add config adapter to context."""
|
||||
self.namespaces.append(namespace)
|
||||
setattr(self, namespace, config_adapter)
|
||||
|
||||
def __iter__(
|
||||
self,
|
||||
) -> Generator[
|
||||
Tuple[str, Union["ConfigContext", "RelationHandler"]], None, None
|
||||
]:
|
||||
"""Iterate over the relations presented to the charm."""
|
||||
for namespace in self.namespaces:
|
||||
yield namespace, getattr(self, namespace)
|
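ContainerConfigFile above is a plain namedtuple whose last field, permissions, is defaulted. A standalone sketch of its behaviour (the paths are made up for illustration, and ops_sunbeam is assumed to be importable):

from ops_sunbeam.core import ContainerConfigFile

cfg = ContainerConfigFile("/etc/my-svc/my-svc.conf", "root", "root")
assert cfg.permissions is None  # defaults=(None,) covers the trailing field
cfg_secret = ContainerConfigFile("/etc/my-svc/secret.conf", "root", "root", "0600")
print(cfg_secret.path, cfg_secret.user, cfg_secret.group, cfg_secret.permissions)

OPSCharmContexts exposes each added context as an attribute named after its namespace, so a relation named "identity-service" becomes reachable as contexts.identity_service when rendering templates.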
132
ops-sunbeam/ops_sunbeam/guard.py
Normal file
@@ -0,0 +1,132 @@
|
||||
# Copyright 2021 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Module to handle errors and bailing out of an event/hook."""
|
||||
|
||||
import logging
|
||||
from contextlib import (
|
||||
contextmanager,
|
||||
)
|
||||
|
||||
from ops.charm import (
|
||||
CharmBase,
|
||||
)
|
||||
from ops.model import (
|
||||
BlockedStatus,
|
||||
MaintenanceStatus,
|
||||
WaitingStatus,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class GuardExceptionError(Exception):
|
||||
"""GuardException."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class BaseStatusExceptionError(Exception):
|
||||
"""Charm is blocked."""
|
||||
|
||||
def __init__(self, msg):
|
||||
self.msg = msg
|
||||
super().__init__(self.msg)
|
||||
|
||||
|
||||
class BlockedExceptionError(BaseStatusExceptionError):
|
||||
"""Charm is blocked."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class MaintenanceExceptionError(BaseStatusExceptionError):
|
||||
"""Charm is performing maintenance."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class WaitingExceptionError(BaseStatusExceptionError):
|
||||
"""Charm is waiting."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
@contextmanager
|
||||
def guard(
|
||||
charm: "CharmBase",
|
||||
section: str,
|
||||
handle_exception: bool = True,
|
||||
log_traceback: bool = True,
|
||||
**__,
|
||||
) -> None:
|
||||
"""Context manager to handle errors and bailing out of an event/hook.
|
||||
|
||||
The nature of Juju is that all the information may not be available to run
|
||||
a set of actions. This context manager allows a section of code to be
|
||||
'guarded' so that it can be bailed at any time.
|
||||
|
||||
It also handles errors which can be interpreted as a Block rather than the
|
||||
charm going into error.
|
||||
|
||||
:param charm: the charm class (so that status can be set)
|
||||
:param section: the name of the section (for debugging/info purposes)
|
||||
:param handle_exception: whether to convert the exception to a BlockedStatus()
|
||||
:param log_traceback: whether to log the traceback for debugging purposes.
|
||||
:raises: Exception if handle_exception is False
|
||||
"""
|
||||
logger.info("Entering guarded section: '%s'", section)
|
||||
try:
|
||||
yield
|
||||
logging.info("Completed guarded section fully: '%s'", section)
|
||||
except GuardExceptionError as e:
|
||||
logger.info(
|
||||
"Guarded Section: Early exit from '%s' due to '%s'.",
|
||||
section,
|
||||
str(e),
|
||||
)
|
||||
except BlockedExceptionError as e:
|
||||
logger.warning(
|
||||
"Charm is blocked in section '%s' due to '%s'", section, str(e)
|
||||
)
|
||||
charm.status.set(BlockedStatus(e.msg))
|
||||
except WaitingExceptionError as e:
|
||||
logger.warning(
|
||||
"Charm is waiting in section '%s' due to '%s'", section, str(e)
|
||||
)
|
||||
charm.status.set(WaitingStatus(e.msg))
|
||||
except MaintenanceExceptionError as e:
|
||||
logger.warning(
|
||||
"Charm performing maintenance in section '%s' due to '%s'",
|
||||
section,
|
||||
str(e),
|
||||
)
|
||||
charm.status.set(MaintenanceStatus(e.msg))
|
||||
except Exception as e:
|
||||
# something else went wrong
|
||||
if handle_exception:
|
||||
logger.error(
|
||||
"Exception raised in section '%s': %s", section, str(e)
|
||||
)
|
||||
if log_traceback:
|
||||
import traceback
|
||||
|
||||
logger.error(traceback.format_exc())
|
||||
charm.status.set(
|
||||
BlockedStatus(
|
||||
"Error in charm (see logs): {}".format(str(e))
|
||||
)
|
||||
)
|
||||
return
|
||||
raise
|
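A sketch of how a charm event handler might use guard(); `self` is assumed to be an ops-sunbeam style charm exposing a `status` attribute and a `configure_charm` method, and the "database" relation name is invented for illustration.

import ops_sunbeam.guard as sunbeam_guard


def _on_config_changed(self, event) -> None:
    with sunbeam_guard.guard(self, "configuring service"):
        if not self.model.get_relation("database"):
            # Bail out of the hook; guard() sets WaitingStatus for us
            # instead of letting the charm go into an error state.
            raise sunbeam_guard.WaitingExceptionError("database not related yet")
        self.configure_charm(event)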
161
ops-sunbeam/ops_sunbeam/interfaces.py
Normal file
@@ -0,0 +1,161 @@
|
||||
# Copyright 2021 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Common interfaces not charm specific."""
|
||||
|
||||
import logging
|
||||
from typing import (
|
||||
Dict,
|
||||
List,
|
||||
Optional,
|
||||
)
|
||||
|
||||
import ops.model
|
||||
from ops.framework import (
|
||||
EventBase,
|
||||
EventSource,
|
||||
Object,
|
||||
ObjectEvents,
|
||||
StoredState,
|
||||
)
|
||||
|
||||
|
||||
class PeersRelationCreatedEvent(EventBase):
|
||||
"""The PeersRelationCreatedEvent indicates that peer relation now exists.
|
||||
|
||||
It does not indicate that any peers are available or have joined, simply
|
||||
that the relation exists. This is useful to indicate that the
|
||||
application databag is available for storing information shared across
|
||||
units.
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class PeersDataChangedEvent(EventBase):
|
||||
"""The PeersDataChangedEvent indicates peer data hjas changed."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class PeersRelationJoinedEvent(EventBase):
|
||||
"""The PeersRelationJoinedEvent indicates a new unit has joined."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class PeersEvents(ObjectEvents):
|
||||
"""Peer Events."""
|
||||
|
||||
peers_relation_created = EventSource(PeersRelationCreatedEvent)
|
||||
peers_relation_joined = EventSource(PeersRelationJoinedEvent)
|
||||
peers_data_changed = EventSource(PeersDataChangedEvent)
|
||||
|
||||
|
||||
class OperatorPeers(Object):
|
||||
"""Interface for the peers relation."""
|
||||
|
||||
on = PeersEvents()
|
||||
state = StoredState()
|
||||
|
||||
def __init__(self, charm: ops.charm.CharmBase, relation_name: str) -> None:
|
||||
"""Run constructor."""
|
||||
super().__init__(charm, relation_name)
|
||||
self.relation_name = relation_name
|
||||
self.framework.observe(
|
||||
charm.on[relation_name].relation_created, self.on_created
|
||||
)
|
||||
self.framework.observe(
|
||||
charm.on[relation_name].relation_joined, self.on_joined
|
||||
)
|
||||
self.framework.observe(
|
||||
charm.on[relation_name].relation_changed, self.on_changed
|
||||
)
|
||||
|
||||
@property
|
||||
def peers_rel(self) -> ops.model.Relation:
|
||||
"""Peer relation."""
|
||||
return self.framework.model.get_relation(self.relation_name)
|
||||
|
||||
@property
|
||||
def _app_data_bag(self) -> Dict[str, str]:
|
||||
"""Return all app data on peer relation."""
|
||||
if not self.peers_rel:
|
||||
return {}
|
||||
return self.peers_rel.data[self.peers_rel.app]
|
||||
|
||||
def on_joined(self, event: ops.framework.EventBase) -> None:
|
||||
"""Handle relation joined event."""
|
||||
logging.info("Peer joined")
|
||||
self.on.peers_relation_joined.emit()
|
||||
|
||||
def on_created(self, event: ops.framework.EventBase) -> None:
|
||||
"""Handle relation created event."""
|
||||
logging.info("Peers on_created")
|
||||
self.on.peers_relation_created.emit()
|
||||
|
||||
def on_changed(self, event: ops.framework.EventBase) -> None:
|
||||
"""Handle relation changed event."""
|
||||
logging.info("Peers on_changed")
|
||||
self.on.peers_data_changed.emit()
|
||||
|
||||
def set_app_data(self, settings: Dict[str, str]) -> None:
|
||||
"""Publish settings on the peer app data bag."""
|
||||
for k, v in settings.items():
|
||||
self._app_data_bag[k] = v
|
||||
|
||||
def get_app_data(self, key: str) -> Optional[str]:
|
||||
"""Get the value corresponding to key from the app data bag."""
|
||||
if not self.peers_rel:
|
||||
return None
|
||||
return self._app_data_bag.get(key)
|
||||
|
||||
def get_all_app_data(self) -> Dict[str, str]:
|
||||
"""Return all the app data from the relation."""
|
||||
return self._app_data_bag
|
||||
|
||||
def get_all_unit_values(
|
||||
self, key: str, include_local_unit: bool = False
|
||||
) -> List[str]:
|
||||
"""Retrieve value for key from all related units.
|
||||
|
||||
:param include_local_unit: Include value set by local unit
|
||||
"""
|
||||
values = []
|
||||
if not self.peers_rel:
|
||||
return values
|
||||
for unit in self.peers_rel.units:
|
||||
value = self.peers_rel.data[unit].get(key)
|
||||
if value is not None:
|
||||
values.append(value)
|
||||
local_unit_value = self.peers_rel.data[self.model.unit].get(key)
|
||||
if include_local_unit and local_unit_value:
|
||||
values.append(local_unit_value)
|
||||
return values
|
||||
|
||||
def set_unit_data(self, settings: Dict[str, str]) -> None:
|
||||
"""Publish settings on the peer unit data bag."""
|
||||
for k, v in settings.items():
|
||||
self.peers_rel.data[self.model.unit][k] = v
|
||||
|
||||
def all_joined_units(self) -> List[ops.model.Unit]:
|
||||
"""All remote units joined to the peer relation."""
|
||||
return list(self.peers_rel.units)
|
||||
|
||||
def expected_peer_units(self) -> int:
|
||||
"""Return the Number of units expected on relation.
|
||||
|
||||
NOTE: This count includes this unit
|
||||
"""
|
||||
return self.peers_rel.app.planned_units()
|
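A sketch of wiring OperatorPeers into a charm and sharing a value through the application databag; the relation name "peers", the data key and the charm class are assumptions for illustration only.

import logging

from ops.charm import CharmBase

from ops_sunbeam.interfaces import OperatorPeers


class HypotheticalCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        self.peers = OperatorPeers(self, "peers")
        self.framework.observe(
            self.peers.on.peers_relation_created, self._on_peers_created
        )
        self.framework.observe(
            self.peers.on.peers_data_changed, self._on_peers_data_changed
        )

    def _on_peers_created(self, event) -> None:
        if self.unit.is_leader():
            # Only the leader may write to the application databag.
            self.peers.set_app_data({"shared-token": "example-value"})

    def _on_peers_data_changed(self, event) -> None:
        logging.info("token: %s", self.peers.get_app_data("shared-token"))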
103
ops-sunbeam/ops_sunbeam/job_ctrl.py
Normal file
@@ -0,0 +1,103 @@
|
||||
# Copyright 2023 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""Helpers for controlling whether jobs should run.
|
||||
|
||||
In general it is better for a command to be a noop if run when it is not
|
||||
needed but in some cases the commands are expensive or disruptive in which
|
||||
case these helpers can limit how frequently they are run.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
from functools import (
|
||||
wraps,
|
||||
)
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
|
||||
import ops.framework
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import ops_sunbeam.charm
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def run_once_per_unit(label):
|
||||
"""Run once per instantiation of a unit.
|
||||
|
||||
This is designed for commands which only need to be run once on each
|
||||
instantiation of a unit.
|
||||
|
||||
Note: This decorator can only be used within a charm derived from
|
||||
ops_sunbeam.charm.OSBaseOperatorCharm.
|
||||
|
||||
Example usage:
|
||||
|
||||
class MyCharm(ops_sunbeam.charm.OSBaseOperatorCharm):
|
||||
...
|
||||
@run_once_per_unit('a2enmod')
|
||||
def enable_apache_module(self):
|
||||
check_call(['a2enmod', 'wsgi'])
|
||||
"""
|
||||
|
||||
def wrap(f):
|
||||
@wraps(f)
|
||||
def wrapped_f(
|
||||
charm: "ops_sunbeam.charm.OSBaseOperatorCharm", *args, **kwargs
|
||||
):
|
||||
"""Run once decorator.
|
||||
|
||||
:param charm: Instance of charm
|
||||
"""
|
||||
storage = LocalJobStorage(charm._state)
|
||||
if label in storage:
|
||||
logger.warning(
|
||||
f"Not running {label}, it has run previously for this unit"
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"Running {label}, it has not run on this unit before"
|
||||
)
|
||||
f(charm, *args, **kwargs)
|
||||
storage.add(label)
|
||||
|
||||
return wrapped_f
|
||||
|
||||
return wrap
|
||||
|
||||
|
||||
class LocalJobStorage:
|
||||
"""Class to store job info of jobs run on the local unit."""
|
||||
|
||||
def __init__(self, storage: ops.framework.BoundStoredState):
|
||||
"""Setup job history storage."""
|
||||
self.storage = storage
|
||||
try:
|
||||
self.storage.run_once
|
||||
except AttributeError:
|
||||
self.storage.run_once = {}
|
||||
|
||||
def get_labels(self):
|
||||
"""Return all job entries."""
|
||||
return self.storage.run_once
|
||||
|
||||
def __contains__(self, key):
|
||||
"""Check if label is in list or run jobs."""
|
||||
return key in self.get_labels().keys()
|
||||
|
||||
def add(self, key):
|
||||
"""Add the label of job that has run."""
|
||||
self.storage.run_once[key] = str(time.time())
|
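LocalJobStorage above only needs an object it can hang a run_once mapping off, so its behaviour can be sketched without a charm; the SimpleNamespace below stands in for the charm's stored state and is purely illustrative.

from types import SimpleNamespace

from ops_sunbeam.job_ctrl import LocalJobStorage

fake_state = SimpleNamespace()          # stand-in for charm._state
jobs = LocalJobStorage(fake_state)
print("a2enmod" in jobs)                # False: never recorded
jobs.add("a2enmod")
print("a2enmod" in jobs)                # True: recorded with a timestamp
print(jobs.get_labels())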
15
ops-sunbeam/ops_sunbeam/ovn/__init__.py
Normal file
@@ -0,0 +1,15 @@
|
||||
# Copyright 2021 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Library for shared OVN code for ops charms."""
|
43
ops-sunbeam/ops_sunbeam/ovn/charm.py
Normal file
@@ -0,0 +1,43 @@
|
||||
# Copyright 2022 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Base classes for defining an OVN charm using the Operator framework."""
|
||||
|
||||
from typing import (
|
||||
List,
|
||||
)
|
||||
|
||||
from .. import charm as sunbeam_charm
|
||||
from .. import relation_handlers as sunbeam_rhandlers
|
||||
from . import relation_handlers as ovn_relation_handlers
|
||||
|
||||
|
||||
class OSBaseOVNOperatorCharm(sunbeam_charm.OSBaseOperatorCharmK8S):
|
||||
"""Base charms for OpenStack operators."""
|
||||
|
||||
def get_relation_handlers(
|
||||
self, handlers: List[sunbeam_rhandlers.RelationHandler] = None
|
||||
) -> List[sunbeam_rhandlers.RelationHandler]:
|
||||
"""Relation handlers for the service."""
|
||||
handlers = handlers or []
|
||||
if self.can_add_handler("ovsdb-cms", handlers):
|
||||
self.ovsdb_cms = ovn_relation_handlers.OVSDBCMSRequiresHandler(
|
||||
self,
|
||||
"ovsdb-cms",
|
||||
self.configure_charm,
|
||||
"ovsdb-cms" in self.mandatory_relations,
|
||||
)
|
||||
handlers.append(self.ovsdb_cms)
|
||||
handlers = super().get_relation_handlers(handlers)
|
||||
return handlers
|
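A minimal sketch of a charm built on OSBaseOVNOperatorCharm above; the service name and the choice to make ovsdb-cms mandatory are assumptions, not values taken from this changeset.

import ops_sunbeam.ovn.charm as ovn_charm


class HypotheticalOVNCharm(ovn_charm.OSBaseOVNOperatorCharm):
    """Example OVN charm; inherits the ovsdb-cms relation handler wiring."""

    service_name = "ovn-example"
    # Treat the ovsdb-cms integration as required so the handler blocks
    # the charm when the integration is missing.
    mandatory_relations = {"ovsdb-cms"}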
35
ops-sunbeam/ops_sunbeam/ovn/config_contexts.py
Normal file
@@ -0,0 +1,35 @@
|
||||
# Copyright 2022 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Base classes for defining a config context using the Operator framework.
|
||||
|
||||
ConfigContext objects can be used when rendering templates. The idea is to
|
||||
create reusable contexts which translate charm config, deployment state etc.
|
||||
These are not specific to a relation.
|
||||
"""
|
||||
|
||||
from .. import config_contexts as sunbeam_ccontexts
|
||||
|
||||
|
||||
class OVNDBConfigContext(sunbeam_ccontexts.ConfigContext):
|
||||
"""Context for OVN charms."""
|
||||
|
||||
def context(self) -> dict:
|
||||
"""Context for OVN certs and leadership."""
|
||||
return {
|
||||
"is_charm_leader": self.charm.unit.is_leader(),
|
||||
"ovn_key": "/etc/ovn/key_host",
|
||||
"ovn_cert": "/etc/ovn/cert_host",
|
||||
"ovn_ca_cert": "/etc/ovn/ovn-central.crt",
|
||||
}
|
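The context above is consumed by Jinja2 templates. A standalone sketch of rendering those keys follows; the template string is invented, and the context is flattened here for brevity (in the charm it is namespaced under the ConfigContext's namespace).

import jinja2

ctxt = {
    "is_charm_leader": True,
    "ovn_key": "/etc/ovn/key_host",
    "ovn_cert": "/etc/ovn/cert_host",
    "ovn_ca_cert": "/etc/ovn/ovn-central.crt",
}
template = jinja2.Template(
    "ssl-key={{ ovn_key }}\nssl-cert={{ ovn_cert }}\nssl-ca={{ ovn_ca_cert }}\n"
)
print(template.render(**ctxt))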
123
ops-sunbeam/ops_sunbeam/ovn/container_handlers.py
Normal file
@@ -0,0 +1,123 @@
|
||||
# Copyright 2022 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Base classes for defining OVN Pebble handlers."""
|
||||
|
||||
from typing import (
|
||||
List,
|
||||
)
|
||||
|
||||
from ops.model import (
|
||||
ActiveStatus,
|
||||
)
|
||||
|
||||
from .. import container_handlers as sunbeam_chandlers
|
||||
from .. import core as sunbeam_core
|
||||
|
||||
|
||||
class OVNPebbleHandler(sunbeam_chandlers.ServicePebbleHandler):
|
||||
"""Common class for OVN services."""
|
||||
|
||||
@property
|
||||
def wrapper_script(self) -> str:
|
||||
"""Path to OVN service wrapper."""
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def status_command(self) -> str:
|
||||
"""Command to check status of service."""
|
||||
raise NotImplementedError
|
||||
|
||||
def init_service(self, context: sunbeam_core.OPSCharmContexts) -> None:
|
||||
"""Initialise service ready for use.
|
||||
|
||||
Write configuration files to the container and record
|
||||
that the service is ready for use.
|
||||
|
||||
NOTE: Overrides the default behaviour of automatically starting services
|
||||
"""
|
||||
self.setup_dirs()
|
||||
self.write_config(context)
|
||||
self.status.set(ActiveStatus(""))
|
||||
|
||||
@property
|
||||
def service_description(self) -> str:
|
||||
"""Return a short description of service e.g. OVN Southbound DB."""
|
||||
raise NotImplementedError
|
||||
|
||||
def get_layer(self) -> dict:
|
||||
"""Pebble configuration layer for OVN service.
|
||||
|
||||
:returns: pebble layer configuration for service
|
||||
:rtype: dict
|
||||
"""
|
||||
return {
|
||||
"summary": f"{self.service_description} service",
|
||||
"description": (
|
||||
"Pebble config layer for " f"{self.service_description}"
|
||||
),
|
||||
"services": {
|
||||
self.service_name: {
|
||||
"override": "replace",
|
||||
"summary": f"{self.service_description}",
|
||||
"command": f"bash {self.wrapper_script}",
|
||||
"startup": "disabled",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
def get_healthcheck_layer(self) -> dict:
|
||||
"""Health check pebble layer.
|
||||
|
||||
:returns: pebble health check layer configuration for OVN service
|
||||
:rtype: dict
|
||||
"""
|
||||
return {
|
||||
"checks": {
|
||||
"online": {
|
||||
"override": "replace",
|
||||
"level": "ready",
|
||||
"exec": {"command": f"{self.status_command}"},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@property
|
||||
def directories(self) -> List[sunbeam_chandlers.ContainerDir]:
|
||||
"""Directories to creete in container."""
|
||||
return [
|
||||
sunbeam_chandlers.ContainerDir("/etc/ovn", "root", "root"),
|
||||
sunbeam_chandlers.ContainerDir("/run/ovn", "root", "root"),
|
||||
sunbeam_chandlers.ContainerDir("/var/lib/ovn", "root", "root"),
|
||||
sunbeam_chandlers.ContainerDir("/var/log/ovn", "root", "root"),
|
||||
]
|
||||
|
||||
def default_container_configs(
|
||||
self,
|
||||
) -> List[sunbeam_core.ContainerConfigFile]:
|
||||
"""Files to render into containers."""
|
||||
return [
|
||||
sunbeam_core.ContainerConfigFile(
|
||||
self.wrapper_script, "root", "root"
|
||||
),
|
||||
sunbeam_core.ContainerConfigFile(
|
||||
"/etc/ovn/key_host", "root", "root"
|
||||
),
|
||||
sunbeam_core.ContainerConfigFile(
|
||||
"/etc/ovn/cert_host", "root", "root"
|
||||
),
|
||||
sunbeam_core.ContainerConfigFile(
|
||||
"/etc/ovn/ovn-central.crt", "root", "root"
|
||||
),
|
||||
]
|
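A sketch of a concrete handler filling in the abstract properties of OVNPebbleHandler above; the wrapper script path and status command are placeholders for whatever a real charm ships in its container.

import ops_sunbeam.ovn.container_handlers as ovn_chandlers


class HypotheticalNorthdPebbleHandler(ovn_chandlers.OVNPebbleHandler):
    """Pebble handler for a hypothetical ovn-northd container."""

    @property
    def wrapper_script(self) -> str:
        return "/root/ovn-northd-wrapper.sh"

    @property
    def service_description(self) -> str:
        return "OVN Northd"

    @property
    def status_command(self) -> str:
        return "/usr/share/ovn/scripts/ovn-ctl status_northd"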
588
ops-sunbeam/ops_sunbeam/ovn/relation_handlers.py
Normal file
@@ -0,0 +1,588 @@
|
||||
# Copyright 2022 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Base classes for defining OVN relation handlers."""
|
||||
|
||||
import ipaddress
|
||||
import itertools
|
||||
import logging
|
||||
import socket
|
||||
from typing import (
|
||||
Callable,
|
||||
Dict,
|
||||
Iterator,
|
||||
List,
|
||||
)
|
||||
|
||||
import ops.charm
|
||||
import ops.framework
|
||||
from ops.model import (
|
||||
BlockedStatus,
|
||||
)
|
||||
|
||||
from .. import relation_handlers as sunbeam_rhandlers
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class OVNRelationUtils:
|
||||
"""Common utilities for processing OVN relations."""
|
||||
|
||||
DB_NB_PORT = 6641
|
||||
DB_SB_PORT = 6642
|
||||
DB_SB_ADMIN_PORT = 16642
|
||||
DB_NB_CLUSTER_PORT = 6643
|
||||
DB_SB_CLUSTER_PORT = 6644
|
||||
|
||||
def _format_addr(self, addr: str) -> str:
|
||||
"""Validate and format IP address.
|
||||
|
||||
:param addr: IPv6 or IPv4 address
|
||||
:type addr: str
|
||||
:returns: Address string, optionally encapsulated in brackets ([])
|
||||
:rtype: str
|
||||
:raises: ValueError
|
||||
"""
|
||||
ipaddr = ipaddress.ip_address(addr)
|
||||
if isinstance(ipaddr, ipaddress.IPv6Address):
|
||||
fmt = "[{}]"
|
||||
else:
|
||||
fmt = "{}"
|
||||
return fmt.format(ipaddr)
|
||||
|
||||
def _remote_addrs(self, key: str) -> Iterator[str]:
|
||||
"""Retrieve addresses published by remote units.
|
||||
|
||||
:param key: Relation data key to retrieve value from.
|
||||
:type key: str
|
||||
:returns: addresses published by remote units.
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
for addr in self.interface.get_all_unit_values(key):
|
||||
try:
|
||||
addr = self._format_addr(addr)
|
||||
yield addr
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
def _remote_hostnames(self, key: str) -> Iterator[str]:
|
||||
"""Retrieve hostnames published by remote units.
|
||||
|
||||
:param key: Relation data key to retrieve value from.
|
||||
:type key: str
|
||||
:returns: hostnames published by remote units.
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
for hostname in self.interface.get_all_unit_values(key):
|
||||
yield hostname
|
||||
|
||||
@property
|
||||
def cluster_remote_hostnames(self) -> Iterator[str]:
|
||||
"""Retrieve remote hostnames bound to remote endpoint.
|
||||
|
||||
:returns: hostnames bound to remote endpoints.
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
return self._remote_hostnames("bound-hostname")
|
||||
|
||||
@property
|
||||
def cluster_remote_addrs(self) -> Iterator[str]:
|
||||
"""Retrieve remote addresses bound to remote endpoint.
|
||||
|
||||
:returns: addresses bound to remote endpoints.
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
return self._remote_addrs("bound-address")
|
||||
|
||||
@property
|
||||
def cluster_remote_ingress_addrs(self) -> Iterator[str]:
|
||||
"""Retrieve remote addresses bound to remote endpoint.
|
||||
|
||||
:returns: addresses bound to remote endpoints.
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
return self._remote_addrs("ingress-bound-address")
|
||||
|
||||
def db_connection_strs(
|
||||
self, hostnames: List[str], port: int, proto: str = "ssl"
|
||||
) -> Iterator[str]:
|
||||
"""Provide connection strings.
|
||||
|
||||
:param hostnames: List of hostnames to include in conn strs
|
||||
:type hostnames: List[str]
|
||||
:param port: Port number
|
||||
:type port: int
|
||||
:param proto: Protocol
|
||||
:type proto: str
|
||||
:returns: connection strings
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
for hostname in hostnames:
|
||||
yield ":".join((proto, str(hostname), str(port)))
|
||||
|
||||
@property
|
||||
def db_nb_port(self) -> int:
|
||||
"""Provide port number for OVN Northbound OVSDB.
|
||||
|
||||
:returns: port number for OVN Northbound OVSDB.
|
||||
:rtype: int
|
||||
"""
|
||||
return self.DB_NB_PORT
|
||||
|
||||
@property
|
||||
def db_sb_port(self) -> int:
|
||||
"""Provide port number for OVN Southbound OVSDB.
|
||||
|
||||
:returns: port number for OVN Southbound OVSDB.
|
||||
:rtype: int
|
||||
"""
|
||||
return self.DB_SB_PORT
|
||||
|
||||
@property
|
||||
def db_sb_admin_port(self) -> int:
|
||||
"""Provide admin port number for OVN Southbound OVSDB.
|
||||
|
||||
This is a special listener to allow ``ovn-northd`` to connect to an
|
||||
endpoint without RBAC enabled as there is currently no RBAC profile
|
||||
allowing ``ovn-northd`` to perform its work.
|
||||
|
||||
:returns: admin port number for OVN Southbound OVSDB.
|
||||
:rtype: int
|
||||
"""
|
||||
return self.DB_SB_ADMIN_PORT
|
||||
|
||||
@property
|
||||
def db_nb_cluster_port(self) -> int:
|
||||
"""Provide port number for OVN Northbound OVSDB.
|
||||
|
||||
:returns: cluster port number for OVN Northbound OVSDB.
|
||||
:rtype: int
|
||||
"""
|
||||
return self.DB_NB_CLUSTER_PORT
|
||||
|
||||
@property
|
||||
def db_sb_cluster_port(self) -> int:
|
||||
"""Provide port number for OVN Southbound OVSDB.
|
||||
|
||||
:returns: cluster port number for OVN Southbound OVSDB.
|
||||
:rtype: int
|
||||
"""
|
||||
return self.DB_SB_CLUSTER_PORT
|
||||
|
||||
@property
|
||||
def db_nb_connection_strs(self) -> Iterator[str]:
|
||||
"""Provide OVN Northbound OVSDB connection strings.
|
||||
|
||||
:returns: OVN Northbound OVSDB connection strings.
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
return self.db_connection_strs(
|
||||
self.cluster_remote_addrs, self.db_nb_port
|
||||
)
|
||||
|
||||
@property
|
||||
def db_sb_connection_strs(self) -> Iterator[str]:
|
||||
"""Provide OVN Southbound OVSDB connection strings.
|
||||
|
||||
:returns: OVN Southbound OVSDB connection strings.
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
return self.db_connection_strs(
|
||||
self.cluster_remote_addrs, self.db_sb_port
|
||||
)
|
||||
|
||||
@property
|
||||
def db_ingress_nb_connection_strs(self) -> Iterator[str]:
|
||||
"""Provide OVN Northbound OVSDB connection strings.
|
||||
|
||||
:returns: OVN Northbound OVSDB connection strings.
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
return self.db_connection_strs(
|
||||
self.cluster_remote_ingress_addrs, self.db_nb_port
|
||||
)
|
||||
|
||||
@property
|
||||
def db_ingress_sb_connection_strs(self) -> Iterator[str]:
|
||||
"""Provide OVN Southbound OVSDB connection strings.
|
||||
|
||||
:returns: OVN Southbound OVSDB connection strings.
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
return self.db_connection_strs(
|
||||
self.cluster_remote_ingress_addrs, self.db_sb_port
|
||||
)
|
||||
|
||||
@property
|
||||
def db_nb_connection_hostname_strs(self) -> Iterator[str]:
|
||||
"""Provide OVN Northbound OVSDB connection strings.
|
||||
|
||||
:returns: OVN Northbound OVSDB connection strings.
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
return self.db_connection_strs(
|
||||
self.cluster_remote_hostnames, self.db_nb_port
|
||||
)
|
||||
|
||||
@property
|
||||
def db_sb_connection_hostname_strs(self) -> Iterator[str]:
|
||||
"""Provide OVN Southbound OVSDB connection strings.
|
||||
|
||||
:returns: OVN Southbound OVSDB connection strings.
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
return self.db_connection_strs(
|
||||
self.cluster_remote_hostnames, self.db_sb_port
|
||||
)
|
||||
|
||||
@property
|
||||
def cluster_local_addr(self) -> ipaddress.IPv4Address:
|
||||
"""Retrieve local address bound to endpoint.
|
||||
|
||||
:returns: IPv4 or IPv6 address bound to endpoint
|
||||
:rtype: str
|
||||
"""
|
||||
return self._endpoint_local_bound_addr()
|
||||
|
||||
@property
|
||||
def cluster_ingress_addr(self) -> ipaddress.IPv4Address:
|
||||
"""Retrieve local address bound to endpoint.
|
||||
|
||||
:returns: IPv4 or IPv6 address bound to endpoint
|
||||
:rtype: str
|
||||
"""
|
||||
addresses = self._endpoint_ingress_bound_addresses()
|
||||
if len(addresses) > 1:
|
||||
logger.debug("Found multiple ingress addresses, picking first one")
|
||||
address = addresses[0]
|
||||
elif len(addresses) == 1:
|
||||
address = addresses[0]
|
||||
else:
|
||||
logger.debug("Found no ingress addresses")
|
||||
address = None
|
||||
return address
|
||||
|
||||
@property
|
||||
def cluster_local_hostname(self) -> str:
|
||||
"""Retrieve local hostname for unit.
|
||||
|
||||
:returns: Resolvable hostname for local unit.
|
||||
:rtype: str
|
||||
"""
|
||||
return socket.getfqdn()
|
||||
|
||||
def _endpoint_local_bound_addr(self) -> ipaddress.IPv4Address:
|
||||
"""Retrieve local address bound to endpoint.
|
||||
|
||||
:returns: IPv4 or IPv6 address bound to endpoint
|
||||
"""
|
||||
addr = None
|
||||
for relation in self.charm.model.relations.get(self.relation_name, []):
|
||||
binding = self.charm.model.get_binding(relation)
|
||||
addr = binding.network.bind_address
|
||||
break
|
||||
return addr
|
||||
|
||||
def _endpoint_ingress_bound_addresses(self) -> List[ipaddress.IPv4Address]:
|
||||
"""Retrieve local address bound to endpoint.
|
||||
|
||||
:returns: IPv4 or IPv6 addresses bound to the endpoint
|
||||
"""
|
||||
addresses = []
|
||||
for relation in self.charm.model.relations.get(self.relation_name, []):
|
||||
binding = self.charm.model.get_binding(relation)
|
||||
addresses.extend(binding.network.ingress_addresses)
|
||||
return list(set(addresses))
|
||||
|
||||
|
||||
class OVNDBClusterPeerHandler(
|
||||
sunbeam_rhandlers.BasePeerHandler, OVNRelationUtils
|
||||
):
|
||||
"""Handle OVN peer relation."""
|
||||
|
||||
def publish_cluster_local_hostname(self, hostname: str = None) -> None:
|
||||
"""Announce hostname on relation.
|
||||
|
||||
This will be used by our peers and clients to build a connection
|
||||
string to the remote cluster.
|
||||
|
||||
:param hostname: Override hostname to announce.
|
||||
:type hostname: Optional[str]
|
||||
"""
|
||||
_hostname = hostname or self.cluster_local_hostname
|
||||
if _hostname:
|
||||
self.interface.set_unit_data({"bound-hostname": str(_hostname)})
|
||||
|
||||
def expected_peers_available(self) -> bool:
|
||||
"""Whether expected peers have joined and published data on peer rel.
|
||||
|
||||
NOTE: This does not work for the normal inter-charm relations, please
|
||||
refer to the separate method for that in the shared interface library.
|
||||
|
||||
:returns: True if expected peers have joined and published data,
|
||||
False otherwise.
|
||||
:rtype: bool
|
||||
"""
|
||||
joined_units = self.interface.all_joined_units()
|
||||
# Remove this unit from expected_peer_units count
|
||||
expected_remote_units = self.interface.expected_peer_units() - 1
|
||||
if len(joined_units) < expected_remote_units:
|
||||
logging.debug(
|
||||
f"Expected {expected_remote_units} but only {joined_units} "
|
||||
"have joined so far"
|
||||
)
|
||||
return False
|
||||
hostnames = self.interface.get_all_unit_values("bound-hostname")
|
||||
if len(hostnames) < expected_remote_units:
|
||||
logging.debug(
|
||||
"Not all units have published a bound-hostname. Current "
|
||||
f"hostname list: {hostnames}"
|
||||
)
|
||||
return False
|
||||
else:
|
||||
logging.debug(
|
||||
f"All expected peers are present. Hostnames: {hostnames}"
|
||||
)
|
||||
return True
|
||||
|
||||
@property
|
||||
def db_nb_connection_strs(self) -> Iterator[str]:
|
||||
"""Provide Northbound DB connection strings.
|
||||
|
||||
We override the parent property because for the peer relation
|
||||
``cluster_remote_hostnames`` does not contain self.
|
||||
|
||||
:returns: Northbound DB connection strings
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
return itertools.chain(
|
||||
self.db_connection_strs(
|
||||
(self.cluster_local_hostname,), self.db_nb_port
|
||||
),
|
||||
self.db_connection_strs(
|
||||
self.cluster_remote_hostnames, self.db_nb_port
|
||||
),
|
||||
)
|
||||
|
||||
@property
|
||||
def db_nb_cluster_connection_strs(self) -> Iterator[str]:
|
||||
"""Provide Northbound DB Cluster connection strings.
|
||||
|
||||
We override the parent property because for the peer relation
|
||||
``cluster_remote_hostnames`` does not contain self.
|
||||
|
||||
:returns: Northbound DB connection strings
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
return itertools.chain(
|
||||
self.db_connection_strs(
|
||||
(self.cluster_local_hostname,), self.db_nb_cluster_port
|
||||
),
|
||||
self.db_connection_strs(
|
||||
self.cluster_remote_hostnames, self.db_nb_cluster_port
|
||||
),
|
||||
)
|
||||
|
||||
@property
|
||||
def db_sb_cluster_connection_strs(self) -> Iterator[str]:
|
||||
"""Provide Southbound DB Cluster connection strings.
|
||||
|
||||
We override the parent property because for the peer relation
|
||||
``cluster_remote_hostnames`` does not contain self.
|
||||
|
||||
:returns: Southbound DB connection strings
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
return itertools.chain(
|
||||
self.db_connection_strs(
|
||||
(self.cluster_local_hostname,), self.db_sb_cluster_port
|
||||
),
|
||||
self.db_connection_strs(
|
||||
self.cluster_remote_hostnames, self.db_sb_cluster_port
|
||||
),
|
||||
)
|
||||
|
||||
@property
|
||||
def db_sb_connection_strs(self) -> Iterator[str]:
|
||||
"""Provide Southbound DB connection strings.
|
||||
|
||||
We override the parent property because for the peer relation
|
||||
``cluster_remote_hostnames`` does not contain self. We use a different
|
||||
port for connecting to the SB DB as there is currently no RBAC profile
|
||||
that provides the privileges ``ovn-northd`` requires to operate.
|
||||
|
||||
:returns: Southbound DB connection strings
|
||||
:rtype: Iterator[str]
|
||||
"""
|
||||
return itertools.chain(
|
||||
self.db_connection_strs(
|
||||
(self.cluster_local_hostname,), self.db_sb_admin_port
|
||||
),
|
||||
self.db_connection_strs(
|
||||
self.cluster_remote_hostnames, self.db_sb_admin_port
|
||||
),
|
||||
)
|
||||
|
||||
def _on_peers_relation_joined(
|
||||
self, event: ops.framework.EventBase
|
||||
) -> None:
|
||||
"""Process peer joined event."""
|
||||
self.publish_cluster_local_hostname()
|
||||
|
||||
def context(self) -> dict:
|
||||
"""Context from relation data."""
|
||||
ctxt = super().context()
|
||||
ctxt.update(
|
||||
{
|
||||
"cluster_local_hostname": self.cluster_local_hostname,
|
||||
"cluster_remote_hostnames": self.cluster_remote_hostnames,
|
||||
"db_nb_cluster_connection_strs": self.db_nb_cluster_connection_strs,
|
||||
"db_sb_cluster_connection_strs": self.db_sb_cluster_connection_strs,
|
||||
"db_sb_cluster_port": self.db_sb_cluster_port,
|
||||
"db_nb_cluster_port": self.db_nb_cluster_port,
|
||||
"db_nb_connection_strs": list(self.db_nb_connection_strs),
|
||||
"db_sb_connection_strs": list(self.db_sb_connection_strs),
|
||||
}
|
||||
)
|
||||
return ctxt
|
||||
|
||||
|
||||
class OVSDBCMSProvidesHandler(
|
||||
sunbeam_rhandlers.RelationHandler, OVNRelationUtils
|
||||
):
|
||||
"""Handle provides side of ovsdb-cms."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
charm: ops.charm.CharmBase,
|
||||
relation_name: str,
|
||||
callback_f: Callable,
|
||||
mandatory: bool = False,
|
||||
) -> None:
|
||||
"""Run constructor."""
|
||||
super().__init__(charm, relation_name, callback_f, mandatory)
|
||||
self._update_address_data()
|
||||
|
||||
def setup_event_handler(self) -> ops.charm.Object:
|
||||
"""Configure event handlers for an Identity service relation."""
|
||||
# Lazy import to ensure this lib is only required if the charm
|
||||
# has this relation.
|
||||
logger.debug("Setting up ovs-cms provides event handler")
|
||||
import charms.ovn_central_k8s.v0.ovsdb as ovsdb
|
||||
|
||||
ovsdb_svc = ovsdb.OVSDBCMSProvides(
|
||||
self.charm,
|
||||
self.relation_name,
|
||||
)
|
||||
self.framework.observe(
|
||||
ovsdb_svc.on.ready, self._on_ovsdb_service_ready
|
||||
)
|
||||
return ovsdb_svc
|
||||
|
||||
def _on_ovsdb_service_ready(self, event: ops.framework.EventBase) -> None:
|
||||
"""Handle OVSDB CMS change events."""
|
||||
self.callback_f(event)
|
||||
|
||||
def _update_address_data(self) -> None:
|
||||
"""Update hostname and IP address data on all relations."""
|
||||
self.interface.set_unit_data(
|
||||
{
|
||||
"bound-hostname": str(self.cluster_local_hostname),
|
||||
"bound-address": str(self.cluster_local_addr),
|
||||
"ingress-bound-address": str(self.cluster_ingress_addr),
|
||||
}
|
||||
)
|
||||
|
||||
@property
|
||||
def ready(self) -> bool:
|
||||
"""Whether the interface is ready."""
|
||||
return True
|
||||
|
||||
|
||||
class OVSDBCMSRequiresHandler(
|
||||
sunbeam_rhandlers.RelationHandler, OVNRelationUtils
|
||||
):
|
||||
"""Handle provides side of ovsdb-cms."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
charm: ops.charm.CharmBase,
|
||||
relation_name: str,
|
||||
callback_f: Callable,
|
||||
mandatory: bool = False,
|
||||
) -> None:
|
||||
"""Run constructor."""
|
||||
super().__init__(charm, relation_name, callback_f, mandatory)
|
||||
|
||||
def setup_event_handler(self) -> ops.charm.Object:
|
||||
"""Configure event handlers for an Identity service relation."""
|
||||
# Lazy import to ensure this lib is only required if the charm
|
||||
# has this relation.
|
||||
logger.debug("Setting up ovs-cms requires event handler")
|
||||
import charms.ovn_central_k8s.v0.ovsdb as ovsdb
|
||||
|
||||
ovsdb_svc = ovsdb.OVSDBCMSRequires(
|
||||
self.charm,
|
||||
self.relation_name,
|
||||
)
|
||||
self.framework.observe(
|
||||
ovsdb_svc.on.ready, self._on_ovsdb_service_ready
|
||||
)
|
||||
self.framework.observe(
|
||||
ovsdb_svc.on.goneaway, self._on_ovsdb_service_goneaway
|
||||
)
|
||||
return ovsdb_svc
|
||||
|
||||
def _on_ovsdb_service_ready(self, event: ops.framework.EventBase) -> None:
|
||||
"""Handle OVSDB CMS change events."""
|
||||
self.callback_f(event)
|
||||
|
||||
def _on_ovsdb_service_goneaway(
|
||||
self, event: ops.framework.EventBase
|
||||
) -> None:
|
||||
"""Handle OVSDB CMS change events."""
|
||||
self.callback_f(event)
|
||||
if self.mandatory:
|
||||
logger.debug("ovsdb-cms integration removed, stop services")
|
||||
self.charm.stop_services({self.relation_name})
|
||||
self.status.set(BlockedStatus("integration missing"))
|
||||
|
||||
@property
|
||||
def ready(self) -> bool:
|
||||
"""Whether the interface is ready."""
|
||||
return self.interface.remote_ready()
|
||||
|
||||
def context(self) -> dict:
|
||||
"""Context from relation data."""
|
||||
ctxt = super().context()
|
||||
ctxt.update(
|
||||
{
|
||||
"local_hostname": self.cluster_local_hostname,
|
||||
"hostnames": self.interface.bound_hostnames(),
|
||||
"local_address": self.cluster_local_addr,
|
||||
"addresses": self.interface.bound_addresses(),
|
||||
"db_ingress_sb_connection_strs": self.db_ingress_sb_connection_strs,
|
||||
"db_ingress_nb_connection_strs": self.db_ingress_nb_connection_strs,
|
||||
"db_sb_connection_strs": ",".join(self.db_sb_connection_strs),
|
||||
"db_nb_connection_strs": ",".join(self.db_nb_connection_strs),
|
||||
"db_sb_connection_hostname_strs": ",".join(
|
||||
self.db_sb_connection_hostname_strs
|
||||
),
|
||||
"db_nb_connection_hostname_strs": ",".join(
|
||||
self.db_nb_connection_hostname_strs
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
return ctxt
|
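The address and connection-string helpers on OVNRelationUtils above do not touch relation state, so their output can be sketched standalone; the hostnames below are invented for illustration.

from ops_sunbeam.ovn.relation_handlers import OVNRelationUtils

utils = OVNRelationUtils()
print(utils._format_addr("10.0.0.4"))       # 10.0.0.4
print(utils._format_addr("2001:db8::4"))    # [2001:db8::4]
print(list(utils.db_connection_strs(
    ["ovn-central-0.example", "ovn-central-1.example"], utils.db_sb_port)))
# ['ssl:ovn-central-0.example:6642', 'ssl:ovn-central-1.example:6642']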
1785
ops-sunbeam/ops_sunbeam/relation_handlers.py
Normal file
File diff suppressed because it is too large
92
ops-sunbeam/ops_sunbeam/templating.py
Normal file
@@ -0,0 +1,92 @@
|
||||
# Copyright 2021 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Module for rendering templates inside containers."""
|
||||
|
||||
import logging
|
||||
import os
|
||||
from pathlib import (
|
||||
Path,
|
||||
)
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
List,
|
||||
)
|
||||
|
||||
import ops.pebble
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import ops_sunbeam.core as sunbeam_core
|
||||
import ops.model
|
||||
|
||||
import jinja2
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_container(
|
||||
containers: List["ops.model.Container"], name: str
|
||||
) -> "ops.model.Container":
|
||||
"""Search for container with given name inlist of containers."""
|
||||
container = None
|
||||
for c in containers:
|
||||
if c.name == name:
|
||||
container = c
|
||||
return container
|
||||
|
||||
|
||||
def sidecar_config_render(
|
||||
container: "ops.model.Container",
|
||||
config: "sunbeam_core.ContainerConfigFile",
|
||||
template_dir: str,
|
||||
context: "sunbeam_core.OPSCharmContexts",
|
||||
) -> bool:
|
||||
"""Render templates inside containers.
|
||||
|
||||
:return: Whether file was updated.
|
||||
:rtype: bool
|
||||
"""
|
||||
file_updated = False
|
||||
try:
|
||||
original_contents = container.pull(config.path).read()
|
||||
except (ops.pebble.PathError, FileNotFoundError):
|
||||
original_contents = None
|
||||
loader = jinja2.FileSystemLoader(template_dir)
|
||||
_tmpl_env = jinja2.Environment(loader=loader)
|
||||
try:
|
||||
template = _tmpl_env.get_template(
|
||||
os.path.basename(config.path) + ".j2"
|
||||
)
|
||||
except jinja2.exceptions.TemplateNotFound:
|
||||
template = _tmpl_env.get_template(os.path.basename(config.path))
|
||||
contents = template.render(context)
|
||||
if original_contents == contents:
|
||||
log.debug(
|
||||
f"{config.path} in {container.name} matches desired content."
|
||||
)
|
||||
else:
|
||||
kwargs = {
|
||||
"user": config.user,
|
||||
"group": config.group,
|
||||
"permissions": config.permissions,
|
||||
}
|
||||
parent_dir = str(Path(config.path).parent)
|
||||
if not container.isdir(parent_dir):
|
||||
container.make_dir(parent_dir, make_parents=True)
|
||||
container.push(config.path, contents, **kwargs)
|
||||
file_updated = True
|
||||
log.debug(
|
||||
f"Wrote template {config.path} in container {container.name}."
|
||||
)
|
||||
return file_updated
|
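A sketch of how a pebble handler might drive sidecar_config_render above; `self` is assumed to provide `charm`, `container_name`, `template_dir` and `default_container_configs()`, mirroring the handler API used earlier in this changeset.

import ops_sunbeam.core as sunbeam_core
import ops_sunbeam.templating as sunbeam_templating


def write_config(self, context: sunbeam_core.OPSCharmContexts) -> list:
    """Render every default config file, returning the paths that changed."""
    container = self.charm.unit.get_container(self.container_name)
    updated = []
    for config in self.default_container_configs():
        if sunbeam_templating.sidecar_config_render(
            container, config, self.template_dir, context
        ):
            updated.append(config.path)
    return updated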
769
ops-sunbeam/ops_sunbeam/test_utils.py
Normal file
@@ -0,0 +1,769 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Copyright 2020 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Module containing shared code to be used in a charms units tests."""
|
||||
|
||||
import collections
|
||||
import inspect
|
||||
import json
|
||||
import os
|
||||
import pathlib
|
||||
import sys
|
||||
import typing
|
||||
import unittest
|
||||
from typing import (
|
||||
List,
|
||||
Optional,
|
||||
)
|
||||
|
||||
import ops
|
||||
from mock import (
|
||||
MagicMock,
|
||||
Mock,
|
||||
patch,
|
||||
)
|
||||
|
||||
sys.path.append("lib") # noqa
|
||||
sys.path.append("src") # noqa
|
||||
|
||||
from ops import (
|
||||
framework,
|
||||
model,
|
||||
)
|
||||
from ops.testing import (
|
||||
Harness,
|
||||
_TestingModelBackend,
|
||||
_TestingPebbleClient,
|
||||
)
|
||||
|
||||
TEST_CA = """-----BEGIN CERTIFICATE-----
|
||||
MIIDADCCAeigAwIBAgIUOTGfdiGSlKoiyWskxH1za0Nh7cYwDQYJKoZIhvcNAQEL
|
||||
BQAwGjEYMBYGA1UEAwwPRGl2aW5lQXV0aG9yaXR5MB4XDTIyMDIwNjE4MjYyM1oX
|
||||
DTMzMDEyMDE4MjYyM1owRTFDMEEGA1UEAxM6VmF1bHQgSW50ZXJtZWRpYXRlIENl
|
||||
cnRpZmljYXRlIEF1dGhvcml0eSAoY2hhcm0tcGtpLWxvY2FsKTCCASIwDQYJKoZI
|
||||
hvcNAQEBBQADggEPADCCAQoCggEBAMvzFo76z05TU8ECnXpJC2b1mMQK6r5FD+9K
|
||||
CwxPUr6l5ar0rm3+CM/MQA0RBrR17Ql8kZab7gSEcVbbUUM825zqoin+ECsaYttb
|
||||
kYMHt5lhgEEPwOn9kWC2wh8bBym1eR1zZnpcy0UrclaZByQ7BH+KG3ENi0vozuxp
|
||||
xVgQV06wjBC9Bl3WeaUtMiYb/7CqPgTgZPBDL97eae8H3A29U5Xpr/qGf2Gx27pN
|
||||
zAyxOsuSDwSB8NrVEZRYAT/kvLku0c/ZmZpU2xIVOOsUkTF+r6b2OfLnqRajl7zs
|
||||
KatfnQUb4tCFZ3IO83VvlHS54PxDflTOb5qGSe1r21RTfM9gjmsCAwEAAaMTMBEw
|
||||
DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAUVXG2lGye4RV4NWZ
|
||||
rZ6OWmgzy3/wlMKRAt8tXsB2uaFqxg7QzIMfFsLCgRF5xJNS1faHmJIK391or3ip
|
||||
ZNgygS4eqWgBqqds60bB4s0JW+QEVfyKeB/tZHm83fZgEypwOs9N0EW/xLslNaFe
|
||||
zT8PgdjdzBW80l7KAMy4/GzZvvK7MWfkkhwwnY7oXs9F3q28gFIdcYyc9A1SDg/8
|
||||
8jWI6RP5yBcNS/PgUmVV+Ko1uTHxNsKjOn7QPuUgjMBeW0fpBCHVFxz7rs+orHNF
|
||||
JSWcYpOxivTh+YO8cAxAGlKzrgZDcXQDjGfF34U/v3niDUHO+CAk6Jz3io4Oxh2X
|
||||
GksTPQ==
|
||||
-----END CERTIFICATE-----"""
|
||||
|
||||
TEST_CHAIN = """-----BEGIN CERTIFICATE-----
|
||||
MIIDADCCAeigAwIBAgIUOTGfdiGSlKoiyWskxH1za0Nh7cYwDQYJKoZIhvcNAQEL
|
||||
BQAwGjEYMBYGA1UEAwwPRGl2aW5lQXV0aG9yaXR5MB4XDTIyMDIwNjE4MjYyM1oX
|
||||
DTMzMDEyMDE4MjYyM1owRTFDMEEGA1UEAxM6VmF1bHQgSW50ZXJtZWRpYXRlIENl
|
||||
cnRpZmljYXRlIEF1dGhvcml0eSAoY2hhcm0tcGtpLWxvY2FsKTCCASIwDQYJKoZI
|
||||
hvcNAQEBBQADggEPADCCAQoCggEBAMvzFo76z05TU8ECnXpJC2b1mMQK6r5FD+9K
|
||||
CwxPUr6l5ar0rm3+CM/MQA0RBrR17Ql8kZab7gSEcVbbUUM825zqoin+ECsaYttb
|
||||
kYMHt5lhgEEPwOn9kWC2wh8bBym1eR1zZnpcy0UrclaZByQ7BH+KG3ENi0vozuxp
|
||||
xVgQV06wjBC9Bl3WeaUtMiYb/7CqPgTgZPBDL97eae8H3A29U5Xpr/qGf2Gx27pN
|
||||
zAyxOsuSDwSB8NrVEZRYAT/kvLku0c/ZmZpU2xIVOOsUkTF+r6b2OfLnqRajl7zs
|
||||
KatfnQUb4tCFZ3IO83VvlHS54PxDflTOb5qGSe1r21RTfM9gjmsCAwEAAaMTMBEw
|
||||
DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAUVXG2lGye4RV4NWZ
|
||||
rZ6OWmgzy3/wlMKRAt8tXsB2uaFqxg7QzIMfFsLCgRF5xJNS1faHmJIK391or3ip
|
||||
ZNgygS4eqWgBqqds60bB4s0JW+QEVfyKeB/tZHm83fZgEypwOs9N0EW/xLslNaFe
|
||||
zT8PgdjdzBW80l7KAMy4/GzZvvK7MWfkkhwwnY7oXs9F3q28gFIdcYyc9A1SDg/8
|
||||
8jWI6RP5yBcNS/PgUmVV+Ko1uTHxNsKjOn7QPuUgjMBeW0fpBCHVFxz7rs+orHNF
|
||||
JSWcYpOxivTh+YO8cAxAGlKzrgZDcXQDjGfF34U/v3niDUHO+CAk6Jz3io4Oxh2X
|
||||
GksTPQ==
|
||||
-----END CERTIFICATE-----"""
|
||||
|
||||
TEST_SERVER_CERT = """-----BEGIN CERTIFICATE-----
|
||||
MIIEEzCCAvugAwIBAgIUIRVQ0iFgTDBP+Ju6AlcnxTHywUgwDQYJKoZIhvcNAQEL
|
||||
BQAwRTFDMEEGA1UEAxM6VmF1bHQgSW50ZXJtZWRpYXRlIENlcnRpZmljYXRlIEF1
|
||||
dGhvcml0eSAoY2hhcm0tcGtpLWxvY2FsKTAeFw0yMjAyMDcxODI1NTlaFw0yMzAy
|
||||
MDcxNzI2MjhaMCsxKTAnBgNVBAMTIGp1anUtOTNiMDlkLXphemEtYWMzMDBhNjEz
|
||||
OTI2LTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4VYeKjC3o9GZ
|
||||
AnbuVBudyd/a5sHnaGZlMJz8zevhGr5nARRR194bgR8VSB9k1fRbF1Y9WTygBW5a
|
||||
iXPy+KbmaD5DsDpJNkF/2zOQDLG9nKmLbamrAcHFU8l8kAVwkdhYgu3T8QbLksoz
|
||||
YPiYavg9KfA51wVxTRuUyLpvSLJkc1q0xwuJiE6d46Grdpfyve9cS4G9JxLUL1S9
|
||||
HPMIT6rO25AKepPbtGMU/MN/yj/qfqWKga/X/bQzPyQB2UjNFI/0kn3iBi+yJRmI
|
||||
3o7ku0exd75eRhMPR7FyG9yfgMroK3FjSJE5fj73akkEd4SW8FgyaeUeoeYxj1G+
|
||||
sVaLm6aBbwIDAQABo4IBEzCCAQ8wDgYDVR0PAQH/BAQDAgOoMB0GA1UdJQQWMBQG
|
||||
CCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUBwPuvsOqVMzZke3aVEQTzcXC
|
||||
EDwwSgYIKwYBBQUHAQEEPjA8MDoGCCsGAQUFBzAChi5odHRwOi8vMTcyLjIwLjAu
|
||||
MjI3OjgyMDAvdjEvY2hhcm0tcGtpLWxvY2FsL2NhMDEGA1UdEQQqMCiCIGp1anUt
|
||||
OTNiMDlkLXphemEtYWMzMDBhNjEzOTI2LTExhwSsFABAMEAGA1UdHwQ5MDcwNaAz
|
||||
oDGGL2h0dHA6Ly8xNzIuMjAuMC4yMjc6ODIwMC92MS9jaGFybS1wa2ktbG9jYWwv
|
||||
Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQBr3WbXVesJ4R2P1Z67BS+wy9a1JYRLtn7l
|
||||
yS+XoEYKhpbxTZh0q74sAhGxoSlvc9GGyeeIsXzndw6pbGyK6WCOmJoelWIYr0Be
|
||||
wzSbqkarasPFVpPJnFAGqry6y5B3lZ3OrhHJOIwMSOMQfPt2dSsz+HqfrMwxqAek
|
||||
smciCVWqVwN+uq0yqeH5QuACHlkJSV4o/5SkDcFZFaFHuTRqd6hMpczZIw+o+NRn
|
||||
OO1YV69oqCCfUE01zlwTF7thZA19xacGS9f8GJO9Ij15MiysZLjxoTfoof/wDdNd
|
||||
A0Rs/pW3ja1UfTItPdjC4BgWtQh1a7O9NznrW2L6nRCASI0F1FvQ
|
||||
-----END CERTIFICATE-----"""
|
||||
|
||||
TEST_SERVER_KEY = """-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEA4VYeKjC3o9GZAnbuVBudyd/a5sHnaGZlMJz8zevhGr5nARRR
|
||||
194bgR8VSB9k1fRbF1Y9WTygBW5aiXPy+KbmaD5DsDpJNkF/2zOQDLG9nKmLbamr
|
||||
AcHFU8l8kAVwkdhYgu3T8QbLksozYPiYavg9KfA51wVxTRuUyLpvSLJkc1q0xwuJ
|
||||
iE6d46Grdpfyve9cS4G9JxLUL1S9HPMIT6rO25AKepPbtGMU/MN/yj/qfqWKga/X
|
||||
/bQzPyQB2UjNFI/0kn3iBi+yJRmI3o7ku0exd75eRhMPR7FyG9yfgMroK3FjSJE5
|
||||
fj73akkEd4SW8FgyaeUeoeYxj1G+sVaLm6aBbwIDAQABAoIBAQC8O84y/ENLa5lf
|
||||
v63TQMaMjp0zyqLeSTsaYumjsvl197vf4POFWhqrwCVs/BylxdwaIIZa9xPNtaOX
|
||||
0u4S3Ij4Z5rvqaDi29BMckRQ9mEob1DzqJobe5y1I0kUnhatHobByJ4VZ9HCq3pD
|
||||
9SaNpRSi5fPLNNayzOl6zJKNrcfPu1IA085oCzANmFBPM9+3H4xOgIT9f/0ypw24
|
||||
F9iZ6SEp6p81iTvlPB7FSLakMAww3V63M9E92drA2sB2veDRfR8/vHoEdL5vhYZU
|
||||
v4/GdwzByL2IplJLB1I9fsITzZs9DXdw6+musOq9i8u8R1G6IickPslUegn+PPFR
|
||||
vcDP69dxAoGBAP45mzH/qYKhbe9Vf+OJgU0is0gEeixlTeiIFhEU6AjGr7/2rTX5
|
||||
7Etzdc0muCc5Atepf82pqoY3Ns8kE/FGbmFJTGTsFIK+GAdMDaH0IDG1zUoBbOqL
|
||||
58Xrq42wEX2CuCeCHTiHSVsB4/uY+IfzOa+t+CrwczZl3+i/4PrKCaZ9AoGBAOLo
|
||||
4IHmenDgBSbQIWOAaUrO2jTWjsRNIDOO0tfkJCnT/bLgaWK1Lg313gD87PF+/sFM
|
||||
6TakFC9e0ieLKDKbT6aML1uF3nTl3qkE2K771PM57w/w3zdPalRbbpTgJc4BWhJc
|
||||
iqSPsrUYfHvy5IpbdMnzKRbOGR9Hc6bx3aA+Aw9bAoGAZsHuIyWN5MlPYGAU02nv
|
||||
I7iU8tUsdOl1tjnbgYgLyhBVVahllt2wT0caJJQz91ap+XX/vKeJz7pdoxiYHvwy
|
||||
/YvdHyX1nGst1zU8hWvh33X2xqUQ2zU1t+BsdVbnmu3Nddq36PN2CR0Yg8fvHTSI
|
||||
6qPNHb4XM7O176QvUe98OxkCgYB5AucQf+EWp3I349GaphYBLlXSzgYvjE47ENVD
|
||||
C8l5gTQQnHu3h5Z7HX97GWgn1ql4X1MUr+aP6Mq9CgqzCn8s/CAZeEhOIXVgwFPq
|
||||
5iUIXgIvhy8T6Ud0m5pazTt8JN5rYm0SHAybZeall8DoRKQBO6vTHLDrLIjyJJUk
|
||||
a03odwKBgG454yINXnHPBo9jjcEKwBTaMLH0n25HMJmWaJUnGVmPzrhxHp5xMKZz
|
||||
ULTaKTN2gp7E2BuxENtAyplrvLiXXYH3CqT528JgMdMm0al6X3MXo9WqbOg/KNpa
|
||||
4JSyyuZ42yGmYlhMCimlk3kVnDxb8PJLWOFnx6f9/i0RWUqnY0nU
|
||||
-----END RSA PRIVATE KEY-----"""
|
||||
|
||||
TEST_CSR = """-----BEGIN CERTIFICATE REQUEST-----
|
||||
MIICxTCCAa0CAQAwRzEWMBQGA1UEAwwNb3ZuLWNlbnRyYWwtMDEtMCsGA1UELQwk
|
||||
ZTFhZjIxMmEtNTUxOC00MjkyLWIxZTktZGM3NThlZTZkMzEwMIIBIjANBgkqhkiG
|
||||
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzrJfdNfHKyzu9dAnoEi2DE9nTQnkUjGVIMXg
|
||||
8oCy5NgIm2ORoeXrdrr2ODIUepxX7M40cWmvoFiABK6DGgpP3wh6XosWftIc8hrX
|
||||
/KaHtL4iru+dKJF9d5TNe+vWoFY1jh3k/+c+F59UhdoRbw4QcSTgLBvsb/XC8pdD
|
||||
gc96GWgzyA5exZN9xXg8dvHCMKLLzCkAgHkMlkSPB6Ghi9bUeeXIYRvU8D+EMh+R
|
||||
hKaSsvOxRyISgkvE0cGQ86NIuXkNvvvr8bFYNLMxBNkjZrqlZyqhEsq0eZAAm5iu
|
||||
Fi33z3uBaA5d7V7wbYAxWuFlckdGTHql3vO/W2X3PHbT/TEBxQIDAQABoDkwNwYJ
|
||||
KoZIhvcNAQkOMSowKDAmBgNVHREEHzAdggwxMC4xLjEwNy4yMTSCDTEwLjE1Mi4x
|
||||
ODMuMTkwDQYJKoZIhvcNAQELBQADggEBADJSto8T6XiMdjMKekhS6SsQKNyijVJ0
|
||||
cJr7x1u8FLEbCWlLRO9kMroz4i4iSu5xYcewNsRioiN4A56FuoOE8qCjzAHczR/8
|
||||
Anah4rYJFt7wCu+RxfHEvBmSYgV0Rbq/KwjYnclCpTu/m5yrUmsI092+2AaOB/nA
|
||||
c0Npr5oZZPeWL4S3+c02IxCeH1EwxIQtfprA/VgCWpEU25ImQb/c14KF5EQEHhv6
|
||||
A5qVqdCCg4LlNrqiFYyVtHqDco+voq4W95KkkUYe20o16qOTwpR72qn75DagO/8I
|
||||
R3iMBPwkhi4+igbliU/EMLltTj8pMilUhc1Ewuji4QZhsM2qxgZkcBk=
|
||||
-----END CERTIFICATE REQUEST-----"""
|
||||
|
||||
|
||||
class ContainerCalls:
|
||||
"""Object to log container calls."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Init container calls."""
|
||||
self.start = collections.defaultdict(list)
|
||||
self.stop = collections.defaultdict(list)
|
||||
self.push = collections.defaultdict(list)
|
||||
self.pull = collections.defaultdict(list)
|
||||
self.execute = collections.defaultdict(list)
|
||||
self.remove_path = collections.defaultdict(list)
|
||||
|
||||
def add_start(self, container_name: str, call: typing.Dict) -> None:
|
||||
"""Log a start call."""
|
||||
self.start[container_name].append(call)
|
||||
|
||||
def add_stop(self, container_name: str, call: typing.Dict) -> None:
|
||||
"""Log a start call."""
|
||||
self.stop[container_name].append(call)
|
||||
|
||||
def started_services(self, container_name: str) -> List:
|
||||
"""Distinct unordered list of services that were started."""
|
||||
return list(
|
||||
set(
|
||||
[
|
||||
svc
|
||||
for svc_list in self.start[container_name]
|
||||
for svc in svc_list
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
def stopped_services(self, container_name: str) -> List:
|
||||
"""Distinct unordered list of services that were started."""
|
||||
return list(
|
||||
set(
|
||||
[
|
||||
svc
|
||||
for svc_list in self.stop[container_name]
|
||||
for svc in svc_list
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
def add_push(self, container_name: str, call: typing.Dict) -> None:
|
||||
"""Log a push call."""
|
||||
self.push[container_name].append(call)
|
||||
|
||||
def add_pull(self, container_name: str, call: typing.Dict) -> None:
|
||||
"""Log a pull call."""
|
||||
self.pull[container_name].append(call)
|
||||
|
||||
def add_execute(self, container_name: str, call: typing.List) -> None:
|
||||
"""Log a execute call."""
|
||||
self.execute[container_name].append(call)
|
||||
|
||||
def add_remove_path(self, container_name: str, call: str) -> None:
|
||||
"""Log a remove path call."""
|
||||
self.remove_path[container_name].append(call)
|
||||
|
||||
def updated_files(self, container_name: str) -> typing.List:
|
||||
"""Return a list of files that have been updated in a container."""
|
||||
return [c["path"] for c in self.push.get(container_name, [])]
|
||||
|
||||
def file_update_calls(
|
||||
self, container_name: str, file_name: str
|
||||
) -> typing.List:
|
||||
"""Return the update call for File_name in container_name."""
|
||||
return [
|
||||
c
|
||||
for c in self.push.get(container_name, [])
|
||||
if c["path"] == file_name
|
||||
]
|
||||
|
||||
|
||||
class CharmTestCase(unittest.TestCase):
|
||||
"""Class to make mocking easier."""
|
||||
|
||||
container_calls = ContainerCalls()
|
||||
|
||||
def setUp(self, obj: "typing.ANY", patches: "typing.List") -> None:
|
||||
"""Run constructor."""
|
||||
super().setUp()
|
||||
self.patches = patches
|
||||
self.obj = obj
|
||||
self.patch_all()
|
||||
|
||||
def patch(self, method: "typing.ANY") -> Mock:
|
||||
"""Patch the named method on self.obj."""
|
||||
_m = patch.object(self.obj, method)
|
||||
mock = _m.start()
|
||||
self.addCleanup(_m.stop)
|
||||
return mock
|
||||
|
||||
def patch_obj(self, obj: "typing.ANY", method: "typing.ANY") -> Mock:
|
||||
"""Patch the named method on obj."""
|
||||
_m = patch.object(obj, method)
|
||||
mock = _m.start()
|
||||
self.addCleanup(_m.stop)
|
||||
return mock
|
||||
|
||||
def patch_all(self) -> None:
|
||||
"""Patch all objects in self.patches."""
|
||||
for method in self.patches:
|
||||
setattr(self, method, self.patch(method))
|
||||
|
||||
def check_file(
|
||||
self,
|
||||
container: str,
|
||||
path: str,
|
||||
contents: str = None,
|
||||
user: str = None,
|
||||
group: str = None,
|
||||
permissions: str = None,
|
||||
) -> None:
|
||||
"""Check the attributes of a file."""
|
||||
client = self.harness.charm.unit.get_container(container)._pebble
|
||||
files = client.list_files(path, itself=True)
|
||||
self.assertEqual(len(files), 1)
|
||||
test_file = files[0]
|
||||
self.assertEqual(test_file.path, path)
|
||||
if contents:
|
||||
with client.pull(path) as infile:
|
||||
received_data = infile.read()
|
||||
self.assertEqual(contents, received_data)
|
||||
if user:
|
||||
self.assertEqual(test_file.user_id, user)
|
||||
if group:
|
||||
self.assertEqual(test_file.group_id, group)
|
||||
if permissions:
|
||||
self.assertEqual(test_file.permissions, permissions)
|
||||
|
||||
|
||||
def add_ingress_relation(harness: Harness, endpoint_type: str) -> str:
|
||||
"""Add ingress relation."""
|
||||
app_name = "traefik-" + endpoint_type
|
||||
unit_name = app_name + "/0"
|
||||
rel_name = "ingress-" + endpoint_type
|
||||
rel_id = harness.add_relation(rel_name, app_name)
|
||||
harness.add_relation_unit(rel_id, unit_name)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_ingress_relation_data(
|
||||
harness: Harness, rel_id: str, endpoint_type: str
|
||||
) -> None:
|
||||
"""Add ingress data to ingress relation."""
|
||||
app_name = "traefik-" + endpoint_type
|
||||
url = "http://" + endpoint_type + "-url"
|
||||
|
||||
ingress_data = {"url": url}
|
||||
harness.update_relation_data(
|
||||
rel_id, app_name, {"ingress": json.dumps(ingress_data)}
|
||||
)
|
||||
|
||||
|
||||
def add_complete_ingress_relation(harness: Harness) -> None:
|
||||
"""Add complete Ingress relation."""
|
||||
for endpoint_type in ["internal", "public"]:
|
||||
rel_id = add_ingress_relation(harness, endpoint_type)
|
||||
add_ingress_relation_data(harness, rel_id, endpoint_type)
|
||||
|
||||
|
||||
def add_base_amqp_relation(harness: Harness) -> str:
|
||||
"""Add amqp relation."""
|
||||
rel_id = harness.add_relation("amqp", "rabbitmq")
|
||||
harness.add_relation_unit(rel_id, "rabbitmq/0")
|
||||
harness.add_relation_unit(rel_id, "rabbitmq/0")
|
||||
harness.update_relation_data(
|
||||
rel_id, "rabbitmq/0", {"ingress-address": "10.0.0.13"}
|
||||
)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_amqp_relation_credentials(harness: Harness, rel_id: str) -> None:
|
||||
"""Add amqp data to amqp relation."""
|
||||
harness.update_relation_data(
|
||||
rel_id,
|
||||
"rabbitmq",
|
||||
{"hostname": "rabbithost1.local", "password": "rabbit.pass"},
|
||||
)
|
||||
|
||||
|
||||
def add_base_ceph_access_relation(harness: Harness) -> str:
|
||||
"""Add ceph-access relation."""
|
||||
rel_id = harness.add_relation(
|
||||
"ceph-access", "cinder-ceph", app_data={"a": "b"}
|
||||
)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_ceph_access_relation_response(harness: Harness, rel_id: str) -> None:
|
||||
"""Add secret data to cinder-access relation."""
|
||||
credentials_content = {"uuid": "svcuser1", "key": "svcpass1"}
|
||||
credentials_id = harness.add_model_secret(
|
||||
"cinder-ceph", credentials_content
|
||||
)
|
||||
harness.grant_secret(credentials_id, harness.charm.app.name)
|
||||
harness.update_relation_data(
|
||||
rel_id, "cinder-ceph", {"access-credentials": credentials_id}
|
||||
)
|
||||
|
||||
|
||||
def add_base_identity_service_relation(harness: Harness) -> str:
|
||||
"""Add identity-service relation."""
|
||||
rel_id = harness.add_relation("identity-service", "keystone")
|
||||
harness.add_relation_unit(rel_id, "keystone/0")
|
||||
harness.add_relation_unit(rel_id, "keystone/0")
|
||||
harness.update_relation_data(
|
||||
rel_id, "keystone/0", {"ingress-address": "10.0.0.33"}
|
||||
)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_identity_service_relation_response(
|
||||
harness: Harness, rel_id: str
|
||||
) -> None:
|
||||
"""Add id service data to identity-service relation."""
|
||||
credentials_content = {"username": "svcuser1", "password": "svcpass1"}
|
||||
credentials_id = harness.add_model_secret("keystone", credentials_content)
|
||||
harness.grant_secret(credentials_id, harness.charm.app.name)
|
||||
harness.update_relation_data(
|
||||
rel_id,
|
||||
"keystone",
|
||||
{
|
||||
"admin-domain-id": "admindomid1",
|
||||
"admin-project-id": "adminprojid1",
|
||||
"admin-user-id": "adminuserid1",
|
||||
"api-version": "3",
|
||||
"auth-host": "keystone.local",
|
||||
"auth-port": "12345",
|
||||
"auth-protocol": "http",
|
||||
"internal-host": "keystone.internal",
|
||||
"internal-port": "5000",
|
||||
"internal-protocol": "http",
|
||||
"service-domain": "servicedom",
|
||||
"service-domain_id": "svcdomid1",
|
||||
"service-host": "keystone.service",
|
||||
"service-port": "5000",
|
||||
"service-protocol": "http",
|
||||
"service-project": "svcproj1",
|
||||
"service-project-id": "svcprojid1",
|
||||
"service-credentials": credentials_id,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def add_base_identity_credentials_relation(harness: Harness) -> str:
|
||||
"""Add identity-service relation."""
|
||||
rel_id = harness.add_relation("identity-credentials", "keystone")
|
||||
harness.add_relation_unit(rel_id, "keystone/0")
|
||||
harness.add_relation_unit(rel_id, "keystone/0")
|
||||
harness.update_relation_data(
|
||||
rel_id, "keystone/0", {"ingress-address": "10.0.0.35"}
|
||||
)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_identity_credentials_relation_response(
|
||||
harness: Harness, rel_id: str
|
||||
) -> None:
|
||||
"""Add id service data to identity-service relation."""
|
||||
credentials_content = {"username": "username", "password": "user-password"}
|
||||
credentials_id = harness.add_model_secret("keystone", credentials_content)
|
||||
harness.grant_secret(credentials_id, harness.charm.app.name)
|
||||
harness.update_relation_data(
|
||||
rel_id,
|
||||
"keystone",
|
||||
{
|
||||
"api-version": "3",
|
||||
"auth-host": "keystone.local",
|
||||
"auth-port": "12345",
|
||||
"auth-protocol": "http",
|
||||
"internal-host": "keystone.internal",
|
||||
"internal-port": "5000",
|
||||
"internal-protocol": "http",
|
||||
"credentials": credentials_id,
|
||||
"project-name": "user-project",
|
||||
"project-id": "uproj-id",
|
||||
"user-domain-name": "udomain-name",
|
||||
"user-domain-id": "udomain-id",
|
||||
"project-domain-name": "pdomain_-ame",
|
||||
"project-domain-id": "pdomain-id",
|
||||
"region": "region12",
|
||||
"public-endpoint": "http://10.20.21.11:80/openstack-keystone",
|
||||
"internal-endpoint": "http://10.153.2.45:80/openstack-keystone",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def add_base_db_relation(harness: Harness) -> str:
|
||||
"""Add db relation."""
|
||||
rel_id = harness.add_relation("database", "mysql")
|
||||
harness.add_relation_unit(rel_id, "mysql/0")
|
||||
harness.add_relation_unit(rel_id, "mysql/0")
|
||||
harness.update_relation_data(
|
||||
rel_id, "mysql/0", {"ingress-address": "10.0.0.3"}
|
||||
)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_db_relation_credentials(harness: Harness, rel_id: str) -> None:
|
||||
"""Add db credentials data to db relation."""
|
||||
harness.update_relation_data(
|
||||
rel_id,
|
||||
"mysql",
|
||||
{
|
||||
"username": "foo",
|
||||
"password": "hardpassword",
|
||||
"endpoints": "10.0.0.10",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def add_api_relations(harness: Harness) -> None:
|
||||
"""Add standard relation to api charm."""
|
||||
add_db_relation_credentials(harness, add_base_db_relation(harness))
|
||||
add_amqp_relation_credentials(harness, add_base_amqp_relation(harness))
|
||||
add_identity_service_relation_response(
|
||||
harness, add_base_identity_service_relation(harness)
|
||||
)
|
||||
|
||||
|
||||
def add_complete_db_relation(harness: Harness) -> str:
|
||||
"""Add complete DB relation."""
|
||||
rel_id = add_base_db_relation(harness)
|
||||
add_db_relation_credentials(harness, rel_id)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_complete_identity_relation(harness: Harness) -> str:
|
||||
"""Add complete Identity relation."""
|
||||
rel_id = add_base_identity_service_relation(harness)
|
||||
add_identity_service_relation_response(harness, rel_id)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_complete_identity_credentials_relation(harness: Harness) -> str:
|
||||
"""Add complete identity-credentials relation."""
|
||||
rel_id = add_base_identity_credentials_relation(harness)
|
||||
add_identity_credentials_relation_response(harness, rel_id)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_complete_amqp_relation(harness: Harness) -> str:
|
||||
"""Add complete AMQP relation."""
|
||||
rel_id = add_base_amqp_relation(harness)
|
||||
add_amqp_relation_credentials(harness, rel_id)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_ceph_relation_credentials(harness: Harness, rel_id: str) -> None:
|
||||
"""Add amqp data to amqp relation."""
|
||||
# During tests the charm class is never destroyed and recreated as it
|
||||
# would be between hook executions. This means request is never marked
|
||||
# as complete as it never matches the previous request and always looks
|
||||
# like it needs resending.
|
||||
harness.charm.ceph.interface.previous_requests = (
|
||||
harness.charm.ceph.interface.get_previous_requests_from_relations()
|
||||
)
|
||||
request = json.loads(
|
||||
harness.get_relation_data(rel_id, harness.charm.unit.name)[
|
||||
"broker_req"
|
||||
]
|
||||
)
|
||||
client_unit = harness.charm.unit.name.replace("/", "-")
|
||||
harness.update_relation_data(
|
||||
rel_id,
|
||||
"ceph-mon/0",
|
||||
{
|
||||
"auth": "cephx",
|
||||
"key": "AQBUfpVeNl7CHxAA8/f6WTcYFxW2dJ5VyvWmJg==",
|
||||
"ingress-address": "192.0.2.2",
|
||||
"ceph-public-address": "192.0.2.2",
|
||||
f"broker-rsp-{client_unit}": json.dumps(
|
||||
{"exit-code": 0, "request-id": request["request-id"]}
|
||||
),
|
||||
},
|
||||
)
|
||||
harness.add_relation_unit(rel_id, "ceph-mon/1")
|
||||
|
||||
|
||||
def add_base_ceph_relation(harness: Harness) -> str:
|
||||
"""Add identity-service relation."""
|
||||
rel_id = harness.add_relation("ceph", "ceph-mon")
|
||||
harness.add_relation_unit(rel_id, "ceph-mon/0")
|
||||
harness.update_relation_data(
|
||||
rel_id, "ceph-mon/0", {"ingress-address": "10.0.0.33"}
|
||||
)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_complete_ceph_relation(harness: Harness) -> str:
|
||||
"""Add complete ceph relation."""
|
||||
rel_id = add_base_ceph_relation(harness)
|
||||
add_ceph_relation_credentials(harness, rel_id)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_certificates_relation_certs(harness: Harness, rel_id: str) -> None:
|
||||
"""Add cert data to certificates relation."""
|
||||
cert = {
|
||||
"certificate": TEST_SERVER_CERT,
|
||||
"certificate_signing_request": TEST_CSR,
|
||||
"ca": TEST_CA,
|
||||
"chain": TEST_CHAIN,
|
||||
}
|
||||
harness.update_relation_data(
|
||||
rel_id, "vault", {"certificates": json.dumps([cert])}
|
||||
)
|
||||
|
||||
|
||||
def add_base_certificates_relation(harness: Harness) -> str:
|
||||
"""Add certificates relation."""
|
||||
rel_id = harness.add_relation("certificates", "vault")
|
||||
harness.add_relation_unit(rel_id, "vault/0")
|
||||
csr = {"certificate_signing_request": TEST_CSR}
|
||||
harness.update_relation_data(
|
||||
rel_id,
|
||||
harness.charm.unit.name,
|
||||
{
|
||||
"ingress-address": "10.0.0.34",
|
||||
"certificate_signing_requests": json.dumps([csr]),
|
||||
},
|
||||
)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_complete_certificates_relation(harness: Harness) -> str:
|
||||
"""Add complete certificates relation."""
|
||||
rel_id = add_base_certificates_relation(harness)
|
||||
add_certificates_relation_certs(harness, rel_id)
|
||||
return rel_id
|
||||
|
||||
|
||||
def add_complete_peer_relation(harness: Harness) -> str:
|
||||
"""Add complete peer relation."""
|
||||
rel_id = harness.add_relation("peers", harness.charm.app.name)
|
||||
new_unit = f"{harness.charm.app.name}/1"
|
||||
harness.add_relation_unit(rel_id, new_unit)
|
||||
harness.update_relation_data(
|
||||
rel_id, new_unit, {"ingress-address": "10.0.0.35"}
|
||||
)
|
||||
return rel_id
|
||||
|
||||
|
||||
test_relations = {
|
||||
"database": add_complete_db_relation,
|
||||
"amqp": add_complete_amqp_relation,
|
||||
"identity-service": add_complete_identity_relation,
|
||||
"identity-credentials": add_complete_identity_credentials_relation,
|
||||
"peers": add_complete_peer_relation,
|
||||
"certificates": add_complete_certificates_relation,
|
||||
"ceph": add_complete_ceph_relation,
|
||||
}
|
||||
|
||||
|
||||
def add_all_relations(harness: Harness) -> dict:
|
||||
"""Add all the relations there are test relations for."""
|
||||
rel_ids = {}
|
||||
for key in harness._meta.relations.keys():
|
||||
if test_relations.get(key):
|
||||
rel_id = test_relations[key](harness)
|
||||
rel_ids[key] = rel_id
|
||||
return rel_ids
|
||||
|
||||
|
||||
def set_all_pebbles_ready(harness: Harness) -> None:
|
||||
"""Set all known pebble handlers to ready."""
|
||||
for container in harness._meta.containers:
|
||||
harness.container_pebble_ready(container)
|
||||
|
||||
|
||||
def set_remote_leader_ready(
|
||||
harness: Harness,
|
||||
rel_id: int,
|
||||
) -> None:
|
||||
"""Update relation data to show leader is ready."""
|
||||
harness.update_relation_data(
|
||||
rel_id, harness.charm.app.name, {"leader_ready": "true"}
|
||||
)
|
||||
|
||||
|
||||
def get_harness(
|
||||
charm_class: ops.charm.CharmBase,
|
||||
charm_metadata: str = None,
|
||||
container_calls: dict = None,
|
||||
charm_config: str = None,
|
||||
initial_charm_config: dict = None,
|
||||
) -> Harness:
|
||||
"""Return a testing harness."""
|
||||
|
||||
class _OSTestingPebbleClient(_TestingPebbleClient):
|
||||
def exec(
|
||||
self,
|
||||
command: typing.List[str],
|
||||
*,
|
||||
service_context: Optional[str] = None,
|
||||
environment: typing.Dict[str, str] = None,
|
||||
working_dir: str = None,
|
||||
timeout: float = None,
|
||||
user_id: int = None,
|
||||
user: str = None,
|
||||
group_id: int = None,
|
||||
group: str = None,
|
||||
stdin: typing.Union[
|
||||
str, bytes, typing.TextIO, typing.BinaryIO
|
||||
] = None,
|
||||
stdout: typing.Union[typing.TextIO, typing.BinaryIO] = None,
|
||||
stderr: typing.Union[typing.TextIO, typing.BinaryIO] = None,
|
||||
encoding: str = "utf-8",
|
||||
combine_stderr: bool = False,
|
||||
) -> MagicMock:
|
||||
container_calls.add_execute(self.container_name, command)
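# Return a mock process so callers can use wait_output() without a
# real workload; it reports empty output and no error.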
|
||||
process_mock = MagicMock()
|
||||
process_mock.wait_output.return_value = ("", None)
|
||||
return process_mock
|
||||
|
||||
def start_services(
|
||||
self,
|
||||
services: List[str],
|
||||
timeout: float = 30.0,
|
||||
delay: float = 0.1,
|
||||
) -> None:
|
||||
"""Record start service events."""
|
||||
super().start_services(services, timeout, delay)
|
||||
container_calls.add_start(self.container_name, services)
|
||||
|
||||
def stop_services(
|
||||
self,
|
||||
services: List[str],
|
||||
timeout: float = 30.0,
|
||||
delay: float = 0.1,
|
||||
) -> None:
|
||||
"""Record stop service events."""
|
||||
super().stop_services(services, timeout, delay)
|
||||
container_calls.add_stop(self.container_name, services)
|
||||
|
||||
class _OSTestingModelBackend(_TestingModelBackend):
|
||||
def get_pebble(self, socket_path: str) -> _OSTestingPebbleClient:
|
||||
"""Get the testing pebble client."""
|
||||
container = socket_path.split("/")[
|
||||
3
|
||||
] # /charm/containers/<container_name>/pebble.socket
|
||||
client = self._pebble_clients.get(container, None)
|
||||
if client is None:
|
||||
container_root = self._harness_container_path / container
|
||||
container_root.mkdir()
|
||||
# Below two lines are changes from parent class method
|
||||
client = _OSTestingPebbleClient(
|
||||
self, container_root=container_root
|
||||
)
|
||||
# Add container name to the pebble client
|
||||
client.container_name = container
|
||||
|
||||
# we need to know which container a new pebble client belongs to
|
||||
# so we can figure out which storage mounts must be simulated on
|
||||
# this pebble client's mock file systems when storage is
|
||||
# attached/detached later.
|
||||
self._pebble_clients[container] = client
|
||||
|
||||
self._pebble_clients_can_connect[client] = False
|
||||
return client
|
||||
|
||||
def network_get(
|
||||
self, endpoint_name: str, relation_id: str = None
|
||||
) -> dict:
|
||||
"""Return a fake set of network data."""
|
||||
network_data = {
|
||||
"bind-addresses": [
|
||||
{
|
||||
"interface-name": "eth0",
|
||||
"addresses": [
|
||||
{"cidr": "10.0.0.0/24", "value": "10.0.0.10"}
|
||||
],
|
||||
}
|
||||
],
|
||||
"ingress-addresses": ["10.0.0.10"],
|
||||
"egress-subnets": ["10.0.0.0/24"],
|
||||
}
|
||||
return network_data
|
||||
|
||||
filename = inspect.getfile(charm_class)
|
||||
# Use pathlib.Path(filename).parents[1] if tests structure is
|
||||
# <charm>/unit_tests
|
||||
# Use pathlib.Path(filename).parents[2] if tests structure is
|
||||
# <charm>/tests/unit/
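# e.g. for a charm class defined in <charm>/tests/unit/test_charm.py,
# parents[2] resolves to <charm>, which is where metadata.yaml is read
# from below (illustrative example only).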
|
||||
charm_dir = pathlib.Path(filename).parents[2]
|
||||
|
||||
if not charm_metadata:
|
||||
metadata_file = f"{charm_dir}/metadata.yaml"
|
||||
if os.path.isfile(metadata_file):
|
||||
with open(metadata_file) as f:
|
||||
charm_metadata = f.read()
|
||||
|
||||
harness = Harness(charm_class, meta=charm_metadata, config=charm_config)
|
||||
harness._backend = _OSTestingModelBackend(
|
||||
harness._unit_name, harness._meta, harness._get_config(charm_config)
|
||||
)
|
||||
harness._model = model.Model(harness._meta, harness._backend)
|
||||
harness._framework = framework.Framework(
|
||||
":memory:", harness._charm_dir, harness._meta, harness._model
|
||||
)
|
||||
harness.set_model_name("test-model")
|
||||
|
||||
return harness
|
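As a point of reference, the helpers above are typically combined in a charm's unit tests along these lines. This is a hedged sketch only: the `charm` module, `MyCharm` class, container name and config path are hypothetical, and the module above is assumed to be importable as `ops_sunbeam.test_utils`.

```
import charm  # hypothetical module providing the charm under test

import ops_sunbeam.test_utils as test_utils


class TestMyCharm(test_utils.CharmTestCase):
    """Sketch of a unit test built on the helpers above."""

    PATCHES = []

    def setUp(self):
        """Build a harness wired up to the shared ContainerCalls log."""
        super().setUp(charm, self.PATCHES)
        self.harness = test_utils.get_harness(
            charm.MyCharm,  # hypothetical charm class
            container_calls=self.container_calls,
        )
        self.addCleanup(self.harness.cleanup)
        self.harness.begin()

    def test_all_relations(self):
        """Drive the charm to a configured state and inspect pushed files."""
        self.harness.set_leader()
        test_utils.set_all_pebbles_ready(self.harness)
        test_utils.add_all_relations(self.harness)
        self.assertIn(
            "/etc/my-service/my-service.conf",  # hypothetical path
            self.container_calls.updated_files("my-service-api"),
        )
```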
39
ops-sunbeam/pyproject.toml
Normal file
@@ -0,0 +1,39 @@
|
||||
# Copyright 2022 Canonical Ltd.
|
||||
# See LICENSE file for licensing details.
|
||||
|
||||
# Testing tools configuration
|
||||
[tool.coverage.run]
|
||||
branch = true
|
||||
|
||||
[tool.coverage.report]
|
||||
show_missing = true
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
minversion = "6.0"
|
||||
log_cli_level = "INFO"
|
||||
|
||||
# Formatting tools configuration
|
||||
[tool.black]
|
||||
line-length = 79
|
||||
|
||||
[tool.isort]
|
||||
profile = "black"
|
||||
multi_line_output = 3
|
||||
force_grid_wrap = true
|
||||
|
||||
# Linting tools configuration
|
||||
[tool.flake8]
|
||||
max-line-length = 79
|
||||
max-doc-length = 99
|
||||
max-complexity = 10
|
||||
exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
|
||||
select = ["E", "W", "F", "C", "N", "R", "D", "H"]
|
||||
# Ignore W503, E501 because using black creates errors with this
|
||||
# Ignore D107 Missing docstring in __init__
|
||||
ignore = ["W503", "E501", "D107", "E402"]
|
||||
per-file-ignores = []
|
||||
docstring-convention = "google"
|
||||
# Check for properly formatted copyright header in each file
|
||||
copyright-check = "True"
|
||||
copyright-author = "Canonical Ltd."
|
||||
copyright-regexp = "Copyright\\s\\d{4}([-,]\\d{4})*\\s+%(author)s"
|
3
ops-sunbeam/requirements-dev.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
-r requirements.txt
|
||||
coverage
|
||||
flake8
|
10
ops-sunbeam/requirements.txt
Normal file
@@ -0,0 +1,10 @@
|
||||
jinja2
|
||||
jsonschema
|
||||
kubernetes
|
||||
ops
|
||||
python-keystoneclient
|
||||
git+https://github.com/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client
|
||||
lightkube
|
||||
lightkube-models
|
||||
tenacity
|
||||
pydantic<2.0
|
18
ops-sunbeam/setup.cfg
Normal file
@@ -0,0 +1,18 @@
|
||||
[metadata]
|
||||
name = ops_sunbeam
|
||||
summary = Charm lib for OpenStack Charms using operator framework
|
||||
version = 0.0.1.dev1
|
||||
description-file =
|
||||
README.rst
|
||||
author = OpenStack Charmers
|
||||
author-email = openstack-charmers@lists.ubuntu.com
|
||||
classifier =
|
||||
Development Status :: 2 - Pre-Alpha
|
||||
Intended Audience :: Developers
|
||||
Topic :: System
|
||||
Topic :: System :: Installation/Setup
|
||||
Topic :: System :: Software Distribution
|
||||
Programming Language :: Python :: 3
|
||||
Programming Language :: Python :: 3.5
|
||||
License :: OSI Approved :: Apache Software License
|
||||
|
41
ops-sunbeam/setup.py
Normal file
@@ -0,0 +1,41 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2021 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Module used to setup the ops_sunbeam framework."""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
from setuptools import setup, find_packages
|
||||
|
||||
version = "0.0.1.dev1"
|
||||
install_require = [
|
||||
'ops',
|
||||
'tenacity',
|
||||
'lightkube',
|
||||
'lightkube-models',
|
||||
]
|
||||
|
||||
tests_require = [
|
||||
'tox >= 2.3.1',
|
||||
]
|
||||
|
||||
setup(
|
||||
license='Apache-2.0: http://www.apache.org/licenses/LICENSE-2.0',
|
||||
packages=find_packages(exclude=["unit_tests", "lib"]),
|
||||
zip_safe=False,
|
||||
install_requires=install_require,
|
||||
)
|
||||
|
3
ops-sunbeam/shared_code/.stestr.conf
Normal file
@@ -0,0 +1,3 @@
|
||||
[DEFAULT]
|
||||
test_path=./unit_tests
|
||||
top_dir=./
|
26
ops-sunbeam/shared_code/config-api.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
debug:
|
||||
default: False
|
||||
description: Enable debug logging.
|
||||
type: boolean
|
||||
os-admin-hostname:
|
||||
default: glance.juju
|
||||
description: |
|
||||
The hostname or address of the admin endpoints that should be advertised
|
||||
in the glance image provider.
|
||||
type: string
|
||||
os-internal-hostname:
|
||||
default: glance.juju
|
||||
description: |
|
||||
The hostname or address of the internal endpoints that should be advertised
|
||||
in the glance image provider.
|
||||
type: string
|
||||
os-public-hostname:
|
||||
default: glance.juju
|
||||
description: |
|
||||
The hostname or address of the public endpoints that should be advertised
|
||||
in the glance image provider.
|
||||
type: string
|
||||
region:
|
||||
default: RegionOne
|
||||
description: Space delimited list of OpenStack regions
|
||||
type: string
|
230
ops-sunbeam/shared_code/config-ceph-options.yaml
Normal file
@@ -0,0 +1,230 @@
|
||||
ceph-osd-replication-count:
|
||||
default: 3
|
||||
type: int
|
||||
description: |
|
||||
This value dictates the number of replicas ceph must make of any
|
||||
object it stores within the cinder rbd pool. Of course, this only
|
||||
applies if using Ceph as a backend store. Note that once the cinder
|
||||
rbd pool has been created, changing this value will not have any
|
||||
effect (although it can be changed in ceph by manually configuring
|
||||
your ceph cluster).
|
||||
ceph-pool-weight:
|
||||
type: int
|
||||
default: 40
|
||||
description: |
|
||||
Defines a relative weighting of the pool as a percentage of the total
|
||||
amount of data in the Ceph cluster. This effectively weights the number
|
||||
of placement groups for the pool created to be appropriately portioned
|
||||
to the amount of data expected. For example, if the ephemeral volumes
|
||||
for the OpenStack compute instances are expected to take up 20% of the
|
||||
overall configuration then this value would be specified as 20. Note -
|
||||
it is important to choose an appropriate value for the pool weight as
|
||||
this directly affects the number of placement groups which will be
|
||||
created for the pool. The number of placement groups for a pool can
|
||||
only be increased, never decreased - so it is important to identify the
|
||||
percent of data that will likely reside in the pool.
|
||||
volume-backend-name:
|
||||
default:
|
||||
type: string
|
||||
description: |
|
||||
Volume backend name for the backend. The default value is the
|
||||
application name in the Juju model, e.g. "cinder-ceph-mybackend"
|
||||
if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`.
|
||||
A common backend name can be set to multiple backends with the
|
||||
same characters so that those can be treated as a single virtual
|
||||
backend associated with a single volume type.
|
||||
backend-availability-zone:
|
||||
default:
|
||||
type: string
|
||||
description: |
|
||||
Availability zone name of this volume backend. If set, it will
|
||||
override the default availability zone. Supported for Pike or
|
||||
newer releases.
|
||||
restrict-ceph-pools:
|
||||
default: False
|
||||
type: boolean
|
||||
description: |
|
||||
Optionally restrict Ceph key permissions to access pools as required.
|
||||
rbd-pool-name:
|
||||
default:
|
||||
type: string
|
||||
description: |
|
||||
Optionally specify an existing rbd pool that cinder should map to.
|
||||
rbd-flatten-volume-from-snapshot:
|
||||
type: boolean
|
||||
default: False
|
||||
description: |
|
||||
Flatten volumes created from snapshots to remove dependency from
|
||||
volume to snapshot. Supported on Queens+
|
||||
rbd-mirroring-mode:
|
||||
type: string
|
||||
default: pool
|
||||
description: |
|
||||
The RBD mirroring mode used for the Ceph pool. This option is only used
|
||||
with 'replicated' pool type, as it's not supported for 'erasure-coded'
|
||||
pool type - valid values: 'pool' and 'image'
|
||||
pool-type:
|
||||
type: string
|
||||
default: replicated
|
||||
description: |
|
||||
Ceph pool type to use for storage - valid values include ‘replicated’
|
||||
and ‘erasure-coded’.
|
||||
ec-profile-name:
|
||||
type: string
|
||||
default:
|
||||
description: |
|
||||
Name for the EC profile to be created for the EC pools. If not defined
|
||||
a profile name will be generated based on the name of the pool used by
|
||||
the application.
|
||||
ec-rbd-metadata-pool:
|
||||
type: string
|
||||
default:
|
||||
description: |
|
||||
Name of the metadata pool to be created (for RBD use-cases). If not
|
||||
defined a metadata pool name will be generated based on the name of
|
||||
the data pool used by the application. The metadata pool is always
|
||||
replicated, not erasure coded.
|
||||
ec-profile-k:
|
||||
type: int
|
||||
default: 1
|
||||
description: |
|
||||
Number of data chunks that will be used for EC data pool. K+M factors
|
||||
should never be greater than the number of available zones (or hosts)
|
||||
for balancing.
|
||||
ec-profile-m:
|
||||
type: int
|
||||
default: 2
|
||||
description: |
|
||||
Number of coding chunks that will be used for EC data pool. K+M factors
|
||||
should never be greater than the number of available zones (or hosts)
|
||||
for balancing.
|
||||
ec-profile-locality:
|
||||
type: int
|
||||
default:
|
||||
description: |
|
||||
(lrc plugin - l) Group the coding and data chunks into sets of size l.
|
||||
For instance, for k=4 and m=2, when l=3 two groups of three are created.
|
||||
Each set can be recovered without reading chunks from another set. Note
|
||||
that using the lrc plugin does incur more raw storage usage than isa or
|
||||
jerasure in order to reduce the cost of recovery operations.
|
||||
ec-profile-crush-locality:
|
||||
type: string
|
||||
default:
|
||||
description: |
|
||||
(lrc plugin) The type of the crush bucket in which each set of chunks
|
||||
defined by l will be stored. For instance, if it is set to rack, each
|
||||
group of l chunks will be placed in a different rack. It is used to
|
||||
create a CRUSH rule step such as step choose rack. If it is not set,
|
||||
no such grouping is done.
|
||||
ec-profile-durability-estimator:
|
||||
type: int
|
||||
default:
|
||||
description: |
|
||||
(shec plugin - c) The number of parity chunks each of which includes
|
||||
each data chunk in its calculation range. The number is used as a
|
||||
durability estimator. For instance, if c=2, 2 OSDs can be down
|
||||
without losing data.
|
||||
ec-profile-helper-chunks:
|
||||
type: int
|
||||
default:
|
||||
description: |
|
||||
(clay plugin - d) Number of OSDs requested to send data during
|
||||
recovery of a single chunk. d needs to be chosen such that
|
||||
k+1 <= d <= k+m-1. Larger the d, the better the savings.
|
||||
ec-profile-scalar-mds:
|
||||
type: string
|
||||
default:
|
||||
description: |
|
||||
(clay plugin) specifies the plugin that is used as a building
|
||||
block in the layered construction. It can be one of jerasure,
|
||||
isa, shec (defaults to jerasure).
|
||||
ec-profile-plugin:
|
||||
type: string
|
||||
default: jerasure
|
||||
description: |
|
||||
EC plugin to use for this application's pool. The following plugins
|
||||
are acceptable - jerasure, lrc, isa, shec, clay.
|
||||
ec-profile-technique:
|
||||
type: string
|
||||
default:
|
||||
description: |
|
||||
EC profile technique used for this application's pool - will be
|
||||
validated based on the plugin configured via ec-profile-plugin.
|
||||
Supported techniques are ‘reed_sol_van’, ‘reed_sol_r6_op’,
|
||||
‘cauchy_orig’, ‘cauchy_good’, ‘liber8tion’ for jerasure,
|
||||
‘reed_sol_van’, ‘cauchy’ for isa and ‘single’, ‘multiple’
|
||||
for shec.
|
||||
ec-profile-device-class:
|
||||
type: string
|
||||
default:
|
||||
description: |
|
||||
Device class from CRUSH map to use for placement groups for
|
||||
erasure profile - valid values: ssd, hdd or nvme (or leave
|
||||
unset to not use a device class).
|
||||
bluestore-compression-algorithm:
|
||||
type: string
|
||||
default:
|
||||
description: |
|
||||
Compressor to use (if any) for pools requested by this charm.
|
||||
.
|
||||
NOTE: The ceph-osd charm sets a global default for this value (defaults
|
||||
to 'lz4' unless configured by the end user) which will be used unless
|
||||
specified for individual pools.
|
||||
bluestore-compression-mode:
|
||||
type: string
|
||||
default:
|
||||
description: |
|
||||
Policy for using compression on pools requested by this charm.
|
||||
.
|
||||
'none' means never use compression.
|
||||
'passive' means use compression when clients hint that data is
|
||||
compressible.
|
||||
'aggressive' means use compression unless clients hint that
|
||||
data is not compressible.
|
||||
'force' means use compression under all circumstances even if the clients
|
||||
hint that the data is not compressible.
|
||||
bluestore-compression-required-ratio:
|
||||
type: float
|
||||
default:
|
||||
description: |
|
||||
The ratio of the size of the data chunk after compression relative to the
|
||||
original size must be at least this small in order to store the
|
||||
compressed version on pools requested by this charm.
|
||||
bluestore-compression-min-blob-size:
|
||||
type: int
|
||||
default:
|
||||
description: |
|
||||
Chunks smaller than this are never compressed on pools requested by
|
||||
this charm.
|
||||
bluestore-compression-min-blob-size-hdd:
|
||||
type: int
|
||||
default:
|
||||
description: |
|
||||
Value of bluestore compression min blob size for rotational media on
|
||||
pools requested by this charm.
|
||||
bluestore-compression-min-blob-size-ssd:
|
||||
type: int
|
||||
default:
|
||||
description: |
|
||||
Value of bluestore compression min blob size for solid state media on
|
||||
pools requested by this charm.
|
||||
bluestore-compression-max-blob-size:
|
||||
type: int
|
||||
default:
|
||||
description: |
|
||||
Chunks larger than this are broken into smaller blobs sizing bluestore
|
||||
compression max blob size before being compressed on pools requested by
|
||||
this charm.
|
||||
bluestore-compression-max-blob-size-hdd:
|
||||
type: int
|
||||
default:
|
||||
description: |
|
||||
Value of bluestore compression max blob size for rotational media on
|
||||
pools requested by this charm.
|
||||
bluestore-compression-max-blob-size-ssd:
|
||||
type: int
|
||||
default:
|
||||
description: |
|
||||
Value of bluestore compression max blob size for solid state media on
|
||||
pools requested by this charm.
|
77
ops-sunbeam/shared_code/sunbeam-charm-init.py
Executable file
@@ -0,0 +1,77 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import shutil
|
||||
import yaml
|
||||
import argparse
|
||||
import tempfile
|
||||
import os
|
||||
from cookiecutter.main import cookiecutter
|
||||
import subprocess
|
||||
|
||||
import sys
|
||||
|
||||
def start_msg():
|
||||
print("This tool is designed to be used after 'charmcraft init' was initially run")
|
||||
|
||||
def cookie(output_dir, extra_context):
|
||||
cookiecutter(
|
||||
'sunbeam_charm/',
|
||||
extra_context=extra_context,
|
||||
output_dir=output_dir)
|
||||
|
||||
def arg_parser():
|
||||
parser = argparse.ArgumentParser(description='Apply the sunbeam template to an existing charm.')
|
||||
parser.add_argument('charm_path', help='path to charm')
|
||||
return parser.parse_args()
|
||||
|
||||
def read_metadata_file(charm_dir):
|
||||
with open(f'{charm_dir}/metadata.yaml', 'r') as f:
|
||||
metadata = yaml.load(f, Loader=yaml.FullLoader)
|
||||
return metadata
|
||||
|
||||
def switch_dir():
|
||||
abspath = os.path.abspath(__file__)
|
||||
dname = os.path.dirname(abspath)
|
||||
os.chdir(dname)
|
||||
|
||||
def get_extra_context(charm_dir):
|
||||
metadata = read_metadata_file(charm_dir)
|
||||
charm_name = metadata['name']
|
||||
service_name = charm_name.replace('sunbeam-', '')
|
||||
service_name = service_name.replace('-operator', '')
|
||||
service_name = service_name.replace('-k8s', '')
|
||||
ctxt = {
|
||||
'service_name': service_name,
|
||||
'charm_name': charm_name}
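# Illustrative examples only: metadata names such as "sunbeam-ironic-operator"
# or "ironic-k8s" both reduce to the service name "ironic" here.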
|
||||
# XXX REMOVE
|
||||
ctxt['db_sync_command'] = 'ironic-dbsync --config-file /etc/ironic/ironic.conf create_schema'
|
||||
ctxt['ingress_port'] = 6385
|
||||
return ctxt
|
||||
|
||||
def sync_code(src_dir, target_dir):
|
||||
cmd = ['rsync', '-r', '-v', f'{src_dir}/', target_dir]
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
def main() -> int:
|
||||
"""Echo the input arguments to standard output"""
|
||||
start_msg()
|
||||
args = arg_parser()
|
||||
charm_dir = args.charm_path
|
||||
switch_dir()
|
||||
with tempfile.TemporaryDirectory() as tmpdirname:
|
||||
extra_context = get_extra_context(charm_dir)
|
||||
service_name = extra_context['service_name']
|
||||
cookie(
|
||||
tmpdirname,
|
||||
extra_context)
|
||||
src_dir = f"{tmpdirname}/{service_name}"
|
||||
shutil.copyfile(
|
||||
f'{src_dir}/src/templates/wsgi-template.conf.j2',
|
||||
f'{src_dir}/src/templates/wsgi-{service_name}-api.conf')
|
||||
sync_code(src_dir, charm_dir)
|
||||
return 0
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
9
ops-sunbeam/shared_code/sunbeam_charm/cookiecutter.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"service_name": "",
|
||||
"charm_name": "",
|
||||
"ingress_port": "",
|
||||
"db_sync_command": "",
|
||||
"_copy_without_render": [
|
||||
"src/templates"
|
||||
]
|
||||
}
|
11
ops-sunbeam/shared_code/sunbeam_charm/{{cookiecutter.service_name}}/.gitignore
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
venv/
|
||||
build/
|
||||
*.charm
|
||||
.tox/
|
||||
.coverage
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
.idea
|
||||
.vscode/
|
||||
*.swp
|
||||
.stestr/
|
@@ -0,0 +1,5 @@
|
||||
[gerrit]
|
||||
host=review.opendev.org
|
||||
port=29418
|
||||
project=openstack/charm-{{ cookiecutter.service_name }}-k8s.git
|
||||
defaultbranch=main
|
@@ -0,0 +1,3 @@
|
||||
[DEFAULT]
|
||||
test_path=./tests/unit
|
||||
top_dir=./tests
|
@@ -0,0 +1,11 @@
|
||||
- project:
|
||||
templates:
|
||||
- openstack-python3-charm-jobs
|
||||
- openstack-cover-jobs
|
||||
- microk8s-func-test
|
||||
vars:
|
||||
charm_build_name: {{ cookiecutter.service_name }}-k8s
|
||||
juju_channel: 3.2/stable
|
||||
juju_classic_mode: false
|
||||
microk8s_channel: 1.26-strict/stable
|
||||
microk8s_classic_mode: false
|
@@ -0,0 +1,2 @@
|
||||
# NOTE: no actions yet!
|
||||
{ }
|
@@ -0,0 +1,31 @@
|
||||
type: "charm"
|
||||
bases:
|
||||
- build-on:
|
||||
- name: "ubuntu"
|
||||
channel: "22.04"
|
||||
run-on:
|
||||
- name: "ubuntu"
|
||||
channel: "22.04"
|
||||
parts:
|
||||
update-certificates:
|
||||
plugin: nil
|
||||
override-build: |
|
||||
apt update
|
||||
apt install -y ca-certificates
|
||||
update-ca-certificates
|
||||
|
||||
charm:
|
||||
after: [update-certificates]
|
||||
build-packages:
|
||||
- git
|
||||
- libffi-dev
|
||||
- libssl-dev
|
||||
- rustc
|
||||
- cargo
|
||||
- pkg-config
|
||||
charm-binary-python-packages:
|
||||
- cryptography
|
||||
- jsonschema
|
||||
- pydantic<2.0
|
||||
- jinja2
|
||||
- git+https://opendev.org/openstack/charm-ops-sunbeam#egg=ops_sunbeam
|
@@ -0,0 +1,27 @@
|
||||
options:
|
||||
debug:
|
||||
default: False
|
||||
description: Enable debug logging.
|
||||
type: boolean
|
||||
os-admin-hostname:
|
||||
default: glance.juju
|
||||
description: |
|
||||
The hostname or address of the admin endpoints that should be advertised
|
||||
in the glance image provider.
|
||||
type: string
|
||||
os-internal-hostname:
|
||||
default: glance.juju
|
||||
description: |
|
||||
The hostname or address of the internal endpoints that should be advertised
|
||||
in the glance image provider.
|
||||
type: string
|
||||
os-public-hostname:
|
||||
default: glance.juju
|
||||
description: |
|
||||
The hostname or address of the public endpoints that should be advertised
|
||||
in the glance image provider.
|
||||
type: string
|
||||
region:
|
||||
default: RegionOne
|
||||
description: Space delimited list of OpenStack regions
|
||||
type: string
|
@@ -0,0 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
echo "INFO: Fetching libs from charmhub."
|
||||
# charmcraft fetch-lib charms.data_platform_libs.v0.database_requires
|
||||
# charmcraft fetch-lib charms.keystone_k8s.v1.identity_service
|
||||
# charmcraft fetch-lib charms.rabbitmq_k8s.v0.rabbitmq
|
||||
# charmcraft fetch-lib charms.traefik_k8s.v1.ingress
|
@@ -0,0 +1,52 @@
|
||||
name: {{ cookiecutter.charm_name }}
|
||||
summary: OpenStack {{ cookiecutter.service_name }} service
|
||||
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
|
||||
description: |
|
||||
OpenStack {{ cookiecutter.service_name }} provides an HTTP service for managing, selecting,
|
||||
and claiming providers of classes of inventory representing available
|
||||
resources in a cloud.
|
||||
.
|
||||
version: 3
|
||||
bases:
|
||||
- name: ubuntu
|
||||
channel: 22.04/stable
|
||||
assumes:
|
||||
- k8s-api
|
||||
- juju >= 3.2
|
||||
tags:
|
||||
- openstack
|
||||
source: https://opendev.org/openstack/charm-{{ cookiecutter.service_name }}-k8s
|
||||
issues: https://bugs.launchpad.net/charm-{{ cookiecutter.service_name }}-k8s
|
||||
|
||||
containers:
|
||||
{{ cookiecutter.service_name }}-api:
|
||||
resource: {{ cookiecutter.service_name }}-api-image
|
||||
|
||||
resources:
|
||||
{{ cookiecutter.service_name }}-api-image:
|
||||
type: oci-image
|
||||
description: OCI image for OpenStack {{ cookiecutter.service_name }}
|
||||
|
||||
requires:
|
||||
database:
|
||||
interface: mysql_client
|
||||
limit: 1
|
||||
identity-service:
|
||||
interface: keystone
|
||||
ingress-internal:
|
||||
interface: ingress
|
||||
optional: true
|
||||
limit: 1
|
||||
ingress-public:
|
||||
interface: ingress
|
||||
limit: 1
|
||||
amqp:
|
||||
interface: rabbitmq
|
||||
|
||||
provides:
|
||||
{{ cookiecutter.service_name }}:
|
||||
interface: {{ cookiecutter.service_name }}
|
||||
|
||||
peers:
|
||||
peers:
|
||||
interface: {{ cookiecutter.service_name }}-peer
|
@@ -0,0 +1,10 @@
|
||||
- project:
|
||||
templates:
|
||||
- charm-publish-jobs
|
||||
vars:
|
||||
needs_charm_build: true
|
||||
charm_build_name: {{ cookiecutter.service_name }}-k8s
|
||||
build_type: charmcraft
|
||||
publish_charm: true
|
||||
charmcraft_channel: 2.0/stable
|
||||
publish_channel: 2023.1/edge
|
@@ -0,0 +1,39 @@
|
||||
# Copyright 2023 Canonical Ltd.
|
||||
# See LICENSE file for licensing details.
|
||||
|
||||
# Testing tools configuration
|
||||
[tool.coverage.run]
|
||||
branch = true
|
||||
|
||||
[tool.coverage.report]
|
||||
show_missing = true
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
minversion = "6.0"
|
||||
log_cli_level = "INFO"
|
||||
|
||||
# Formatting tools configuration
|
||||
[tool.black]
|
||||
line-length = 79
|
||||
|
||||
[tool.isort]
|
||||
profile = "black"
|
||||
multi_line_output = 3
|
||||
force_grid_wrap = true
|
||||
|
||||
# Linting tools configuration
|
||||
[tool.flake8]
|
||||
max-line-length = 79
|
||||
max-doc-length = 99
|
||||
max-complexity = 10
|
||||
exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
|
||||
select = ["E", "W", "F", "C", "N", "R", "D", "H"]
|
||||
# Ignore W503, E501 because using black creates errors with this
|
||||
# Ignore D107 Missing docstring in __init__
|
||||
ignore = ["W503", "E501", "D107", "E402"]
|
||||
per-file-ignores = []
|
||||
docstring-convention = "google"
|
||||
# Check for properly formatted copyright header in each file
|
||||
copyright-check = "True"
|
||||
copyright-author = "Canonical Ltd."
|
||||
copyright-regexp = "Copyright\\s\\d{4}([-,]\\d{4})*\\s+%(author)s"
|
@@ -0,0 +1,13 @@
|
||||
#!/bin/bash
|
||||
charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}')
|
||||
echo "renaming ${charm}_*.charm to ${charm}.charm"
|
||||
echo -n "pwd: "
|
||||
pwd
|
||||
ls -al
|
||||
echo "Removing bad downloaded charm maybe?"
|
||||
if [[ -e "${charm}.charm" ]];
|
||||
then
|
||||
rm "${charm}.charm"
|
||||
fi
|
||||
echo "Renaming charm here."
|
||||
mv ${charm}_*.charm ${charm}.charm
|
@@ -0,0 +1,9 @@
|
||||
ops
|
||||
jinja2
|
||||
git+https://github.com/openstack/charm-ops-sunbeam#egg=ops_sunbeam
|
||||
lightkube
|
||||
pydantic<2.0
|
||||
|
||||
# Uncomment below if charm relates to ceph
|
||||
# git+https://github.com/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client
|
||||
# git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
|
@@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env python3
|
||||
"""{{ cookiecutter.service_name[0]|upper}}{{cookiecutter.service_name[1:] }} Operator Charm.
|
||||
|
||||
This charm provides {{ cookiecutter.service_name[0]|upper}}{{cookiecutter.service_name[1:] }} services as part of an OpenStack deployment
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from ops.framework import StoredState
|
||||
from ops.main import main
|
||||
|
||||
import ops_sunbeam.charm as sunbeam_charm
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class {{ cookiecutter.service_name[0]|upper}}{{cookiecutter.service_name[1:] }}OperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
|
||||
"""Charm the service."""
|
||||
|
||||
_state = StoredState()
|
||||
service_name = "{{ cookiecutter.service_name }}-api"
|
||||
wsgi_admin_script = '/usr/bin/{{ cookiecutter.service_name }}-api-wsgi'
|
||||
wsgi_public_script = '/usr/bin/{{ cookiecutter.service_name }}-api-wsgi'
|
||||
|
||||
db_sync_cmds = [
|
||||
{{ cookiecutter.db_sync_command.split() }}
|
||||
]
|
||||
|
||||
@property
|
||||
def service_conf(self) -> str:
|
||||
"""Service default configuration file."""
|
||||
return "/etc/{{ cookiecutter.service_name }}/{{ cookiecutter.service_name }}.conf"
|
||||
|
||||
@property
|
||||
def service_user(self) -> str:
|
||||
"""Service user file and directory ownership."""
|
||||
return '{{ cookiecutter.service_name }}'
|
||||
|
||||
@property
|
||||
def service_group(self) -> str:
|
||||
"""Service group file and directory ownership."""
|
||||
return '{{ cookiecutter.service_name }}'
|
||||
|
||||
@property
|
||||
def service_endpoints(self):
|
||||
"""Return service endpoints for the service."""
|
||||
return [
|
||||
{
|
||||
'service_name': '{{ cookiecutter.service_name }}',
|
||||
'type': '{{ cookiecutter.service_name }}',
|
||||
'description': "OpenStack {{ cookiecutter.service_name[0]|upper}}{{cookiecutter.service_name[1:] }} API",
|
||||
'internal_url': f'{self.internal_url}',
|
||||
'public_url': f'{self.public_url}',
|
||||
'admin_url': f'{self.admin_url}'}]
|
||||
|
||||
@property
|
||||
def default_public_ingress_port(self):
|
||||
"""Ingress Port for API service."""
|
||||
return {{ cookiecutter.ingress_port }}
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main({{ cookiecutter.service_name[0]|upper}}{{cookiecutter.service_name[1:] }}OperatorCharm)
|
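For orientation, here is a hedged sketch of roughly what the charm.py template above renders to for a hypothetical service_name of "ironic", using the example db_sync_command and ingress_port that sunbeam-charm-init.py seeds; it is an illustrative excerpt (imports as in the template), not generated output from this change.

```
class IronicOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
    """Charm the service."""

    _state = StoredState()
    service_name = "ironic-api"
    wsgi_admin_script = '/usr/bin/ironic-api-wsgi'
    wsgi_public_script = '/usr/bin/ironic-api-wsgi'

    # db_sync_command.split() renders as a Python argv list
    db_sync_cmds = [
        ['ironic-dbsync', '--config-file', '/etc/ironic/ironic.conf', 'create_schema']
    ]

    @property
    def default_public_ingress_port(self):
        """Ingress Port for API service."""
        return 6385
```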
@@ -0,0 +1,22 @@
|
||||
###############################################################################
|
||||
# [ WARNING ]
|
||||
# ceph configuration file maintained in aso
|
||||
# local changes may be overwritten.
|
||||
###############################################################################
|
||||
[global]
|
||||
{% if ceph.auth -%}
|
||||
auth_supported = {{ ceph.auth }}
|
||||
mon host = {{ ceph.mon_hosts }}
|
||||
{% endif -%}
|
||||
keyring = /etc/ceph/$cluster.$name.keyring
|
||||
log to syslog = false
|
||||
err to syslog = false
|
||||
clog to syslog = false
|
||||
{% if ceph.rbd_features %}
|
||||
rbd default features = {{ ceph.rbd_features }}
|
||||
{% endif %}
|
||||
|
||||
[client]
|
||||
{% if ceph_config.rbd_default_data_pool -%}
|
||||
rbd default data pool = {{ ceph_config.rbd_default_data_pool }}
|
||||
{% endif %}
|
@@ -0,0 +1,3 @@
|
||||
{% if database.connection -%}
|
||||
connection = {{ database.connection }}
|
||||
{% endif -%}
|
@@ -0,0 +1,23 @@
|
||||
{% if identity_service.admin_auth_url -%}
|
||||
auth_url = {{ identity_service.admin_auth_url }}
|
||||
interface = admin
|
||||
{% elif identity_service.internal_auth_url -%}
|
||||
auth_url = {{ identity_service.internal_auth_url }}
|
||||
interface = internal
|
||||
{% elif identity_service.internal_host -%}
|
||||
auth_url = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
|
||||
interface = internal
|
||||
{% endif -%}
|
||||
{% if identity_service.public_auth_url -%}
|
||||
www_authenticate_uri = {{ identity_service.public_auth_url }}
|
||||
{% elif identity_service.internal_host -%}
|
||||
www_authenticate_uri = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
|
||||
{% endif -%}
|
||||
auth_type = password
|
||||
project_domain_name = {{ identity_service.service_domain_name }}
|
||||
user_domain_name = {{ identity_service.service_domain_name }}
|
||||
project_name = {{ identity_service.service_project_name }}
|
||||
username = {{ identity_service.service_user_name }}
|
||||
password = {{ identity_service.service_password }}
|
||||
service_token_roles = {{ identity_service.admin_role }}
|
||||
service_token_roles_required = True
|
@@ -0,0 +1,3 @@
|
||||
[database]
|
||||
{% include "parts/database-connection" %}
|
||||
connection_recycle_time = 200
|
@@ -0,0 +1,10 @@
|
||||
{% if trusted_dashboards %}
|
||||
[federation]
|
||||
{% for dashboard_url in trusted_dashboards -%}
|
||||
trusted_dashboard = {{ dashboard_url }}
|
||||
{% endfor -%}
|
||||
{% endif %}
|
||||
{% for sp in fid_sps -%}
|
||||
[{{ sp['protocol-name'] }}]
|
||||
remote_id_attribute = {{ sp['remote-id-attribute'] }}
|
||||
{% endfor -%}
|
@@ -0,0 +1,2 @@
|
||||
[keystone_authtoken]
|
||||
{% include "parts/identity-data" %}
|
@@ -0,0 +1,6 @@
|
||||
{% for section in sections -%}
|
||||
[{{section}}]
|
||||
{% for key, value in sections[section].items() -%}
|
||||
{{ key }} = {{ value }}
|
||||
{% endfor %}
|
||||
{%- endfor %}
|
@@ -0,0 +1,15 @@
|
||||
{% if enable_signing -%}
|
||||
[signing]
|
||||
{% if certfile -%}
|
||||
certfile = {{ certfile }}
|
||||
{% endif -%}
|
||||
{% if keyfile -%}
|
||||
keyfile = {{ keyfile }}
|
||||
{% endif -%}
|
||||
{% if ca_certs -%}
|
||||
ca_certs = {{ ca_certs }}
|
||||
{% endif -%}
|
||||
{% if ca_key -%}
|
||||
ca_key = {{ ca_key }}
|
||||
{% endif -%}
|
||||
{% endif -%}
|
@@ -0,0 +1,28 @@
|
||||
Listen {{ wsgi_config.public_port }}
|
||||
|
||||
<VirtualHost *:{{ wsgi_config.public_port }}>
|
||||
WSGIDaemonProcess {{ wsgi_config.group }} processes=3 threads=1 user={{ wsgi_config.user }} group={{ wsgi_config.group }} \
|
||||
display-name=%{GROUP}
|
||||
WSGIProcessGroup {{ wsgi_config.group }}
|
||||
{% if ingress_public.ingress_path -%}
|
||||
WSGIScriptAlias {{ ingress_public.ingress_path }} {{ wsgi_config.wsgi_public_script }}
|
||||
{% endif -%}
|
||||
WSGIScriptAlias / {{ wsgi_config.wsgi_public_script }}
|
||||
WSGIApplicationGroup %{GLOBAL}
|
||||
WSGIPassAuthorization On
|
||||
<IfVersion >= 2.4>
|
||||
ErrorLogFormat "%{cu}t %M"
|
||||
</IfVersion>
|
||||
ErrorLog {{ wsgi_config.error_log }}
|
||||
CustomLog {{ wsgi_config.custom_log }} combined
|
||||
|
||||
<Directory /usr/bin>
|
||||
<IfVersion >= 2.4>
|
||||
Require all granted
|
||||
</IfVersion>
|
||||
<IfVersion < 2.4>
|
||||
Order allow,deny
|
||||
Allow from all
|
||||
</IfVersion>
|
||||
</Directory>
|
||||
</VirtualHost>
|
@@ -0,0 +1,9 @@
|
||||
# This file is managed centrally. If you find the need to modify this as a
|
||||
# one-off, please don't. Instead, consult #openstack-charms and ask about
|
||||
# requirements management in charms via bot-control. Thank you.
|
||||
|
||||
coverage
|
||||
mock
|
||||
flake8
|
||||
stestr
|
||||
ops
|
@@ -0,0 +1,18 @@
|
||||
gate_bundles:
|
||||
- smoke
|
||||
smoke_bundles:
|
||||
- smoke
|
||||
configure:
|
||||
- zaza.openstack.charm_tests.keystone.setup.add_tempest_roles
|
||||
tests: []
|
||||
tests_options:
|
||||
trust:
|
||||
- smoke
|
||||
ignore_hard_deploy_errors:
|
||||
- smoke
|
||||
|
||||
tempest:
|
||||
default:
|
||||
smoke: True
|
||||
|
||||
target_deploy_status: []
|
@@ -0,0 +1,161 @@
|
||||
# Operator charm (with zaza): tox.ini
|
||||
|
||||
[tox]
|
||||
skipsdist = True
|
||||
envlist = pep8,py3
|
||||
sitepackages = False
|
||||
skip_missing_interpreters = False
|
||||
minversion = 3.18.0
|
||||
|
||||
[vars]
|
||||
src_path = {toxinidir}/src/
|
||||
tst_path = {toxinidir}/tests/
|
||||
lib_path = {toxinidir}/lib/
|
||||
pyproject_toml = {toxinidir}/pyproject.toml
|
||||
all_path = {[vars]src_path} {[vars]tst_path}
|
||||
|
||||
[testenv]
|
||||
basepython = python3
|
||||
setenv =
|
||||
PYTHONPATH = {toxinidir}:{[vars]lib_path}:{[vars]src_path}
|
||||
passenv =
|
||||
HOME
|
||||
PYTHONPATH
|
||||
install_command =
|
||||
pip install {opts} {packages}
|
||||
commands = stestr run --slowest {posargs}
|
||||
allowlist_externals =
|
||||
git
|
||||
charmcraft
|
||||
{toxinidir}/fetch-libs.sh
|
||||
{toxinidir}/rename.sh
|
||||
deps =
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
|
||||
[testenv:fmt]
|
||||
description = Apply coding style standards to code
|
||||
deps =
|
||||
black
|
||||
isort
|
||||
commands =
|
||||
isort {[vars]all_path} --skip-glob {[vars]lib_path} --skip {toxinidir}/.tox
|
||||
black --config {[vars]pyproject_toml} {[vars]all_path} --exclude {[vars]lib_path}
|
||||
|
||||
[testenv:build]
|
||||
basepython = python3
|
||||
deps =
|
||||
commands =
|
||||
charmcraft -v pack
|
||||
{toxinidir}/rename.sh
|
||||
|
||||
[testenv:fetch]
|
||||
basepython = python3
|
||||
deps =
|
||||
commands =
|
||||
{toxinidir}/fetch-libs.sh
|
||||
|
||||
[testenv:py3]
|
||||
basepython = python3
|
||||
deps =
|
||||
{[testenv]deps}
|
||||
-r{toxinidir}/requirements.txt
|
||||
|
||||
[testenv:py38]
|
||||
basepython = python3.8
|
||||
deps = {[testenv:py3]deps}
|
||||
|
||||
[testenv:py39]
|
||||
basepython = python3.9
|
||||
deps = {[testenv:py3]deps}
|
||||
|
||||
[testenv:py310]
|
||||
basepython = python3.10
|
||||
deps = {[testenv:py3]deps}
|
||||
|
||||
[testenv:cover]
|
||||
basepython = python3
|
||||
deps = {[testenv:py3]deps}
|
||||
setenv =
|
||||
{[testenv]setenv}
|
||||
PYTHON=coverage run
|
||||
commands =
|
||||
coverage erase
|
||||
stestr run --slowest {posargs}
|
||||
coverage combine
|
||||
coverage html -d cover
|
||||
coverage xml -o cover/coverage.xml
|
||||
coverage report
|
||||
|
||||
[testenv:pep8]
|
||||
description = Alias for lint
|
||||
deps = {[testenv:lint]deps}
|
||||
commands = {[testenv:lint]commands}
|
||||
|
||||
[testenv:lint]
|
||||
description = Check code against coding style standards
|
||||
deps =
|
||||
black
|
||||
flake8<6 # Pin version until https://github.com/savoirfairelinux/flake8-copyright/issues/19 is merged
|
||||
flake8-docstrings
|
||||
flake8-copyright
|
||||
flake8-builtins
|
||||
pyproject-flake8
|
||||
pep8-naming
|
||||
isort
|
||||
codespell
|
||||
commands =
|
||||
codespell {[vars]all_path}
|
||||
# pflake8 wrapper supports config from pyproject.toml
|
||||
pflake8 --exclude {[vars]lib_path} --config {toxinidir}/pyproject.toml {[vars]all_path}
|
||||
isort --check-only --diff {[vars]all_path} --skip-glob {[vars]lib_path}
|
||||
black --config {[vars]pyproject_toml} --check --diff {[vars]all_path} --exclude {[vars]lib_path}
|
||||
|
||||
[testenv:func-noop]
|
||||
basepython = python3
|
||||
deps =
|
||||
git+https://github.com/openstack-charmers/zaza.git@libjuju-3.1#egg=zaza
|
||||
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
|
||||
git+https://opendev.org/openstack/tempest.git#egg=tempest
|
||||
commands =
|
||||
functest-run-suite --help
|
||||
|
||||
[testenv:func]
|
||||
basepython = python3
|
||||
deps = {[testenv:func-noop]deps}
|
||||
commands =
|
||||
functest-run-suite --keep-model
|
||||
|
||||
[testenv:func-smoke]
|
||||
basepython = python3
|
||||
deps = {[testenv:func-noop]deps}
|
||||
setenv =
|
||||
TEST_MODEL_SETTINGS = automatically-retry-hooks=true
|
||||
TEST_MAX_RESOLVE_COUNT = 5
|
||||
commands =
|
||||
functest-run-suite --keep-model --smoke
|
||||
|
||||
[testenv:func-dev]
|
||||
basepython = python3
|
||||
deps = {[testenv:func-noop]deps}
|
||||
commands =
|
||||
functest-run-suite --keep-model --dev
|
||||
|
||||
[testenv:func-target]
|
||||
basepython = python3
|
||||
deps = {[testenv:func-noop]deps}
|
||||
commands =
|
||||
functest-run-suite --keep-model --bundle {posargs}
|
||||
|
||||
[coverage:run]
|
||||
branch = True
|
||||
concurrency = multiprocessing
|
||||
parallel = True
|
||||
source =
|
||||
.
|
||||
omit =
|
||||
.tox/*
|
||||
tests/*
|
||||
src/templates/*
|
||||
|
||||
[flake8]
|
||||
ignore=E226,W504
|
1
ops-sunbeam/shared_code/templates
Symbolic link
1
ops-sunbeam/shared_code/templates
Symbolic link
@@ -0,0 +1 @@
|
||||
aso_charm/{{cookiecutter.service_name}}/src/templates
|
5
ops-sunbeam/sunbeam-charm-init.sh
Executable file
5
ops-sunbeam/sunbeam-charm-init.sh
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
[ -e .tox/cookie/bin/activate ] || tox -e cookie
|
||||
source .tox/cookie/bin/activate
|
||||
shared_code/sunbeam-charm-init.py $@
|
6
ops-sunbeam/test-requirements.txt
Normal file
6
ops-sunbeam/test-requirements.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
coverage
|
||||
mock
|
||||
stestr
|
||||
requests
|
||||
pytest
|
||||
ops-scenario>=4.0
|
18
ops-sunbeam/tests/__init__.py
Normal file
18
ops-sunbeam/tests/__init__.py
Normal file
@@ -0,0 +1,18 @@
|
||||
# Copyright 2021 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Unit tests for aso."""
|
||||
import ops.testing
|
||||
|
||||
ops.testing.SIMULATE_CAN_CONNECT = True
|
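For orientation, a test module in this package might look like the following minimal sketch; the placeholder charm, its inline metadata, and the assertion are invented for illustration and are not part of this tree.

import unittest

from ops.charm import CharmBase
from ops.testing import Harness


class _PlaceholderCharm(CharmBase):
    """Trivial charm used only to exercise the test harness."""


class TestHarnessSmoke(unittest.TestCase):
    def setUp(self):
        # Metadata is supplied inline because this charm exists only
        # for the example.
        self.harness = Harness(_PlaceholderCharm, meta="name: placeholder")
        self.addCleanup(self.harness.cleanup)
        self.harness.begin()

    def test_leader_flag(self):
        # set_leader drives the leadership flag seen by the charm.
        self.harness.set_leader(True)
        self.assertTrue(self.harness.charm.unit.is_leader())


if __name__ == "__main__":
    unittest.main()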
@@ -0,0 +1,224 @@
|
||||
"""CeilometerServiceProvides and Requires module.
|
||||
|
||||
This library contains the Requires and Provides classes for handling
|
||||
the ceilometer_service interface.
|
||||
|
||||
Import `CeilometerServiceRequires` in your charm, with the charm object and the
|
||||
relation name:
|
||||
- self
|
||||
- "ceilometer_service"
|
||||
|
||||
Two events are also available to respond to:
|
||||
- config_changed
|
||||
- goneaway
|
||||
|
||||
A basic example showing the usage of this relation follows:
|
||||
|
||||
```
|
||||
from charms.ceilometer_k8s.v0.ceilometer_service import (
|
||||
CeilometerServiceRequires
|
||||
)
|
||||
|
||||
class CeilometerServiceClientCharm(CharmBase):
|
||||
def __init__(self, *args):
|
||||
super().__init__(*args)
|
||||
# CeilometerService Requires
|
||||
self.ceilometer_service = CeilometerServiceRequires(
|
||||
self, "ceilometer_service",
|
||||
)
|
||||
self.framework.observe(
|
||||
self.ceilometer_service.on.config_changed,
|
||||
self._on_ceilometer_service_config_changed
|
||||
)
|
||||
self.framework.observe(
|
||||
self.ceilometer_service.on.goneaway,
|
||||
self._on_ceilometer_service_goneaway
|
||||
)
|
||||
|
||||
def _on_ceilometer_service_config_changed(self, event):
|
||||
'''React to the Ceilometer service config changed event.
|
||||
|
||||
This event happens when CeilometerService relation is added to the
|
||||
model and relation data is changed.
|
||||
'''
|
||||
# Do something with the configuration provided by relation.
|
||||
pass
|
||||
|
||||
def _on_ceilometer_service_goneaway(self, event):
|
||||
'''React to the CeilometerService goneaway event.
|
||||
|
||||
This event happens when CeilometerService relation is removed.
|
||||
'''
|
||||
# CeilometerService Relation has goneaway.
|
||||
pass
|
||||
```
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import (
|
||||
Optional,
|
||||
)
|
||||
|
||||
from ops.charm import (
|
||||
CharmBase,
|
||||
RelationBrokenEvent,
|
||||
RelationChangedEvent,
|
||||
RelationEvent,
|
||||
)
|
||||
from ops.framework import (
|
||||
EventSource,
|
||||
Object,
|
||||
ObjectEvents,
|
||||
)
|
||||
from ops.model import (
|
||||
Relation,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# The unique Charmhub library identifier, never change it
|
||||
LIBID = "fcbb94e7a18740729eaf9e2c3b90017f"
|
||||
|
||||
# Increment this major API version when introducing breaking changes
|
||||
LIBAPI = 0
|
||||
|
||||
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||
# to 0 if you are raising the major API version
|
||||
LIBPATCH = 1
|
||||
|
||||
|
||||
class CeilometerConfigRequestEvent(RelationEvent):
|
||||
"""CeilometerConfigRequest Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class CeilometerServiceProviderEvents(ObjectEvents):
|
||||
"""Events class for `on`."""
|
||||
|
||||
config_request = EventSource(CeilometerConfigRequestEvent)
|
||||
|
||||
|
||||
class CeilometerServiceProvides(Object):
|
||||
"""CeilometerServiceProvides class."""
|
||||
|
||||
on = CeilometerServiceProviderEvents()
|
||||
|
||||
def __init__(self, charm: CharmBase, relation_name: str):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_ceilometer_service_relation_changed,
|
||||
)
|
||||
|
||||
def _on_ceilometer_service_relation_changed(
|
||||
self, event: RelationChangedEvent
|
||||
):
|
||||
"""Handle CeilometerService relation changed."""
|
||||
logging.debug("CeilometerService relation changed")
|
||||
self.on.config_request.emit(event.relation)
|
||||
|
||||
def set_config(
|
||||
self, relation: Optional[Relation], telemetry_secret: str
|
||||
) -> None:
|
||||
"""Set ceilometer configuration on the relation."""
|
||||
if not self.charm.unit.is_leader():
|
||||
logging.debug("Not a leader unit, skipping set config")
|
||||
return
|
||||
|
||||
# If relation is not provided send config to all the related
|
||||
# applications. This happens usually when config data is
|
||||
# updated by provider and wants to send the data to all
|
||||
# related applications
|
||||
if relation is None:
|
||||
logging.debug(
|
||||
"Sending config to all related applications of relation"
|
||||
f"{self.relation_name}"
|
||||
)
|
||||
for relation in self.framework.model.relations[self.relation_name]:
|
||||
relation.data[self.charm.app][
|
||||
"telemetry-secret"
|
||||
] = telemetry_secret
|
||||
else:
|
||||
logging.debug(
|
||||
f"Sending config on relation {relation.app.name} "
|
||||
f"{relation.name}/{relation.id}"
|
||||
)
|
||||
relation.data[self.charm.app][
|
||||
"telemetry-secret"
|
||||
] = telemetry_secret
|
||||
|
||||
|
||||
class CeilometerConfigChangedEvent(RelationEvent):
|
||||
"""CeilometerConfigChanged Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class CeilometerServiceGoneAwayEvent(RelationEvent):
|
||||
"""CeilometerServiceGoneAway Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class CeilometerServiceRequirerEvents(ObjectEvents):
|
||||
"""Events class for `on`."""
|
||||
|
||||
config_changed = EventSource(CeilometerConfigChangedEvent)
|
||||
goneaway = EventSource(CeilometerServiceGoneAwayEvent)
|
||||
|
||||
|
||||
class CeilometerServiceRequires(Object):
|
||||
"""CeilometerServiceRequires class."""
|
||||
|
||||
on = CeilometerServiceRequirerEvents()
|
||||
|
||||
def __init__(self, charm: CharmBase, relation_name: str):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_ceilometer_service_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_broken,
|
||||
self._on_ceilometer_service_relation_broken,
|
||||
)
|
||||
|
||||
def _on_ceilometer_service_relation_changed(
|
||||
self, event: RelationChangedEvent
|
||||
):
|
||||
"""Handle CeilometerService relation changed."""
|
||||
logging.debug("CeilometerService config data changed")
|
||||
self.on.config_changed.emit(event.relation)
|
||||
|
||||
def _on_ceilometer_service_relation_broken(
|
||||
self, event: RelationBrokenEvent
|
||||
):
|
||||
"""Handle CeilometerService relation changed."""
|
||||
logging.debug("CeilometerService on_broken")
|
||||
self.on.goneaway.emit(event.relation)
|
||||
|
||||
@property
|
||||
def _ceilometer_service_rel(self) -> Optional[Relation]:
|
||||
"""The ceilometer service relation."""
|
||||
return self.framework.model.get_relation(self.relation_name)
|
||||
|
||||
def get_remote_app_data(self, key: str) -> Optional[str]:
|
||||
"""Return the value for the given key from remote app data."""
|
||||
if self._ceilometer_service_rel:
|
||||
data = self._ceilometer_service_rel.data[
|
||||
self._ceilometer_service_rel.app
|
||||
]
|
||||
return data.get(key)
|
||||
|
||||
return None
|
||||
|
||||
@property
|
||||
def telemetry_secret(self) -> Optional[str]:
|
||||
"""Return the telemetry_secret."""
|
||||
return self.get_remote_app_data("telemetry-secret")
|
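The module docstring above only covers the requirer side. A provider-side usage could look like the following sketch; the charm class, the relation endpoint name, and the literal secret ID are assumptions for illustration, not the real ceilometer-k8s charm.

from ops.charm import CharmBase
from ops.main import main

from charms.ceilometer_k8s.v0.ceilometer_service import (
    CeilometerServiceProvides,
)


class CeilometerProviderSketch(CharmBase):
    """Illustrative provider charm for the ceilometer_service interface."""

    def __init__(self, *args):
        super().__init__(*args)
        self.ceilometer_service = CeilometerServiceProvides(
            self, "ceilometer-service"
        )
        self.framework.observe(
            self.ceilometer_service.on.config_request,
            self._on_config_request,
        )

    def _on_config_request(self, event):
        # Answer a config request by publishing the telemetry secret ID
        # on the requesting relation; set_config is a no-op on non-leaders.
        self.ceilometer_service.set_config(
            relation=event.relation,
            telemetry_secret="secret:placeholder-id",  # placeholder value
        )


if __name__ == "__main__":
    main(CeilometerProviderSketch)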
265
ops-sunbeam/tests/lib/charms/cinder_ceph_k8s/v0/ceph_access.py
Normal file
265
ops-sunbeam/tests/lib/charms/cinder_ceph_k8s/v0/ceph_access.py
Normal file
@@ -0,0 +1,265 @@
|
||||
"""CephAccess Provides and Requires module.
|
||||
|
||||
This library contains the Requires and Provides classes for handling
|
||||
the ceph-access interface.
|
||||
|
||||
Import `CephAccessRequires` in your charm, with the charm object and the
|
||||
relation name:
|
||||
- self
|
||||
- "ceph_access"
|
||||
|
||||
Three events are also available to respond to:
|
||||
- connected
|
||||
- ready
|
||||
- goneaway
|
||||
|
||||
A basic example showing the usage of this relation follows:
|
||||
|
||||
```
|
||||
from charms.cinder_ceph_k8s.v0.ceph_access import CephAccessRequires
|
||||
|
||||
class CephAccessClientCharm(CharmBase):
|
||||
def __init__(self, *args):
|
||||
super().__init__(*args)
|
||||
# CephAccess Requires
|
||||
self.ceph_access = CephAccessRequires(
|
||||
self,
|
||||
relation_name="ceph_access",
|
||||
)
|
||||
self.framework.observe(
|
||||
self.ceph_access.on.connected, self._on_ceph_access_connected)
|
||||
self.framework.observe(
|
||||
self.ceph_access.on.ready, self._on_ceph_access_ready)
|
||||
self.framework.observe(
|
||||
self.ceph_access.on.goneaway, self._on_ceph_access_goneaway)
|
||||
|
||||
def _on_ceph_access_connected(self, event):
|
||||
'''React to the CephAccess connected event.
|
||||
|
||||
This event happens when a CephAccess relation is added to the
|
||||
model before credentials etc have been provided.
|
||||
'''
|
||||
# Do something before the relation is complete
|
||||
pass
|
||||
|
||||
def _on_ceph_access_ready(self, event):
|
||||
'''React to the CephAccess ready event.
|
||||
|
||||
This event happens when the CephAccess relation data is complete and credentials are available.
|
||||
'''
|
||||
# CephAccess relation is ready. Configure services to use the credentials.
|
||||
pass
|
||||
|
||||
```
|
||||
|
||||
"""
|
||||
|
||||
# The unique Charmhub library identifier, never change it
|
||||
LIBID = "7fa8d4f8407c4f31ab1deb51c0c046f1"
|
||||
|
||||
# Increment this major API version when introducing breaking changes
|
||||
LIBAPI = 0
|
||||
|
||||
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||
# to 0 if you are raising the major API version
|
||||
LIBPATCH = 1
|
||||
|
||||
import logging
|
||||
from typing import Optional
|
||||
from ops import (
|
||||
RelationEvent
|
||||
)
|
||||
from ops.model import (
|
||||
Relation,
|
||||
Secret,
|
||||
SecretNotFoundError,
|
||||
)
|
||||
from ops.framework import (
|
||||
EventBase,
|
||||
ObjectEvents,
|
||||
EventSource,
|
||||
Object,
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class CephAccessConnectedEvent(EventBase):
|
||||
"""CephAccess connected Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class CephAccessReadyEvent(EventBase):
|
||||
"""CephAccess ready for use Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class CephAccessGoneAwayEvent(EventBase):
|
||||
"""CephAccess relation has gone-away Event"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class CephAccessServerEvents(ObjectEvents):
|
||||
"""Events class for `on`"""
|
||||
|
||||
connected = EventSource(CephAccessConnectedEvent)
|
||||
ready = EventSource(CephAccessReadyEvent)
|
||||
goneaway = EventSource(CephAccessGoneAwayEvent)
|
||||
|
||||
|
||||
class CephAccessRequires(Object):
|
||||
"""
|
||||
CephAccessRequires class
|
||||
"""
|
||||
|
||||
|
||||
on = CephAccessServerEvents()
|
||||
|
||||
def __init__(self, charm, relation_name: str):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_joined,
|
||||
self._on_ceph_access_relation_joined,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_ceph_access_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_departed,
|
||||
self._on_ceph_access_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_broken,
|
||||
self._on_ceph_access_relation_broken,
|
||||
)
|
||||
|
||||
@property
|
||||
def _ceph_access_rel(self) -> Relation:
|
||||
"""The CephAccess relation."""
|
||||
return self.framework.model.get_relation(self.relation_name)
|
||||
|
||||
def get_remote_app_data(self, key: str) -> Optional[str]:
|
||||
"""Return the value for the given key from remote app data."""
|
||||
data = self._ceph_access_rel.data[self._ceph_access_rel.app]
|
||||
return data.get(key)
|
||||
|
||||
def _on_ceph_access_relation_joined(self, event):
|
||||
"""CephAccess relation joined."""
|
||||
logging.debug("CephAccess on_joined")
|
||||
self.on.connected.emit()
|
||||
|
||||
def _on_ceph_access_relation_changed(self, event):
|
||||
"""CephAccess relation changed."""
|
||||
logging.debug("CephAccess on_changed")
|
||||
try:
|
||||
if self.ready:
|
||||
self.on.ready.emit()
|
||||
except (AttributeError, KeyError):
|
||||
pass
|
||||
|
||||
def _on_ceph_access_relation_broken(self, event):
|
||||
"""CephAccess relation broken."""
|
||||
logging.debug("CephAccess on_broken")
|
||||
self.on.goneaway.emit()
|
||||
|
||||
def _retrieve_secret(self) -> Optional[Secret]:
|
||||
try:
|
||||
credentials_id = self.get_remote_app_data('access-credentials')
|
||||
if not credentials_id:
|
||||
return None
|
||||
credentials = self.charm.model.get_secret(id=credentials_id)
|
||||
return credentials
|
||||
except SecretNotFoundError:
|
||||
logger.warning(f"Secret {credentials_id} not found")
|
||||
return None
|
||||
|
||||
@property
|
||||
def ceph_access_data(self) -> dict:
|
||||
"""Return the service_password."""
|
||||
secret = self._retrieve_secret()
|
||||
if not secret:
|
||||
return {}
|
||||
return secret.get_content()
|
||||
|
||||
@property
|
||||
def ready(self) -> bool:
|
||||
"""Return the service_password."""
|
||||
return all(k in self.ceph_access_data for k in ["uuid", "key"])
|
||||
|
||||
class HasCephAccessClientsEvent(EventBase):
|
||||
"""Has CephAccessClients Event."""
|
||||
|
||||
pass
|
||||
|
||||
class ReadyCephAccessClientsEvent(RelationEvent):
|
||||
"""Has ReadyCephAccessClients Event."""
|
||||
|
||||
pass
|
||||
|
||||
class CephAccessClientEvents(ObjectEvents):
|
||||
"""Events class for `on`"""
|
||||
|
||||
has_ceph_access_clients = EventSource(HasCephAccessClientsEvent)
|
||||
ready_ceph_access_clients = EventSource(ReadyCephAccessClientsEvent)
|
||||
|
||||
|
||||
class CephAccessProvides(Object):
|
||||
"""
|
||||
CephAccessProvides class
|
||||
"""
|
||||
|
||||
on = CephAccessClientEvents()
|
||||
|
||||
def __init__(self, charm, relation_name):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_joined,
|
||||
self._on_ceph_access_relation_joined,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_ceph_access_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_broken,
|
||||
self._on_ceph_access_relation_broken,
|
||||
)
|
||||
|
||||
def _on_ceph_access_relation_joined(self, event):
|
||||
"""Handle CephAccess joined."""
|
||||
logging.debug("CephAccess on_joined")
|
||||
self.on.has_ceph_access_clients.emit()
|
||||
|
||||
def _on_ceph_access_relation_changed(self, event):
|
||||
"""Handle CephAccess joined."""
|
||||
logging.debug("CephAccess on_changed")
|
||||
self.on.ready_ceph_access_clients.emit(
|
||||
event.relation,
|
||||
app=event.app,
|
||||
unit=event.unit)
|
||||
|
||||
def _on_ceph_access_relation_broken(self, event):
|
||||
"""Handle CephAccess broken."""
|
||||
logging.debug("CephAccessProvides on_broken")
|
||||
|
||||
def set_ceph_access_credentials(self, relation_name: str,
|
||||
relation_id: int,
|
||||
access_credentials: str):
"""Publish the access credentials secret ID on the given relation."""
|
||||
|
||||
logging.debug("Setting ceph_access connection information.")
|
||||
_ceph_access_rel = None
|
||||
for relation in self.framework.model.relations[relation_name]:
|
||||
if relation.id == relation_id:
|
||||
_ceph_access_rel = relation
|
||||
if not _ceph_access_rel:
|
||||
# Relation has disappeared so skip send of data
|
||||
return
|
||||
app_data = _ceph_access_rel.data[self.charm.app]
|
||||
logging.debug(access_credentials)
|
||||
app_data["access-credentials"] = access_credentials
|
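A provider-side sketch for this interface could look like the following; it assumes a Juju version with secrets support (Application.add_secret and Secret.grant), and the charm class, relation endpoint name, and credential values are placeholders rather than the real cinder-ceph-k8s implementation.

from ops.charm import CharmBase
from ops.main import main

from charms.cinder_ceph_k8s.v0.ceph_access import CephAccessProvides


class CephAccessProviderSketch(CharmBase):
    """Illustrative provider charm for the ceph-access interface."""

    def __init__(self, *args):
        super().__init__(*args)
        self.ceph_access = CephAccessProvides(self, "ceph-access")
        self.framework.observe(
            self.ceph_access.on.ready_ceph_access_clients,
            self._on_ready_clients,
        )

    def _on_ready_clients(self, event):
        if not self.unit.is_leader():
            return
        # Store the cephx uuid/key in a Juju secret, grant the requirer
        # access to it, then publish the secret ID on the relation.
        secret = self.app.add_secret(
            {"uuid": "00000000-0000-0000-0000-000000000000", "key": "QVFBPT0="}
        )
        secret.grant(event.relation)
        self.ceph_access.set_ceph_access_credentials(
            relation_name="ceph-access",
            relation_id=event.relation.id,
            access_credentials=secret.id,
        )


if __name__ == "__main__":
    main(CephAccessProviderSketch)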
@@ -0,0 +1,537 @@
|
||||
# Copyright 2023 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
r"""[DEPRECATED] Relation 'requires' side abstraction for database relation.
|
||||
|
||||
This library is a uniform interface to a selection of common database
|
||||
metadata, with added custom events that add convenience to database management,
|
||||
and methods to consume the application related data.
|
||||
|
||||
Following an example of using the DatabaseCreatedEvent, in the context of the
|
||||
application charm code:
|
||||
|
||||
```python
|
||||
|
||||
from charms.data_platform_libs.v0.database_requires import (
|
||||
DatabaseCreatedEvent,
|
||||
DatabaseRequires,
|
||||
)
|
||||
|
||||
class ApplicationCharm(CharmBase):
|
||||
# Application charm that connects to database charms.
|
||||
|
||||
def __init__(self, *args):
|
||||
super().__init__(*args)
|
||||
|
||||
# Charm events defined in the database requires charm library.
|
||||
self.database = DatabaseRequires(self, relation_name="database", database_name="database")
|
||||
self.framework.observe(self.database.on.database_created, self._on_database_created)
|
||||
|
||||
def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
|
||||
# Handle the created database
|
||||
|
||||
# Create configuration file for app
|
||||
config_file = self._render_app_config_file(
|
||||
event.username,
|
||||
event.password,
|
||||
event.endpoints,
|
||||
)
|
||||
|
||||
# Start application with rendered configuration
|
||||
self._start_application(config_file)
|
||||
|
||||
# Set active status
|
||||
self.unit.status = ActiveStatus("received database credentials")
|
||||
```
|
||||
|
||||
As shown above, the library provides some custom events to handle specific situations,
|
||||
which are listed below:
|
||||
|
||||
— database_created: event emitted when the requested database is created.
|
||||
— endpoints_changed: event emitted when the read/write endpoints of the database have changed.
|
||||
— read_only_endpoints_changed: event emitted when the read-only endpoints of the database
|
||||
have changed. Event is not triggered if read/write endpoints changed too.
|
||||
|
||||
If it is needed to connect multiple database clusters to the same relation endpoint
|
||||
the application charm can implement the same code as if it would connect to only
|
||||
one database cluster (like the above code example).
|
||||
|
||||
To differentiate multiple clusters connected to the same relation endpoint
|
||||
the application charm can use the name of the remote application:
|
||||
|
||||
```python
|
||||
|
||||
def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
|
||||
# Get the remote app name of the cluster that triggered this event
|
||||
cluster = event.relation.app.name
|
||||
```
|
||||
|
||||
It is also possible to provide an alias for each different database cluster/relation.
|
||||
|
||||
So, it is possible to differentiate the clusters in two ways.
|
||||
The first is to use the remote application name, i.e., `event.relation.app.name`, as above.
|
||||
|
||||
The second way is to use different event handlers to handle each cluster events.
|
||||
The implementation would be something like the following code:
|
||||
|
||||
```python
|
||||
|
||||
from charms.data_platform_libs.v0.database_requires import (
|
||||
DatabaseCreatedEvent,
|
||||
DatabaseRequires,
|
||||
)
|
||||
|
||||
class ApplicationCharm(CharmBase):
|
||||
# Application charm that connects to database charms.
|
||||
|
||||
def __init__(self, *args):
|
||||
super().__init__(*args)
|
||||
|
||||
# Define the cluster aliases and one handler for each cluster database created event.
|
||||
self.database = DatabaseRequires(
|
||||
self,
|
||||
relation_name="database",
|
||||
database_name="database",
|
||||
relations_aliases = ["cluster1", "cluster2"],
|
||||
)
|
||||
self.framework.observe(
|
||||
self.database.on.cluster1_database_created, self._on_cluster1_database_created
|
||||
)
|
||||
self.framework.observe(
|
||||
self.database.on.cluster2_database_created, self._on_cluster2_database_created
|
||||
)
|
||||
|
||||
def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None:
|
||||
# Handle the created database on the cluster named cluster1
|
||||
|
||||
# Create configuration file for app
|
||||
config_file = self._render_app_config_file(
|
||||
event.username,
|
||||
event.password,
|
||||
event.endpoints,
|
||||
)
|
||||
...
|
||||
|
||||
def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None:
|
||||
# Handle the created database on the cluster named cluster2
|
||||
|
||||
# Create configuration file for app
|
||||
config_file = self._render_app_config_file(
|
||||
event.username,
|
||||
event.password,
|
||||
event.endpoints,
|
||||
)
|
||||
...
|
||||
|
||||
```
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from collections import namedtuple
|
||||
from datetime import datetime
|
||||
from typing import List, Optional
|
||||
|
||||
from ops.charm import (
|
||||
CharmEvents,
|
||||
RelationChangedEvent,
|
||||
RelationEvent,
|
||||
RelationJoinedEvent,
|
||||
)
|
||||
from ops.framework import EventSource, Object
|
||||
from ops.model import Relation
|
||||
|
||||
# The unique Charmhub library identifier, never change it
|
||||
LIBID = "0241e088ffa9440fb4e3126349b2fb62"
|
||||
|
||||
# Increment this major API version when introducing breaking changes
|
||||
LIBAPI = 0
|
||||
|
||||
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||
# to 0 if you are raising the major API version.
|
||||
LIBPATCH = 6
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DatabaseEvent(RelationEvent):
|
||||
"""Base class for database events."""
|
||||
|
||||
@property
|
||||
def endpoints(self) -> Optional[str]:
|
||||
"""Returns a comma separated list of read/write endpoints."""
|
||||
if not self.relation.app:
|
||||
return None
|
||||
|
||||
return self.relation.data[self.relation.app].get("endpoints")
|
||||
|
||||
@property
|
||||
def password(self) -> Optional[str]:
|
||||
"""Returns the password for the created user."""
|
||||
if not self.relation.app:
|
||||
return None
|
||||
|
||||
return self.relation.data[self.relation.app].get("password")
|
||||
|
||||
@property
|
||||
def read_only_endpoints(self) -> Optional[str]:
|
||||
"""Returns a comma separated list of read only endpoints."""
|
||||
if not self.relation.app:
|
||||
return None
|
||||
|
||||
return self.relation.data[self.relation.app].get("read-only-endpoints")
|
||||
|
||||
@property
|
||||
def replset(self) -> Optional[str]:
|
||||
"""Returns the replicaset name.
|
||||
|
||||
MongoDB only.
|
||||
"""
|
||||
if not self.relation.app:
|
||||
return None
|
||||
|
||||
return self.relation.data[self.relation.app].get("replset")
|
||||
|
||||
@property
|
||||
def tls(self) -> Optional[str]:
|
||||
"""Returns whether TLS is configured."""
|
||||
if not self.relation.app:
|
||||
return None
|
||||
|
||||
return self.relation.data[self.relation.app].get("tls")
|
||||
|
||||
@property
|
||||
def tls_ca(self) -> Optional[str]:
|
||||
"""Returns TLS CA."""
|
||||
if not self.relation.app:
|
||||
return None
|
||||
|
||||
return self.relation.data[self.relation.app].get("tls-ca")
|
||||
|
||||
@property
|
||||
def uris(self) -> Optional[str]:
|
||||
"""Returns the connection URIs.
|
||||
|
||||
MongoDB, Redis, OpenSearch and Kafka only.
|
||||
"""
|
||||
if not self.relation.app:
|
||||
return None
|
||||
|
||||
return self.relation.data[self.relation.app].get("uris")
|
||||
|
||||
@property
|
||||
def username(self) -> Optional[str]:
|
||||
"""Returns the created username."""
|
||||
if not self.relation.app:
|
||||
return None
|
||||
|
||||
return self.relation.data[self.relation.app].get("username")
|
||||
|
||||
@property
|
||||
def version(self) -> Optional[str]:
|
||||
"""Returns the version of the database.
|
||||
|
||||
Version as informed by the database daemon.
|
||||
"""
|
||||
if not self.relation.app:
|
||||
return None
|
||||
|
||||
return self.relation.data[self.relation.app].get("version")
|
||||
|
||||
|
||||
class DatabaseCreatedEvent(DatabaseEvent):
|
||||
"""Event emitted when a new database is created for use on this relation."""
|
||||
|
||||
|
||||
class DatabaseEndpointsChangedEvent(DatabaseEvent):
|
||||
"""Event emitted when the read/write endpoints are changed."""
|
||||
|
||||
|
||||
class DatabaseReadOnlyEndpointsChangedEvent(DatabaseEvent):
|
||||
"""Event emitted when the read only endpoints are changed."""
|
||||
|
||||
|
||||
class DatabaseEvents(CharmEvents):
|
||||
"""Database events.
|
||||
|
||||
This class defines the events that the database can emit.
|
||||
"""
|
||||
|
||||
database_created = EventSource(DatabaseCreatedEvent)
|
||||
endpoints_changed = EventSource(DatabaseEndpointsChangedEvent)
|
||||
read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent)
|
||||
|
||||
|
||||
Diff = namedtuple("Diff", "added changed deleted")
|
||||
Diff.__doc__ = """
|
||||
A tuple for storing the diff between two data mappings.
|
||||
|
||||
— added — keys that were added.
|
||||
— changed — keys that still exist but have new values.
|
||||
— deleted — keys that were deleted.
|
||||
"""
|
||||
|
||||
|
||||
class DatabaseRequires(Object):
|
||||
"""Requires-side of the database relation."""
|
||||
|
||||
on = DatabaseEvents() # pyright: ignore [reportGeneralTypeIssues]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
charm,
|
||||
relation_name: str,
|
||||
database_name: str,
|
||||
extra_user_roles: Optional[str] = None,
|
||||
relations_aliases: Optional[List[str]] = None,
|
||||
):
|
||||
"""Manager of database client relations."""
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.database = database_name
|
||||
self.extra_user_roles = extra_user_roles
|
||||
self.local_app = self.charm.model.app
|
||||
self.local_unit = self.charm.unit
|
||||
self.relation_name = relation_name
|
||||
self.relations_aliases = relations_aliases
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_joined, self._on_relation_joined_event
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed, self._on_relation_changed_event
|
||||
)
|
||||
|
||||
# Define custom event names for each alias.
|
||||
if relations_aliases:
|
||||
# Ensure the number of aliases does not exceed the maximum
|
||||
# of connections allowed in the specific relation.
|
||||
relation_connection_limit = self.charm.meta.requires[relation_name].limit
|
||||
if len(relations_aliases) != relation_connection_limit:
|
||||
raise ValueError(
|
||||
f"The number of aliases must match the maximum number of connections allowed in the relation. "
|
||||
f"Expected {relation_connection_limit}, got {len(relations_aliases)}"
|
||||
)
|
||||
|
||||
for relation_alias in relations_aliases:
|
||||
self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent)
|
||||
self.on.define_event(
|
||||
f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent
|
||||
)
|
||||
self.on.define_event(
|
||||
f"{relation_alias}_read_only_endpoints_changed",
|
||||
DatabaseReadOnlyEndpointsChangedEvent,
|
||||
)
|
||||
|
||||
def _assign_relation_alias(self, relation_id: int) -> None:
|
||||
"""Assigns an alias to a relation.
|
||||
|
||||
This function writes in the unit data bag.
|
||||
|
||||
Args:
|
||||
relation_id: the identifier for a particular relation.
|
||||
"""
|
||||
# If no aliases were provided, return immediately.
|
||||
if not self.relations_aliases:
|
||||
return
|
||||
|
||||
# Return if an alias was already assigned to this relation
|
||||
# (like when there are more than one unit joining the relation).
|
||||
if (
|
||||
self.charm.model.get_relation(self.relation_name, relation_id)
|
||||
.data[self.local_unit]
|
||||
.get("alias")
|
||||
):
|
||||
return
|
||||
|
||||
# Retrieve the available aliases (the ones that weren't assigned to any relation).
|
||||
available_aliases = self.relations_aliases[:]
|
||||
for relation in self.charm.model.relations[self.relation_name]:
|
||||
alias = relation.data[self.local_unit].get("alias")
|
||||
if alias:
|
||||
logger.debug("Alias %s was already assigned to relation %d", alias, relation.id)
|
||||
available_aliases.remove(alias)
|
||||
|
||||
# Set the alias in the unit relation databag of the specific relation.
|
||||
relation = self.charm.model.get_relation(self.relation_name, relation_id)
|
||||
relation.data[self.local_unit].update({"alias": available_aliases[0]})
|
||||
|
||||
def _diff(self, event: RelationChangedEvent) -> Diff:
|
||||
"""Retrieves the diff of the data in the relation changed databag.
|
||||
|
||||
Args:
|
||||
event: relation changed event.
|
||||
|
||||
Returns:
|
||||
a Diff instance containing the added, deleted and changed
|
||||
keys from the event relation databag.
|
||||
"""
|
||||
# Retrieve the old data from the data key in the local unit relation databag.
|
||||
old_data = json.loads(event.relation.data[self.local_unit].get("data", "{}"))
|
||||
# Retrieve the new data from the event relation databag.
|
||||
new_data = (
|
||||
{key: value for key, value in event.relation.data[event.app].items() if key != "data"}
|
||||
if event.app
|
||||
else {}
|
||||
)
|
||||
|
||||
# These are the keys that were added to the databag and triggered this event.
|
||||
added = new_data.keys() - old_data.keys()
|
||||
# These are the keys that were removed from the databag and triggered this event.
|
||||
deleted = old_data.keys() - new_data.keys()
|
||||
# These are the keys that already existed in the databag,
|
||||
# but had their values changed.
|
||||
changed = {
|
||||
key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key]
|
||||
}
|
||||
|
||||
# TODO: evaluate the possibility of losing the diff if some error
|
||||
# happens in the charm before the diff is completely checked (DPE-412).
|
||||
# Convert the new_data to a serializable format and save it for a next diff check.
|
||||
event.relation.data[self.local_unit].update({"data": json.dumps(new_data)})
|
||||
|
||||
# Return the diff with all possible changes.
|
||||
return Diff(added, changed, deleted)
|
||||
|
||||
def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None:
|
||||
"""Emit an aliased event to a particular relation if it has an alias.
|
||||
|
||||
Args:
|
||||
event: the relation changed event that was received.
|
||||
event_name: the name of the event to emit.
|
||||
"""
|
||||
alias = self._get_relation_alias(event.relation.id)
|
||||
if alias:
|
||||
getattr(self.on, f"{alias}_{event_name}").emit(
|
||||
event.relation, app=event.app, unit=event.unit
|
||||
)
|
||||
|
||||
def _get_relation_alias(self, relation_id: int) -> Optional[str]:
|
||||
"""Returns the relation alias.
|
||||
|
||||
Args:
|
||||
relation_id: the identifier for a particular relation.
|
||||
|
||||
Returns:
|
||||
the relation alias or None if the relation was not found.
|
||||
"""
|
||||
for relation in self.charm.model.relations[self.relation_name]:
|
||||
if relation.id == relation_id:
|
||||
return relation.data[self.local_unit].get("alias")
|
||||
return None
|
||||
|
||||
def fetch_relation_data(self) -> dict:
|
||||
"""Retrieves data from relation.
|
||||
|
||||
This function can be used to retrieve data from a relation
|
||||
in the charm code when outside an event callback.
|
||||
|
||||
Returns:
|
||||
a dict of the values stored in the relation data bag
|
||||
for all relation instances (indexed by the relation ID).
|
||||
"""
|
||||
data = {}
|
||||
for relation in self.relations:
|
||||
data[relation.id] = (
|
||||
{key: value for key, value in relation.data[relation.app].items() if key != "data"}
|
||||
if relation.app
|
||||
else {}
|
||||
)
|
||||
return data
|
||||
|
||||
def _update_relation_data(self, relation_id: int, data: dict) -> None:
|
||||
"""Updates a set of key-value pairs in the relation.
|
||||
|
||||
This function writes in the application data bag, therefore,
|
||||
only the leader unit can call it.
|
||||
|
||||
Args:
|
||||
relation_id: the identifier for a particular relation.
|
||||
data: dict containing the key-value pairs
|
||||
that should be updated in the relation.
|
||||
"""
|
||||
if self.local_unit.is_leader():
|
||||
relation = self.charm.model.get_relation(self.relation_name, relation_id)
|
||||
relation.data[self.local_app].update(data)
|
||||
|
||||
def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
|
||||
"""Event emitted when the application joins the database relation."""
|
||||
# If relations aliases were provided, assign one to the relation.
|
||||
self._assign_relation_alias(event.relation.id)
|
||||
|
||||
# Sets both database and extra user roles in the relation
|
||||
# if the roles are provided. Otherwise, sets only the database.
|
||||
if self.extra_user_roles:
|
||||
self._update_relation_data(
|
||||
event.relation.id,
|
||||
{
|
||||
"database": self.database,
|
||||
"extra-user-roles": self.extra_user_roles,
|
||||
},
|
||||
)
|
||||
else:
|
||||
self._update_relation_data(event.relation.id, {"database": self.database})
|
||||
|
||||
def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
|
||||
"""Event emitted when the database relation has changed."""
|
||||
# Check which data has changed to emit customs events.
|
||||
diff = self._diff(event)
|
||||
|
||||
# Check if the database is created
|
||||
# (the database charm shared the credentials).
|
||||
if "username" in diff.added and "password" in diff.added:
|
||||
# Emit the default event (the one without an alias).
|
||||
logger.info("database created at %s", datetime.now())
|
||||
getattr(self.on, "database_created").emit(
|
||||
event.relation, app=event.app, unit=event.unit
|
||||
)
|
||||
|
||||
# Emit the aliased event (if any).
|
||||
self._emit_aliased_event(event, "database_created")
|
||||
|
||||
# To avoid unnecessary application restarts do not trigger
|
||||
# “endpoints_changed“ event if “database_created“ is triggered.
|
||||
return
|
||||
|
||||
# Emit an endpoints changed event if the database
|
||||
# added or changed this info in the relation databag.
|
||||
if "endpoints" in diff.added or "endpoints" in diff.changed:
|
||||
# Emit the default event (the one without an alias).
|
||||
logger.info("endpoints changed on %s", datetime.now())
|
||||
getattr(self.on, "endpoints_changed").emit(
|
||||
event.relation, app=event.app, unit=event.unit
|
||||
)
|
||||
|
||||
# Emit the aliased event (if any).
|
||||
self._emit_aliased_event(event, "endpoints_changed")
|
||||
|
||||
# To avoid unnecessary application restarts do not trigger
|
||||
# “read_only_endpoints_changed“ event if “endpoints_changed“ is triggered.
|
||||
return
|
||||
|
||||
# Emit a read only endpoints changed event if the database
|
||||
# added or changed this info in the relation databag.
|
||||
if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed:
|
||||
# Emit the default event (the one without an alias).
|
||||
logger.info("read-only-endpoints changed on %s", datetime.now())
|
||||
getattr(self.on, "read_only_endpoints_changed").emit(
|
||||
event.relation, app=event.app, unit=event.unit
|
||||
)
|
||||
|
||||
# Emit the aliased event (if any).
|
||||
self._emit_aliased_event(event, "read_only_endpoints_changed")
|
||||
|
||||
@property
|
||||
def relations(self) -> List[Relation]:
|
||||
"""The list of Relation instances associated with this relation_name."""
|
||||
return list(self.charm.model.relations[self.relation_name])
|
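To supplement the docstring examples above, the following sketch shows fetch_relation_data being used outside an event callback; the charm class and the "database" requires endpoint in its metadata are assumptions made for illustration.

from ops.charm import CharmBase
from ops.main import main

from charms.data_platform_libs.v0.database_requires import DatabaseRequires


class DatabaseClientSketch(CharmBase):
    """Illustrative requirer charm for the database interface."""

    def __init__(self, *args):
        super().__init__(*args)
        self.database = DatabaseRequires(
            self, relation_name="database", database_name="myapp"
        )
        self.framework.observe(self.on.config_changed, self._on_config_changed)

    def _on_config_changed(self, event):
        # fetch_relation_data returns {relation_id: {key: value, ...}} for
        # every instance of the "database" relation, so credentials can be
        # re-read outside the database_created handler.
        for relation_id, data in self.database.fetch_relation_data().items():
            endpoints = data.get("endpoints")
            if endpoints:
                # Re-render application configuration with the endpoints.
                pass


if __name__ == "__main__":
    main(DatabaseClientSketch)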
@@ -0,0 +1,439 @@
|
||||
"""IdentityCredentialsProvides and Requires module.
|
||||
|
||||
|
||||
This library contains the Requires and Provides classes for handling
|
||||
the identity_credentials interface.
|
||||
|
||||
Import `IdentityCredentialsRequires` in your charm, with the charm object and the
|
||||
relation name:
|
||||
- self
|
||||
- "identity_credentials"
|
||||
|
||||
Also provide additional parameters to the charm object:
|
||||
- service
|
||||
- internal_url
|
||||
- public_url
|
||||
- admin_url
|
||||
- region
|
||||
- username
|
||||
- vhost
|
||||
|
||||
Three events are also available to respond to:
|
||||
- connected
|
||||
- ready
|
||||
- goneaway
|
||||
|
||||
A basic example showing the usage of this relation follows:
|
||||
|
||||
```
|
||||
from charms.keystone_k8s.v0.identity_credentials import IdentityCredentialsRequires
|
||||
|
||||
class IdentityCredentialsClientCharm(CharmBase):
|
||||
def __init__(self, *args):
|
||||
super().__init__(*args)
|
||||
# IdentityCredentials Requires
|
||||
self.identity_credentials = IdentityCredentialsRequires(
|
||||
self, "identity_credentials",
|
||||
service = "my-service"
|
||||
internal_url = "http://internal-url"
|
||||
public_url = "http://public-url"
|
||||
admin_url = "http://admin-url"
|
||||
region = "region"
|
||||
)
|
||||
self.framework.observe(
|
||||
self.identity_credentials.on.connected, self._on_identity_credentials_connected)
|
||||
self.framework.observe(
|
||||
self.identity_credentials.on.ready, self._on_identity_credentials_ready)
|
||||
self.framework.observe(
|
||||
self.identity_credentials.on.goneaway, self._on_identity_credentials_goneaway)
|
||||
|
||||
def _on_identity_credentials_connected(self, event):
|
||||
'''React to the IdentityCredentials connected event.
|
||||
|
||||
This event happens when IdentityCredentials relation is added to the
|
||||
model before credentials etc have been provided.
|
||||
'''
|
||||
# Do something before the relation is complete
|
||||
pass
|
||||
|
||||
def _on_identity_credentials_ready(self, event):
|
||||
'''React to the IdentityCredentials ready event.
|
||||
|
||||
The IdentityCredentials interface will use the provided config for the
|
||||
request to the identity server.
|
||||
'''
|
||||
# IdentityCredentials Relation is ready. Do something with the completed relation.
|
||||
pass
|
||||
|
||||
def _on_identity_credentials_goneaway(self, event):
|
||||
'''React to the IdentityCredentials goneaway event.
|
||||
|
||||
This event happens when an IdentityCredentials relation is removed.
|
||||
'''
|
||||
# IdentityCredentials relation has gone away. Shut down services or suchlike.
|
||||
pass
|
||||
```
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from ops.framework import (
|
||||
StoredState,
|
||||
EventBase,
|
||||
ObjectEvents,
|
||||
EventSource,
|
||||
Object,
|
||||
)
|
||||
from ops.model import (
|
||||
Relation,
|
||||
SecretNotFoundError,
|
||||
)
|
||||
|
||||
# The unique Charmhub library identifier, never change it
|
||||
LIBID = "b5fa18d4427c4ab9a269c3a2fbed545c"
|
||||
|
||||
# Increment this major API version when introducing breaking changes
|
||||
LIBAPI = 0
|
||||
|
||||
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||
# to 0 if you are raising the major API version
|
||||
LIBPATCH = 1
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class IdentityCredentialsConnectedEvent(EventBase):
|
||||
"""IdentityCredentials connected Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class IdentityCredentialsReadyEvent(EventBase):
|
||||
"""IdentityCredentials ready for use Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class IdentityCredentialsGoneAwayEvent(EventBase):
|
||||
"""IdentityCredentials relation has gone-away Event"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class IdentityCredentialsServerEvents(ObjectEvents):
|
||||
"""Events class for `on`"""
|
||||
|
||||
connected = EventSource(IdentityCredentialsConnectedEvent)
|
||||
ready = EventSource(IdentityCredentialsReadyEvent)
|
||||
goneaway = EventSource(IdentityCredentialsGoneAwayEvent)
|
||||
|
||||
|
||||
class IdentityCredentialsRequires(Object):
|
||||
"""
|
||||
IdentityCredentialsRequires class
|
||||
"""
|
||||
|
||||
on = IdentityCredentialsServerEvents()
|
||||
_stored = StoredState()
|
||||
|
||||
def __init__(self, charm, relation_name: str):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_joined,
|
||||
self._on_identity_credentials_relation_joined,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_identity_credentials_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_departed,
|
||||
self._on_identity_credentials_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_broken,
|
||||
self._on_identity_credentials_relation_broken,
|
||||
)
|
||||
|
||||
def _on_identity_credentials_relation_joined(self, event):
|
||||
"""IdentityCredentials relation joined."""
|
||||
logging.debug("IdentityCredentials on_joined")
|
||||
self.on.connected.emit()
|
||||
self.request_credentials()
|
||||
|
||||
def _on_identity_credentials_relation_changed(self, event):
|
||||
"""IdentityCredentials relation changed."""
|
||||
logging.debug("IdentityCredentials on_changed")
|
||||
try:
|
||||
self.on.ready.emit()
|
||||
except (AttributeError, KeyError):
|
||||
logger.exception('Error when emitting event')
|
||||
|
||||
def _on_identity_credentials_relation_broken(self, event):
|
||||
"""IdentityCredentials relation broken."""
|
||||
logging.debug("IdentityCredentials on_broken")
|
||||
self.on.goneaway.emit()
|
||||
|
||||
@property
|
||||
def _identity_credentials_rel(self) -> Relation:
|
||||
"""The IdentityCredentials relation."""
|
||||
return self.framework.model.get_relation(self.relation_name)
|
||||
|
||||
def get_remote_app_data(self, key: str) -> str:
|
||||
"""Return the value for the given key from remote app data."""
|
||||
data = self._identity_credentials_rel.data[self._identity_credentials_rel.app]
|
||||
return data.get(key)
|
||||
|
||||
@property
|
||||
def api_version(self) -> str:
|
||||
"""Return the api_version."""
|
||||
return self.get_remote_app_data('api-version')
|
||||
|
||||
@property
|
||||
def auth_host(self) -> str:
|
||||
"""Return the auth_host."""
|
||||
return self.get_remote_app_data('auth-host')
|
||||
|
||||
@property
|
||||
def auth_port(self) -> str:
|
||||
"""Return the auth_port."""
|
||||
return self.get_remote_app_data('auth-port')
|
||||
|
||||
@property
|
||||
def auth_protocol(self) -> str:
|
||||
"""Return the auth_protocol."""
|
||||
return self.get_remote_app_data('auth-protocol')
|
||||
|
||||
@property
|
||||
def internal_host(self) -> str:
|
||||
"""Return the internal_host."""
|
||||
return self.get_remote_app_data('internal-host')
|
||||
|
||||
@property
|
||||
def internal_port(self) -> str:
|
||||
"""Return the internal_port."""
|
||||
return self.get_remote_app_data('internal-port')
|
||||
|
||||
@property
|
||||
def internal_protocol(self) -> str:
|
||||
"""Return the internal_protocol."""
|
||||
return self.get_remote_app_data('internal-protocol')
|
||||
|
||||
@property
|
||||
def credentials(self) -> str:
"""Return the credentials secret ID."""
|
||||
return self.get_remote_app_data('credentials')
|
||||
|
||||
@property
|
||||
def username(self) -> str:
"""Return the username from the credentials secret."""
|
||||
credentials_id = self.get_remote_app_data('credentials')
|
||||
if not credentials_id:
|
||||
return None
|
||||
|
||||
try:
|
||||
credentials = self.charm.model.get_secret(id=credentials_id)
|
||||
return credentials.get_content().get("username")
|
||||
except SecretNotFoundError:
|
||||
logger.warning(f"Secret {credentials_id} not found")
|
||||
return None
|
||||
|
||||
@property
|
||||
def password(self) -> str:
"""Return the password from the credentials secret."""
|
||||
credentials_id = self.get_remote_app_data('credentials')
|
||||
if not credentials_id:
|
||||
return None
|
||||
|
||||
try:
|
||||
credentials = self.charm.model.get_secret(id=credentials_id)
|
||||
return credentials.get_content().get("password")
|
||||
except SecretNotFoundError:
|
||||
logger.warning(f"Secret {credentials_id} not found")
|
||||
return None
|
||||
|
||||
@property
|
||||
def project_name(self) -> str:
|
||||
"""Return the project name."""
|
||||
return self.get_remote_app_data('project-name')
|
||||
|
||||
@property
|
||||
def project_id(self) -> str:
|
||||
"""Return the project id."""
|
||||
return self.get_remote_app_data('project-id')
|
||||
|
||||
@property
|
||||
def user_domain_name(self) -> str:
|
||||
"""Return the name of the user domain."""
|
||||
return self.get_remote_app_data('user-domain-name')
|
||||
|
||||
@property
|
||||
def user_domain_id(self) -> str:
|
||||
"""Return the id of the user domain."""
|
||||
return self.get_remote_app_data('user-domain-id')
|
||||
|
||||
@property
|
||||
def project_domain_name(self) -> str:
|
||||
"""Return the name of the project domain."""
|
||||
return self.get_remote_app_data('project-domain-name')
|
||||
|
||||
@property
|
||||
def project_domain_id(self) -> str:
|
||||
"""Return the id of the project domain."""
|
||||
return self.get_remote_app_data('project-domain-id')
|
||||
|
||||
@property
|
||||
def region(self) -> str:
|
||||
"""Return the region for the auth urls."""
|
||||
return self.get_remote_app_data('region')
|
||||
|
||||
def request_credentials(self) -> None:
|
||||
"""Request credentials from the IdentityCredentials server."""
|
||||
if self.model.unit.is_leader():
|
||||
logging.debug(f'Requesting credentials for {self.charm.app.name}')
|
||||
app_data = self._identity_credentials_rel.data[self.charm.app]
|
||||
app_data['username'] = self.charm.app.name
|
||||
|
||||
|
||||
class HasIdentityCredentialsClientsEvent(EventBase):
|
||||
"""Has IdentityCredentialsClients Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class ReadyIdentityCredentialsClientsEvent(EventBase):
|
||||
"""IdentityCredentialsClients Ready Event."""
|
||||
|
||||
def __init__(self, handle, relation_id, relation_name, username):
|
||||
super().__init__(handle)
|
||||
self.relation_id = relation_id
|
||||
self.relation_name = relation_name
|
||||
self.username = username
|
||||
|
||||
def snapshot(self):
|
||||
return {
|
||||
"relation_id": self.relation_id,
|
||||
"relation_name": self.relation_name,
|
||||
"username": self.username,
|
||||
}
|
||||
|
||||
def restore(self, snapshot):
|
||||
super().restore(snapshot)
|
||||
self.relation_id = snapshot["relation_id"]
|
||||
self.relation_name = snapshot["relation_name"]
|
||||
self.username = snapshot["username"]
|
||||
|
||||
|
||||
class IdentityCredentialsClientsGoneAwayEvent(EventBase):
|
||||
"""Has IdentityCredentialsClientsGoneAwayEvent Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class IdentityCredentialsClientEvents(ObjectEvents):
|
||||
"""Events class for `on`"""
|
||||
|
||||
has_identity_credentials_clients = EventSource(
|
||||
HasIdentityCredentialsClientsEvent
|
||||
)
|
||||
ready_identity_credentials_clients = EventSource(
|
||||
ReadyIdentityCredentialsClientsEvent
|
||||
)
|
||||
identity_credentials_clients_gone = EventSource(
|
||||
IdentityCredentialsClientsGoneAwayEvent
|
||||
)
|
||||
|
||||
|
||||
class IdentityCredentialsProvides(Object):
|
||||
"""
|
||||
IdentityCredentialsProvides class
|
||||
"""
|
||||
|
||||
on = IdentityCredentialsClientEvents()
|
||||
_stored = StoredState()
|
||||
|
||||
def __init__(self, charm, relation_name):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_joined,
|
||||
self._on_identity_credentials_relation_joined,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_identity_credentials_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_broken,
|
||||
self._on_identity_credentials_relation_broken,
|
||||
)
|
||||
|
||||
def _on_identity_credentials_relation_joined(self, event):
|
||||
"""Handle IdentityCredentials joined."""
|
||||
logging.debug("IdentityCredentialsProvides on_joined")
|
||||
self.on.has_identity_credentials_clients.emit()
|
||||
|
||||
def _on_identity_credentials_relation_changed(self, event):
|
||||
"""Handle IdentityCredentials changed."""
|
||||
logging.debug("IdentityCredentials on_changed")
|
||||
REQUIRED_KEYS = ['username']
|
||||
|
||||
values = [
|
||||
event.relation.data[event.relation.app].get(k)
|
||||
for k in REQUIRED_KEYS
|
||||
]
|
||||
# Validate data on the relation
|
||||
if all(values):
|
||||
username = event.relation.data[event.relation.app]['username']
|
||||
self.on.ready_identity_credentials_clients.emit(
|
||||
event.relation.id,
|
||||
event.relation.name,
|
||||
username,
|
||||
)
|
||||
|
||||
def _on_identity_credentials_relation_broken(self, event):
|
||||
"""Handle IdentityCredentials broken."""
|
||||
logging.debug("IdentityCredentialsProvides on_departed")
|
||||
self.on.identity_credentials_clients_gone.emit()
|
||||
|
||||
def set_identity_credentials(self, relation_name: str,
|
||||
relation_id: int,
|
||||
api_version: str,
|
||||
auth_host: str,
|
||||
auth_port: str,
|
||||
auth_protocol: str,
|
||||
internal_host: str,
|
||||
internal_port: str,
|
||||
internal_protocol: str,
|
||||
credentials: str,
|
||||
project_name: str,
|
||||
project_id: str,
|
||||
user_domain_name: str,
|
||||
user_domain_id: str,
|
||||
project_domain_name: str,
|
||||
project_domain_id: str,
|
||||
region: str):
"""Publish identity credential data on the given relation."""
|
||||
logging.debug("Setting identity_credentials connection information.")
|
||||
_identity_credentials_rel = None
|
||||
for relation in self.framework.model.relations[relation_name]:
|
||||
if relation.id == relation_id:
|
||||
_identity_credentials_rel = relation
|
||||
if not _identity_credentials_rel:
|
||||
# Relation has disappeared so don't send the data
|
||||
return
|
||||
app_data = _identity_credentials_rel.data[self.charm.app]
|
||||
app_data["api-version"] = api_version
|
||||
app_data["auth-host"] = auth_host
|
||||
app_data["auth-port"] = str(auth_port)
|
||||
app_data["auth-protocol"] = auth_protocol
|
||||
app_data["internal-host"] = internal_host
|
||||
app_data["internal-port"] = str(internal_port)
|
||||
app_data["internal-protocol"] = internal_protocol
|
||||
app_data["credentials"] = credentials
|
||||
app_data["project-name"] = project_name
|
||||
app_data["project-id"] = project_id
|
||||
app_data["user-domain-name"] = user_domain_name
|
||||
app_data["user-domain-id"] = user_domain_id
|
||||
app_data["project-domain-name"] = project_domain_name
|
||||
app_data["project-domain-id"] = project_domain_id
|
||||
app_data["region"] = region
|
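For the provider side, which the docstring above does not illustrate, a sketch could look like the following; the charm class and every credential value are placeholders, not the real keystone-k8s implementation.

from ops.charm import CharmBase
from ops.main import main

from charms.keystone_k8s.v0.identity_credentials import (
    IdentityCredentialsProvides,
)


class IdentityCredentialsProviderSketch(CharmBase):
    """Illustrative provider charm for the identity_credentials interface."""

    def __init__(self, *args):
        super().__init__(*args)
        self.identity_credentials = IdentityCredentialsProvides(
            self, "identity-credentials"
        )
        self.framework.observe(
            self.identity_credentials.on.ready_identity_credentials_clients,
            self._on_ready_clients,
        )

    def _on_ready_clients(self, event):
        if not self.unit.is_leader():
            return
        # Every value below is a placeholder; a real provider would look
        # them up from its own configuration and a granted secret.
        self.identity_credentials.set_identity_credentials(
            relation_name=event.relation_name,
            relation_id=event.relation_id,
            api_version="3",
            auth_host="keystone.internal",
            auth_port=5000,
            auth_protocol="http",
            internal_host="keystone.internal",
            internal_port=5000,
            internal_protocol="http",
            credentials="secret:placeholder-id",
            project_name="services",
            project_id="placeholder-project-id",
            user_domain_name="service_domain",
            user_domain_id="placeholder-domain-id",
            project_domain_name="service_domain",
            project_domain_id="placeholder-domain-id",
            region="RegionOne",
        )


if __name__ == "__main__":
    main(IdentityCredentialsProviderSketch)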
@@ -0,0 +1,373 @@
|
||||
"""IdentityResourceProvides and Requires module.
|
||||
|
||||
|
||||
This library contains the Requires and Provides classes for handling
|
||||
the identity_ops interface.
|
||||
|
||||
Import `IdentityResourceRequires` in your charm, with the charm object and the
|
||||
relation name:
|
||||
- self
|
||||
- "identity_ops"
|
||||
|
||||
Also provide additional parameters to the charm object:
|
||||
- request
|
||||
|
||||
Three events are also available to respond to:
|
||||
- provider_ready
|
||||
- provider_goneaway
|
||||
- response_available
|
||||
|
||||
A basic example showing the usage of this relation follows:
|
||||
|
||||
```
|
||||
from charms.keystone_k8s.v0.identity_resource import IdentityResourceRequires
|
||||
|
||||
class IdentityResourceClientCharm(CharmBase):
|
||||
def __init__(self, *args):
|
||||
super().__init__(*args)
|
||||
# IdentityResource Requires
|
||||
self.identity_resource = IdentityResourceRequires(
|
||||
self, "identity_ops",
|
||||
)
|
||||
self.framework.observe(
|
||||
self.identity_resource.on.provider_ready, self._on_identity_resource_ready)
|
||||
self.framework.observe(
|
||||
self.identity_resource.on.provider_goneaway, self._on_identity_resource_goneaway)
|
||||
self.framework.observe(
|
||||
self.identity_resource.on.response_available, self._on_identity_resource_response)
|
||||
|
||||
def _on_identity_resource_ready(self, event):
|
||||
'''React to the IdentityResource provider_ready event.
|
||||
|
||||
This event happens when an IdentityResource relation is added to the
|
||||
model. Ready to send any ops to keystone.
|
||||
'''
|
||||
# Ready to send any ops.
|
||||
pass
|
||||
|
||||
def _on_identity_resource_response(self, event):
|
||||
'''React to the IdentityResource response_available event.
|
||||
|
||||
The IdentityResource interface will provide the response for the ops sent.
|
||||
'''
|
||||
# Read the response for the ops sent.
|
||||
pass
|
||||
|
||||
def _on_identity_resource_goneaway(self, event):
|
||||
'''React to the IdentityResource goneaway event.
|
||||
|
||||
This event happens when an IdentityResource relation is removed.
|
||||
'''
|
||||
# IdentityResource Relation has goneaway. No ops can be sent.
|
||||
pass
|
||||
```
|
||||
|
||||
A sample ops request can be of format
|
||||
{
|
||||
"id": <request id>
|
||||
"tag": <string to identify request>
|
||||
"ops": [
|
||||
{
|
||||
"name": <op name>,
|
||||
"params": {
|
||||
<param 1>: <value 1>,
|
||||
<param 2>: <value 2>
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
For any sensitive data in the ops params, the charm can create secrets and pass
|
||||
the secret ID instead of the sensitive data as part of the ops request. The charm should
|
||||
grant the provider charm (i.e., keystone) access to the secret over the relation.
|
||||
The secret content should hold the sensitive data under the same name as the param name.
|
||||
"""
|
||||
|
||||
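# Illustration only (not part of the upstream library): a request that asks
# keystone to create a hypothetical domain could be shaped like the dict
# below and passed to the requirer for forwarding over the relation. The op
# name and params are placeholders; consult keystone-k8s for supported ops.
#
# example_request = {
#     "id": "1",
#     "tag": "create_example_domain",
#     "ops": [
#         {
#             "name": "create_domain",
#             "params": {"name": "example", "enable": True},
#         }
#     ],
# }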
import json
|
||||
import logging
|
||||
|
||||
from ops.charm import (
|
||||
RelationEvent,
|
||||
)
|
||||
from ops.framework import (
|
||||
EventBase,
|
||||
EventSource,
|
||||
Object,
|
||||
ObjectEvents,
|
||||
StoredState,
|
||||
)
|
||||
from ops.model import (
|
||||
Relation,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# The unique Charmhub library identifier, never change it
|
||||
LIBID = "b419d4d8249e423487daafc3665ed06f"
|
||||
|
||||
# Increment this major API version when introducing breaking changes
|
||||
LIBAPI = 0
|
||||
|
||||
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||
# to 0 if you are raising the major API version
|
||||
LIBPATCH = 2
|
||||
|
||||
|
||||
REQUEST_NOT_SENT = 1
|
||||
REQUEST_SENT = 2
|
||||
REQUEST_PROCESSED = 3
|
||||
|
||||
|
||||
class IdentityOpsProviderReadyEvent(RelationEvent):
|
||||
"""Has IdentityOpsProviderReady Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class IdentityOpsResponseEvent(RelationEvent):
|
||||
"""Has IdentityOpsResponse Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class IdentityOpsProviderGoneAwayEvent(RelationEvent):
|
||||
"""Has IdentityOpsProviderGoneAway Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class IdentityResourceResponseEvents(ObjectEvents):
|
||||
"""Events class for `on`."""
|
||||
|
||||
provider_ready = EventSource(IdentityOpsProviderReadyEvent)
|
||||
response_available = EventSource(IdentityOpsResponseEvent)
|
||||
provider_goneaway = EventSource(IdentityOpsProviderGoneAwayEvent)
|
||||
|
||||
|
||||
class IdentityResourceRequires(Object):
|
||||
"""IdentityResourceRequires class."""
|
||||
|
||||
on = IdentityResourceResponseEvents()
|
||||
_stored = StoredState()
|
||||
|
||||
def __init__(self, charm, relation_name):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self._stored.set_default(provider_ready=False, requests=[])
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_joined,
|
||||
self._on_identity_resource_relation_joined,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_identity_resource_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_broken,
|
||||
self._on_identity_resource_relation_broken,
|
||||
)
|
||||
|
||||
def _on_identity_resource_relation_joined(self, event):
|
||||
"""Handle IdentityResource joined."""
|
||||
self._stored.provider_ready = True
|
||||
self.on.provider_ready.emit(event.relation)
|
||||
|
||||
def _on_identity_resource_relation_changed(self, event):
|
||||
"""Handle IdentityResource changed."""
|
||||
id_ = self.response.get("id")
|
||||
self.save_request_in_store(id_, None, None, REQUEST_PROCESSED)
|
||||
self.on.response_available.emit(event.relation)
|
||||
|
||||
def _on_identity_resource_relation_broken(self, event):
|
||||
"""Handle IdentityResource broken."""
|
||||
self._stored.provider_ready = False
|
||||
self.on.provider_goneaway.emit(event.relation)
|
||||
|
||||
@property
|
||||
def _identity_resource_rel(self) -> Relation:
|
||||
"""The IdentityResource relation."""
|
||||
return self.framework.model.get_relation(self.relation_name)
|
||||
|
||||
@property
|
||||
def response(self) -> dict:
|
||||
"""Response object from keystone."""
|
||||
response = self.get_remote_app_data("response")
|
||||
if not response:
|
||||
return {}
|
||||
|
||||
try:
|
||||
return json.loads(response)
|
||||
except Exception as e:
|
||||
logger.debug(str(e))
|
||||
|
||||
return {}
|
||||
|
||||
def save_request_in_store(self, id: str, tag: str, ops: list, state: int):
|
||||
"""Save request in the store."""
|
||||
if id is None:
|
||||
return
|
||||
|
||||
for request in self._stored.requests:
|
||||
if request.get("id") == id:
|
||||
if tag:
|
||||
request["tag"] = tag
|
||||
if ops:
|
||||
request["ops"] = ops
|
||||
request["state"] = state
|
||||
return
|
||||
|
||||
# New request
|
||||
self._stored.requests.append(
|
||||
{"id": id, "tag": tag, "ops": ops, "state": state}
|
||||
)
|
||||
|
||||
def get_request_from_store(self, id: str) -> dict:
|
||||
"""Get request from the stote."""
|
||||
for request in self._stored.requests:
|
||||
if request.get("id") == id:
|
||||
return request
|
||||
|
||||
return {}
|
||||
|
||||
def is_request_processed(self, id: str) -> bool:
|
||||
"""Check if request is processed."""
|
||||
for request in self._stored.requests:
|
||||
if (
|
||||
request.get("id") == id
|
||||
and request.get("state") == REQUEST_PROCESSED
|
||||
):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_remote_app_data(self, key: str) -> str:
|
||||
"""Return the value for the given key from remote app data."""
|
||||
data = self._identity_resource_rel.data[
|
||||
self._identity_resource_rel.app
|
||||
]
|
||||
return data.get(key)
|
||||
|
||||
def ready(self) -> bool:
|
||||
"""Interface is ready or not.
|
||||
|
||||
Interface is considered ready if the op request is processed
|
||||
and the response is received. For a non-leader unit, just consider
|
||||
the interface to be ready.
|
||||
"""
|
||||
if not self.model.unit.is_leader():
|
||||
logger.debug("Not a leader unit, set the interface to ready")
|
||||
return True
|
||||
|
||||
try:
|
||||
app_data = self._identity_resource_rel.data[self.charm.app]
|
||||
if "request" not in app_data:
|
||||
return False
|
||||
|
||||
request = json.loads(app_data["request"])
|
||||
request_id = request.get("id")
|
||||
response_id = self.response.get("id")
|
||||
if request_id == response_id:
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.debug(str(e))
|
||||
|
||||
return False
|
||||
|
||||
def request_ops(self, request: dict) -> None:
|
||||
"""Request keystone ops."""
|
||||
if not self.model.unit.is_leader():
|
||||
logger.debug("Not a leader unit, not sending request")
|
||||
return
|
||||
|
||||
id_ = request.get("id")
|
||||
tag = request.get("tag")
|
||||
ops = request.get("ops")
|
||||
req = self.get_request_from_store(id_)
|
||||
if req and req.get("state") == REQUEST_PROCESSED:
|
||||
logger.debug("Request {id_} already processed")
|
||||
return
|
||||
|
||||
if not self._stored.provider_ready:
|
||||
self.save_request_in_store(id_, tag, ops, REQUEST_NOT_SENT)
|
||||
logger.debug("Keystone not yet ready to take requests")
|
||||
return
|
||||
|
||||
logger.debug("Requesting ops to keystone")
|
||||
app_data = self._identity_resource_rel.data[self.charm.app]
|
||||
app_data["request"] = json.dumps(request)
|
||||
self.save_request_in_store(id_, tag, ops, REQUEST_SENT)
|
||||
|
||||
|
||||
class IdentityOpsRequestEvent(EventBase):
|
||||
"""Has IdentityOpsRequest Event."""
|
||||
|
||||
def __init__(self, handle, relation_id, relation_name, request):
|
||||
"""Initialise event."""
|
||||
super().__init__(handle)
|
||||
self.relation_id = relation_id
|
||||
self.relation_name = relation_name
|
||||
self.request = request
|
||||
|
||||
def snapshot(self):
|
||||
"""Snapshot the event."""
|
||||
return {
|
||||
"relation_id": self.relation_id,
|
||||
"relation_name": self.relation_name,
|
||||
"request": self.request,
|
||||
}
|
||||
|
||||
def restore(self, snapshot):
|
||||
"""Restore the event."""
|
||||
super().restore(snapshot)
|
||||
self.relation_id = snapshot["relation_id"]
|
||||
self.relation_name = snapshot["relation_name"]
|
||||
self.request = snapshot["request"]
|
||||
|
||||
|
||||
class IdentityResourceProviderEvents(ObjectEvents):
|
||||
"""Events class for `on`."""
|
||||
|
||||
process_op = EventSource(IdentityOpsRequestEvent)
|
||||
|
||||
|
||||
class IdentityResourceProvides(Object):
|
||||
"""IdentityResourceProvides class."""
|
||||
|
||||
on = IdentityResourceProviderEvents()
|
||||
|
||||
def __init__(self, charm, relation_name):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_identity_resource_relation_changed,
|
||||
)
|
||||
|
||||
def _on_identity_resource_relation_changed(self, event):
|
||||
"""Handle IdentityResource changed."""
|
||||
request = event.relation.data[event.relation.app].get("request", {})
|
||||
self.on.process_op.emit(
|
||||
event.relation.id, event.relation.name, request
|
||||
)
|
||||
|
||||
def set_ops_response(
|
||||
self, relation_id: str, relation_name: str, ops_response: dict
|
||||
):
|
||||
"""Set response to ops request."""
|
||||
if not self.model.unit.is_leader():
|
||||
logger.debug("Not a leader unit, not sending response")
|
||||
return
|
||||
|
||||
logger.debug("Update response from keystone")
|
||||
_identity_resource_rel = self.charm.model.get_relation(
|
||||
relation_name, relation_id
|
||||
)
|
||||
if not _identity_resource_rel:
|
||||
# Relation has disappeared so skip send of data
|
||||
return
|
||||
|
||||
app_data = _identity_resource_rel.data[self.charm.app]
|
||||
app_data["response"] = json.dumps(ops_response)
|
525
ops-sunbeam/tests/lib/charms/keystone_k8s/v1/identity_service.py
Normal file
@@ -0,0 +1,525 @@
|
||||
"""IdentityServiceProvides and Requires module.
|
||||
|
||||
|
||||
This library contains the Requires and Provides classes for handling
|
||||
the identity_service interface.
|
||||
|
||||
Import `IdentityServiceRequires` in your charm, with the charm object and the
|
||||
relation name:
|
||||
- self
|
||||
- "identity_service"
|
||||
|
||||
Also provide additional parameters to the charm object:
|
||||
- service_endpoints
- region
|
||||
|
||||
Three events are also available to respond to:
|
||||
- connected
|
||||
- ready
|
||||
- goneaway
|
||||
|
||||
A basic example showing the usage of this relation follows:
|
||||
|
||||
```
|
||||
from charms.keystone_k8s.v1.identity_service import IdentityServiceRequires
|
||||
|
||||
class IdentityServiceClientCharm(CharmBase):
|
||||
def __init__(self, *args):
|
||||
super().__init__(*args)
|
||||
# IdentityService Requires
|
||||
self.identity_service = IdentityServiceRequires(
|
||||
self, "identity_service",
|
||||
service = "my-service"
|
||||
internal_url = "http://internal-url"
|
||||
public_url = "http://public-url"
|
||||
admin_url = "http://admin-url"
|
||||
region = "region"
|
||||
)
|
||||
self.framework.observe(
|
||||
self.identity_service.on.connected, self._on_identity_service_connected)
|
||||
self.framework.observe(
|
||||
self.identity_service.on.ready, self._on_identity_service_ready)
|
||||
self.framework.observe(
|
||||
self.identity_service.on.goneaway, self._on_identity_service_goneaway)
|
||||
|
||||
def _on_identity_service_connected(self, event):
|
||||
'''React to the IdentityService connected event.
|
||||
|
||||
This event happens when an IdentityService relation is added to the
|
||||
model before credentials etc have been provided.
|
||||
'''
|
||||
# Do something before the relation is complete
|
||||
pass
|
||||
|
||||
def _on_identity_service_ready(self, event):
|
||||
'''React to the IdentityService ready event.
|
||||
|
||||
The IdentityService interface will use the provided config for the
|
||||
request to the identity server.
|
||||
'''
|
||||
# IdentityService Relation is ready. Do something with the completed relation.
|
||||
pass
|
||||
|
||||
def _on_identity_service_goneaway(self, event):
|
||||
'''React to the IdentityService goneaway event.
|
||||
|
||||
This event happens when an IdentityService relation is removed.
|
||||
'''
|
||||
# IdentityService Relation has goneaway. shutdown services or suchlike
|
||||
pass
|
||||
```
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
|
||||
from ops.framework import (
|
||||
StoredState,
|
||||
EventBase,
|
||||
ObjectEvents,
|
||||
EventSource,
|
||||
Object,
|
||||
)
|
||||
from ops.model import (
|
||||
Relation,
|
||||
SecretNotFoundError,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# The unique Charmhub library identifier, never change it
|
||||
LIBID = "0fa7fe7236c14c6e9624acf232b9a3b0"
|
||||
|
||||
# Increment this major API version when introducing breaking changes
|
||||
LIBAPI = 1
|
||||
|
||||
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||
# to 0 if you are raising the major API version
|
||||
LIBPATCH = 1
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class IdentityServiceConnectedEvent(EventBase):
|
||||
"""IdentityService connected Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class IdentityServiceReadyEvent(EventBase):
|
||||
"""IdentityService ready for use Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class IdentityServiceGoneAwayEvent(EventBase):
|
||||
"""IdentityService relation has gone-away Event"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class IdentityServiceServerEvents(ObjectEvents):
|
||||
"""Events class for `on`"""
|
||||
|
||||
connected = EventSource(IdentityServiceConnectedEvent)
|
||||
ready = EventSource(IdentityServiceReadyEvent)
|
||||
goneaway = EventSource(IdentityServiceGoneAwayEvent)
|
||||
|
||||
|
||||
class IdentityServiceRequires(Object):
|
||||
"""
|
||||
IdentityServiceRequires class
|
||||
"""
|
||||
|
||||
on = IdentityServiceServerEvents()
|
||||
_stored = StoredState()
|
||||
|
||||
def __init__(self, charm, relation_name: str, service_endpoints: dict,
|
||||
region: str):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self.service_endpoints = service_endpoints
|
||||
self.region = region
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_joined,
|
||||
self._on_identity_service_relation_joined,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_identity_service_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_departed,
|
||||
self._on_identity_service_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_broken,
|
||||
self._on_identity_service_relation_broken,
|
||||
)
|
||||
|
||||
def _on_identity_service_relation_joined(self, event):
|
||||
"""IdentityService relation joined."""
|
||||
logging.debug("IdentityService on_joined")
|
||||
self.on.connected.emit()
|
||||
self.register_services(
|
||||
self.service_endpoints,
|
||||
self.region)
|
||||
|
||||
def _on_identity_service_relation_changed(self, event):
|
||||
"""IdentityService relation changed."""
|
||||
logging.debug("IdentityService on_changed")
|
||||
try:
|
||||
self.service_password
|
||||
self.on.ready.emit()
|
||||
except (AttributeError, KeyError):
|
||||
pass
|
||||
|
||||
def _on_identity_service_relation_broken(self, event):
|
||||
"""IdentityService relation broken."""
|
||||
logging.debug("IdentityService on_broken")
|
||||
self.on.goneaway.emit()
|
||||
|
||||
@property
|
||||
def _identity_service_rel(self) -> Relation:
|
||||
"""The IdentityService relation."""
|
||||
return self.framework.model.get_relation(self.relation_name)
|
||||
|
||||
def get_remote_app_data(self, key: str) -> str:
|
||||
"""Return the value for the given key from remote app data."""
|
||||
data = self._identity_service_rel.data[self._identity_service_rel.app]
|
||||
return data.get(key)
|
||||
|
||||
@property
|
||||
def api_version(self) -> str:
|
||||
"""Return the api_version."""
|
||||
return self.get_remote_app_data('api-version')
|
||||
|
||||
@property
|
||||
def auth_host(self) -> str:
|
||||
"""Return the auth_host."""
|
||||
return self.get_remote_app_data('auth-host')
|
||||
|
||||
@property
|
||||
def auth_port(self) -> str:
|
||||
"""Return the auth_port."""
|
||||
return self.get_remote_app_data('auth-port')
|
||||
|
||||
@property
|
||||
def auth_protocol(self) -> str:
|
||||
"""Return the auth_protocol."""
|
||||
return self.get_remote_app_data('auth-protocol')
|
||||
|
||||
@property
|
||||
def internal_host(self) -> str:
|
||||
"""Return the internal_host."""
|
||||
return self.get_remote_app_data('internal-host')
|
||||
|
||||
@property
|
||||
def internal_port(self) -> str:
|
||||
"""Return the internal_port."""
|
||||
return self.get_remote_app_data('internal-port')
|
||||
|
||||
@property
|
||||
def internal_protocol(self) -> str:
|
||||
"""Return the internal_protocol."""
|
||||
return self.get_remote_app_data('internal-protocol')
|
||||
|
||||
@property
|
||||
def admin_domain_name(self) -> str:
|
||||
"""Return the admin_domain_name."""
|
||||
return self.get_remote_app_data('admin-domain-name')
|
||||
|
||||
@property
|
||||
def admin_domain_id(self) -> str:
|
||||
"""Return the admin_domain_id."""
|
||||
return self.get_remote_app_data('admin-domain-id')
|
||||
|
||||
@property
|
||||
def admin_project_name(self) -> str:
|
||||
"""Return the admin_project_name."""
|
||||
return self.get_remote_app_data('admin-project-name')
|
||||
|
||||
@property
|
||||
def admin_project_id(self) -> str:
|
||||
"""Return the admin_project_id."""
|
||||
return self.get_remote_app_data('admin-project-id')
|
||||
|
||||
@property
|
||||
def admin_user_name(self) -> str:
|
||||
"""Return the admin_user_name."""
|
||||
return self.get_remote_app_data('admin-user-name')
|
||||
|
||||
@property
|
||||
def admin_user_id(self) -> str:
|
||||
"""Return the admin_user_id."""
|
||||
return self.get_remote_app_data('admin-user-id')
|
||||
|
||||
@property
|
||||
def service_domain_name(self) -> str:
|
||||
"""Return the service_domain_name."""
|
||||
return self.get_remote_app_data('service-domain-name')
|
||||
|
||||
@property
|
||||
def service_domain_id(self) -> str:
|
||||
"""Return the service_domain_id."""
|
||||
return self.get_remote_app_data('service-domain-id')
|
||||
|
||||
@property
|
||||
def service_host(self) -> str:
|
||||
"""Return the service_host."""
|
||||
return self.get_remote_app_data('service-host')
|
||||
|
||||
@property
|
||||
def service_credentials(self) -> str:
|
||||
"""Return the service_credentials secret."""
|
||||
return self.get_remote_app_data('service-credentials')
|
||||
|
||||
@property
|
||||
def service_password(self) -> str:
|
||||
"""Return the service_password."""
|
||||
credentials_id = self.get_remote_app_data('service-credentials')
|
||||
if not credentials_id:
|
||||
return None
|
||||
|
||||
try:
|
||||
credentials = self.charm.model.get_secret(id=credentials_id)
|
||||
return credentials.get_content().get("password")
|
||||
except SecretNotFoundError:
|
||||
logger.warning(f"Secret {credentials_id} not found")
|
||||
return None
|
||||
|
||||
@property
|
||||
def service_port(self) -> str:
|
||||
"""Return the service_port."""
|
||||
return self.get_remote_app_data('service-port')
|
||||
|
||||
@property
|
||||
def service_protocol(self) -> str:
|
||||
"""Return the service_protocol."""
|
||||
return self.get_remote_app_data('service-protocol')
|
||||
|
||||
@property
|
||||
def service_project_name(self) -> str:
|
||||
"""Return the service_project_name."""
|
||||
return self.get_remote_app_data('service-project-name')
|
||||
|
||||
@property
|
||||
def service_project_id(self) -> str:
|
||||
"""Return the service_project_id."""
|
||||
return self.get_remote_app_data('service-project-id')
|
||||
|
||||
@property
|
||||
def service_user_name(self) -> str:
|
||||
"""Return the service_user_name."""
|
||||
credentials_id = self.get_remote_app_data('service-credentials')
|
||||
if not credentials_id:
|
||||
return None
|
||||
|
||||
try:
|
||||
credentials = self.charm.model.get_secret(id=credentials_id)
|
||||
return credentials.get_content().get("username")
|
||||
except SecretNotFoundError:
|
||||
logger.warning(f"Secret {credentials_id} not found")
|
||||
return None
|
||||
|
||||
@property
|
||||
def service_user_id(self) -> str:
|
||||
"""Return the service_user_id."""
|
||||
return self.get_remote_app_data('service-user-id')
|
||||
|
||||
@property
|
||||
def internal_auth_url(self) -> str:
|
||||
"""Return the internal_auth_url."""
|
||||
return self.get_remote_app_data('internal-auth-url')
|
||||
|
||||
@property
|
||||
def admin_auth_url(self) -> str:
|
||||
"""Return the admin_auth_url."""
|
||||
return self.get_remote_app_data('admin-auth-url')
|
||||
|
||||
@property
|
||||
def public_auth_url(self) -> str:
|
||||
"""Return the public_auth_url."""
|
||||
return self.get_remote_app_data('public-auth-url')
|
||||
|
||||
@property
|
||||
def admin_role(self) -> str:
|
||||
"""Return the admin_role."""
|
||||
return self.get_remote_app_data('admin-role')
|
||||
|
||||
def register_services(self, service_endpoints: dict,
|
||||
region: str) -> None:
|
||||
"""Request access to the IdentityService server."""
|
||||
if self.model.unit.is_leader():
|
||||
logging.debug("Requesting service registration")
|
||||
app_data = self._identity_service_rel.data[self.charm.app]
|
||||
app_data["service-endpoints"] = json.dumps(
|
||||
service_endpoints, sort_keys=True
|
||||
)
|
||||
app_data["region"] = region
|
||||
|
||||
|
||||
class HasIdentityServiceClientsEvent(EventBase):
|
||||
"""Has IdentityServiceClients Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class ReadyIdentityServiceClientsEvent(EventBase):
|
||||
"""IdentityServiceClients Ready Event."""
|
||||
|
||||
def __init__(self, handle, relation_id, relation_name, service_endpoints,
|
||||
region, client_app_name):
|
||||
super().__init__(handle)
|
||||
self.relation_id = relation_id
|
||||
self.relation_name = relation_name
|
||||
self.service_endpoints = service_endpoints
|
||||
self.region = region
|
||||
self.client_app_name = client_app_name
|
||||
|
||||
def snapshot(self):
|
||||
return {
|
||||
"relation_id": self.relation_id,
|
||||
"relation_name": self.relation_name,
|
||||
"service_endpoints": self.service_endpoints,
|
||||
"client_app_name": self.client_app_name,
|
||||
"region": self.region}
|
||||
|
||||
def restore(self, snapshot):
|
||||
super().restore(snapshot)
|
||||
self.relation_id = snapshot["relation_id"]
|
||||
self.relation_name = snapshot["relation_name"]
|
||||
self.service_endpoints = snapshot["service_endpoints"]
|
||||
self.region = snapshot["region"]
|
||||
self.client_app_name = snapshot["client_app_name"]
|
||||
|
||||
|
||||
class IdentityServiceClientEvents(ObjectEvents):
|
||||
"""Events class for `on`"""
|
||||
|
||||
has_identity_service_clients = EventSource(HasIdentityServiceClientsEvent)
|
||||
ready_identity_service_clients = EventSource(ReadyIdentityServiceClientsEvent)
|
||||
|
||||
|
||||
class IdentityServiceProvides(Object):
|
||||
"""
|
||||
IdentityServiceProvides class
|
||||
"""
|
||||
|
||||
on = IdentityServiceClientEvents()
|
||||
_stored = StoredState()
|
||||
|
||||
def __init__(self, charm, relation_name):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_joined,
|
||||
self._on_identity_service_relation_joined,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_identity_service_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_broken,
|
||||
self._on_identity_service_relation_broken,
|
||||
)
|
||||
|
||||
def _on_identity_service_relation_joined(self, event):
|
||||
"""Handle IdentityService joined."""
|
||||
logging.debug("IdentityService on_joined")
|
||||
self.on.has_identity_service_clients.emit()
|
||||
|
||||
def _on_identity_service_relation_changed(self, event):
|
||||
"""Handle IdentityService changed."""
|
||||
logging.debug("IdentityService on_changed")
|
||||
REQUIRED_KEYS = [
|
||||
'service-endpoints',
|
||||
'region']
|
||||
|
||||
values = [
|
||||
event.relation.data[event.relation.app].get(k)
|
||||
for k in REQUIRED_KEYS
|
||||
]
|
||||
# Validate data on the relation
|
||||
if all(values):
|
||||
service_eps = json.loads(
|
||||
event.relation.data[event.relation.app]['service-endpoints'])
|
||||
self.on.ready_identity_service_clients.emit(
|
||||
event.relation.id,
|
||||
event.relation.name,
|
||||
service_eps,
|
||||
event.relation.data[event.relation.app]['region'],
|
||||
event.relation.app.name)
|
||||
|
||||
def _on_identity_service_relation_broken(self, event):
|
||||
"""Handle IdentityService broken."""
|
||||
logging.debug("IdentityServiceProvides on_departed")
|
||||
# TODO clear data on the relation
|
||||
|
||||
def set_identity_service_credentials(self, relation_name: str,
|
||||
relation_id: int,
|
||||
api_version: str,
|
||||
auth_host: str,
|
||||
auth_port: str,
|
||||
auth_protocol: str,
|
||||
internal_host: str,
|
||||
internal_port: str,
|
||||
internal_protocol: str,
|
||||
service_host: str,
|
||||
service_port: str,
|
||||
service_protocol: str,
|
||||
admin_domain: str,
|
||||
admin_project: str,
|
||||
admin_user: str,
|
||||
service_domain: str,
|
||||
service_project: str,
|
||||
service_user: str,
|
||||
internal_auth_url: str,
|
||||
admin_auth_url: str,
|
||||
public_auth_url: str,
|
||||
service_credentials: str,
|
||||
admin_role: str):
|
||||
logging.debug("Setting identity_service connection information.")
|
||||
_identity_service_rel = None
|
||||
for relation in self.framework.model.relations[relation_name]:
|
||||
if relation.id == relation_id:
|
||||
_identity_service_rel = relation
|
||||
if not _identity_service_rel:
|
||||
# Relation has disappeared so skip send of data
|
||||
return
|
||||
app_data = _identity_service_rel.data[self.charm.app]
|
||||
app_data["api-version"] = api_version
|
||||
app_data["auth-host"] = auth_host
|
||||
app_data["auth-port"] = str(auth_port)
|
||||
app_data["auth-protocol"] = auth_protocol
|
||||
app_data["internal-host"] = internal_host
|
||||
app_data["internal-port"] = str(internal_port)
|
||||
app_data["internal-protocol"] = internal_protocol
|
||||
app_data["service-host"] = service_host
|
||||
app_data["service-port"] = str(service_port)
|
||||
app_data["service-protocol"] = service_protocol
|
||||
app_data["admin-domain-name"] = admin_domain.name
|
||||
app_data["admin-domain-id"] = admin_domain.id
|
||||
app_data["admin-project-name"] = admin_project.name
|
||||
app_data["admin-project-id"] = admin_project.id
|
||||
app_data["admin-user-name"] = admin_user.name
|
||||
app_data["admin-user-id"] = admin_user.id
|
||||
app_data["service-domain-name"] = service_domain.name
|
||||
app_data["service-domain-id"] = service_domain.id
|
||||
app_data["service-project-name"] = service_project.name
|
||||
app_data["service-project-id"] = service_project.id
|
||||
app_data["service-user-id"] = service_user.id
|
||||
app_data["internal-auth-url"] = internal_auth_url
|
||||
app_data["admin-auth-url"] = admin_auth_url
|
||||
app_data["public-auth-url"] = public_auth_url
|
||||
app_data["service-credentials"] = service_credentials
|
||||
app_data["admin-role"] = admin_role
|
@@ -0,0 +1,416 @@
|
||||
# Copyright 2023 Canonical Ltd.
|
||||
# Licensed under the Apache2.0. See LICENSE file in charm source for details.
|
||||
"""Library for the ingress relation.
|
||||
|
||||
This library contains the Requires and Provides classes for handling
|
||||
the ingress interface.
|
||||
|
||||
Import `IngressRequires` in your charm, with two required options:
|
||||
- "self" (the charm itself)
|
||||
- config_dict
|
||||
|
||||
`config_dict` accepts the following keys:
|
||||
- additional-hostnames
|
||||
- backend-protocol
|
||||
- limit-rps
|
||||
- limit-whitelist
|
||||
- max-body-size
|
||||
- owasp-modsecurity-crs
|
||||
- owasp-modsecurity-custom-rules
|
||||
- path-routes
|
||||
- retry-errors
|
||||
- rewrite-enabled
|
||||
- rewrite-target
|
||||
- service-hostname (required)
|
||||
- service-name (required)
|
||||
- service-namespace
|
||||
- service-port (required)
|
||||
- session-cookie-max-age
|
||||
- tls-secret-name
|
||||
|
||||
See [the config section](https://charmhub.io/nginx-ingress-integrator/configure) for descriptions
|
||||
of each, along with the required type.
|
||||
|
||||
As an example, add the following to `src/charm.py`:
|
||||
```
|
||||
from charms.nginx_ingress_integrator.v0.ingress import IngressRequires
|
||||
|
||||
# In your charm's `__init__` method (assuming your app is listening on port 8080).
|
||||
self.ingress = IngressRequires(self, {
|
||||
"service-hostname": self.app.name,
|
||||
"service-name": self.app.name,
|
||||
"service-port": 8080,
|
||||
}
|
||||
)
|
||||
```
|
||||
And then add the following to `metadata.yaml`:
|
||||
```
|
||||
requires:
|
||||
ingress:
|
||||
interface: ingress
|
||||
```
|
||||
You _must_ register the IngressRequires class as part of the `__init__` method
|
||||
rather than, for instance, a config-changed event handler, for the relation
|
||||
changed event to be properly handled.
|
||||
|
||||
In the example above we're setting `service-hostname` (which translates to the
|
||||
external hostname for the application when related to nginx-ingress-integrator)
|
||||
to `self.app.name` here. This ensures by default the charm will be available on
|
||||
the name of the deployed juju application, but can be overridden in a
|
||||
production deployment by setting `service-hostname` on the
|
||||
nginx-ingress-integrator charm. For example:
|
||||
```bash
|
||||
juju deploy nginx-ingress-integrator
|
||||
juju deploy my-charm
|
||||
juju relate nginx-ingress-integrator my-charm:ingress
|
||||
# The service is now reachable on the ingress IP(s) of your k8s cluster at
|
||||
# 'http://my-charm'.
|
||||
juju config nginx-ingress-integrator service-hostname='my-charm.example.com'
|
||||
# The service is now reachable on the ingress IP(s) of your k8s cluster at
|
||||
# 'http://my-charm.example.com'.
```
|
||||
"""
|
||||
|
||||
import copy
|
||||
import logging
|
||||
from typing import Dict
|
||||
|
||||
from ops.charm import CharmBase, CharmEvents, RelationBrokenEvent, RelationChangedEvent
|
||||
from ops.framework import EventBase, EventSource, Object
|
||||
from ops.model import BlockedStatus
|
||||
|
||||
INGRESS_RELATION_NAME = "ingress"
|
||||
INGRESS_PROXY_RELATION_NAME = "ingress-proxy"
|
||||
|
||||
# The unique Charmhub library identifier, never change it
|
||||
LIBID = "db0af4367506491c91663468fb5caa4c"
|
||||
|
||||
# Increment this major API version when introducing breaking changes
|
||||
LIBAPI = 0
|
||||
|
||||
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||
# to 0 if you are raising the major API version
|
||||
LIBPATCH = 17
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
REQUIRED_INGRESS_RELATION_FIELDS = {"service-hostname", "service-name", "service-port"}
|
||||
|
||||
OPTIONAL_INGRESS_RELATION_FIELDS = {
|
||||
"additional-hostnames",
|
||||
"backend-protocol",
|
||||
"limit-rps",
|
||||
"limit-whitelist",
|
||||
"max-body-size",
|
||||
"owasp-modsecurity-crs",
|
||||
"owasp-modsecurity-custom-rules",
|
||||
"path-routes",
|
||||
"retry-errors",
|
||||
"rewrite-target",
|
||||
"rewrite-enabled",
|
||||
"service-namespace",
|
||||
"session-cookie-max-age",
|
||||
"tls-secret-name",
|
||||
}
|
||||
|
||||
RELATION_INTERFACES_MAPPINGS = {
|
||||
"service-hostname": "host",
|
||||
"service-name": "name",
|
||||
"service-namespace": "model",
|
||||
"service-port": "port",
|
||||
}
|
||||
RELATION_INTERFACES_MAPPINGS_VALUES = set(RELATION_INTERFACES_MAPPINGS.values())
|
||||
|
||||
|
||||
class IngressAvailableEvent(EventBase):
|
||||
"""IngressAvailableEvent custom event.
|
||||
|
||||
This event indicates the Ingress provider is available.
|
||||
"""
|
||||
|
||||
|
||||
class IngressProxyAvailableEvent(EventBase):
|
||||
"""IngressProxyAvailableEvent custom event.
|
||||
|
||||
This event indicates the IngressProxy provider is available.
|
||||
"""
|
||||
|
||||
|
||||
class IngressBrokenEvent(RelationBrokenEvent):
|
||||
"""IngressBrokenEvent custom event.
|
||||
|
||||
This event indicates the Ingress provider is broken.
|
||||
"""
|
||||
|
||||
|
||||
class IngressCharmEvents(CharmEvents):
|
||||
"""Custom charm events.
|
||||
|
||||
Attrs:
|
||||
ingress_available: Event to indicate that Ingress is available.
|
||||
ingress_proxy_available: Event to indicate that IngressProxy is available.
|
||||
ingress_broken: Event to indicate that Ingress is broken.
|
||||
"""
|
||||
|
||||
ingress_available = EventSource(IngressAvailableEvent)
|
||||
ingress_proxy_available = EventSource(IngressProxyAvailableEvent)
|
||||
ingress_broken = EventSource(IngressBrokenEvent)
|
||||
|
||||
|
||||
class IngressRequires(Object):
|
||||
"""This class defines the functionality for the 'requires' side of the 'ingress' relation.
|
||||
|
||||
Hook events observed:
|
||||
- relation-changed
|
||||
|
||||
Attrs:
|
||||
model: Juju model where the charm is deployed.
|
||||
config_dict: Contains all the configuration options for Ingress.
|
||||
"""
|
||||
|
||||
def __init__(self, charm: CharmBase, config_dict: Dict) -> None:
|
||||
"""Init function for the IngressRequires class.
|
||||
|
||||
Args:
|
||||
charm: The charm that requires the ingress relation.
|
||||
config_dict: Contains all the configuration options for Ingress.
|
||||
"""
|
||||
super().__init__(charm, INGRESS_RELATION_NAME)
|
||||
|
||||
self.framework.observe(
|
||||
charm.on[INGRESS_RELATION_NAME].relation_changed, self._on_relation_changed
|
||||
)
|
||||
|
||||
# Set default values.
|
||||
default_relation_fields = {
|
||||
"service-namespace": self.model.name,
|
||||
}
|
||||
config_dict.update(
|
||||
(key, value)
|
||||
for key, value in default_relation_fields.items()
|
||||
if key not in config_dict or not config_dict[key]
|
||||
)
|
||||
|
||||
self.config_dict = self._convert_to_relation_interface(config_dict)
|
||||
|
||||
@staticmethod
|
||||
def _convert_to_relation_interface(config_dict: Dict) -> Dict:
|
||||
"""Create a new relation dict that conforms with charm-relation-interfaces.
|
||||
|
||||
Args:
|
||||
config_dict: Ingress configuration that doesn't conform with charm-relation-interfaces.
|
||||
|
||||
Returns:
|
||||
The Ingress configuration conforming with charm-relation-interfaces.
|
||||
"""
|
||||
config_dict = copy.copy(config_dict)
|
||||
config_dict.update(
|
||||
(key, config_dict[old_key])
|
||||
for old_key, key in RELATION_INTERFACES_MAPPINGS.items()
|
||||
if old_key in config_dict and config_dict[old_key]
|
||||
)
|
||||
return config_dict
|
||||
|
||||
def _config_dict_errors(self, config_dict: Dict, update_only: bool = False) -> bool:
|
||||
"""Check our config dict for errors.
|
||||
|
||||
Args:
|
||||
config_dict: Contains all the configuration options for Ingress.
|
||||
update_only: If the charm needs to update only existing keys.
|
||||
|
||||
Returns:
|
||||
If we need to update the config dict or not.
|
||||
"""
|
||||
blocked_message = "Error in ingress relation, check `juju debug-log`"
|
||||
unknown = [
|
||||
config_key
|
||||
for config_key in config_dict
|
||||
if config_key
|
||||
not in REQUIRED_INGRESS_RELATION_FIELDS
|
||||
| OPTIONAL_INGRESS_RELATION_FIELDS
|
||||
| RELATION_INTERFACES_MAPPINGS_VALUES
|
||||
]
|
||||
if unknown:
|
||||
LOGGER.error(
|
||||
"Ingress relation error, unknown key(s) in config dictionary found: %s",
|
||||
", ".join(unknown),
|
||||
)
|
||||
self.model.unit.status = BlockedStatus(blocked_message)
|
||||
return True
|
||||
if not update_only:
|
||||
missing = tuple(
|
||||
config_key
|
||||
for config_key in REQUIRED_INGRESS_RELATION_FIELDS
|
||||
if config_key not in self.config_dict
|
||||
)
|
||||
if missing:
|
||||
LOGGER.error(
|
||||
"Ingress relation error, missing required key(s) in config dictionary: %s",
|
||||
", ".join(sorted(missing)),
|
||||
)
|
||||
self.model.unit.status = BlockedStatus(blocked_message)
|
||||
return True
|
||||
return False
|
||||
|
||||
def _on_relation_changed(self, event: RelationChangedEvent) -> None:
|
||||
"""Handle the relation-changed event.
|
||||
|
||||
Args:
|
||||
event: Event triggering the relation-changed hook for the relation.
|
||||
"""
|
||||
# `self.unit` isn't available here, so use `self.model.unit`.
|
||||
if self.model.unit.is_leader():
|
||||
if self._config_dict_errors(config_dict=self.config_dict):
|
||||
return
|
||||
event.relation.data[self.model.app].update(
|
||||
(key, str(self.config_dict[key])) for key in self.config_dict
|
||||
)
|
||||
|
||||
def update_config(self, config_dict: Dict) -> None:
|
||||
"""Allow for updates to relation.
|
||||
|
||||
Args:
|
||||
config_dict: Contains all the configuration options for Ingress.
|
||||
|
||||
Attrs:
|
||||
config_dict: Contains all the configuration options for Ingress.
|
||||
"""
|
||||
if self.model.unit.is_leader():
|
||||
self.config_dict = self._convert_to_relation_interface(config_dict)
|
||||
if self._config_dict_errors(self.config_dict, update_only=True):
|
||||
return
|
||||
relation = self.model.get_relation(INGRESS_RELATION_NAME)
|
||||
if relation:
|
||||
for key in self.config_dict:
|
||||
relation.data[self.model.app][key] = str(self.config_dict[key])
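A charm would typically call `update_config` from its config-changed handler so the relation data tracks charm configuration; a minimal sketch, assuming a hypothetical `external-hostname` option:

```
def _on_config_changed(self, event):
    # Re-send the ingress configuration whenever the charm config changes.
    self.ingress.update_config(
        {"service-hostname": self.config.get("external-hostname") or self.app.name}
    )
```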
|
||||
|
||||
|
||||
class IngressBaseProvides(Object):
|
||||
"""Parent class for IngressProvides and IngressProxyProvides.
|
||||
|
||||
Attrs:
|
||||
model: Juju model where the charm is deployed.
|
||||
"""
|
||||
|
||||
def __init__(self, charm: CharmBase, relation_name: str) -> None:
|
||||
"""Init function for the IngressProxyProvides class.
|
||||
|
||||
Args:
|
||||
charm: The charm that provides the ingress-proxy relation.
|
||||
relation_name: The name of the relation.
|
||||
"""
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
|
||||
def _on_relation_changed(self, event: RelationChangedEvent) -> None:
|
||||
"""Handle a change to the ingress/ingress-proxy relation.
|
||||
|
||||
Confirm we have the fields we expect to receive.
|
||||
|
||||
Args:
|
||||
event: Event triggering the relation-changed hook for the relation.
|
||||
"""
|
||||
# `self.unit` isn't available here, so use `self.model.unit`.
|
||||
if not self.model.unit.is_leader():
|
||||
return
|
||||
|
||||
relation_name = event.relation.name
|
||||
|
||||
assert event.app is not None # nosec
|
||||
if not event.relation.data[event.app]:
|
||||
LOGGER.info(
|
||||
"%s hasn't finished configuring, waiting until relation is changed again.",
|
||||
relation_name,
|
||||
)
|
||||
return
|
||||
|
||||
ingress_data = {
|
||||
field: event.relation.data[event.app].get(field)
|
||||
for field in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS
|
||||
}
|
||||
|
||||
missing_fields = sorted(
|
||||
field for field in REQUIRED_INGRESS_RELATION_FIELDS if ingress_data.get(field) is None
|
||||
)
|
||||
|
||||
if missing_fields:
|
||||
LOGGER.warning(
|
||||
"Missing required data fields for %s relation: %s",
|
||||
relation_name,
|
||||
", ".join(missing_fields),
|
||||
)
|
||||
self.model.unit.status = BlockedStatus(
|
||||
f"Missing fields for {relation_name}: {', '.join(missing_fields)}"
|
||||
)
|
||||
|
||||
if relation_name == INGRESS_RELATION_NAME:
|
||||
# Conform to charm-relation-interfaces.
|
||||
if "name" in ingress_data and "port" in ingress_data:
|
||||
name = ingress_data["name"]
|
||||
port = ingress_data["port"]
|
||||
else:
|
||||
name = ingress_data["service-name"]
|
||||
port = ingress_data["service-port"]
|
||||
event.relation.data[self.model.app]["url"] = f"http://{name}:{port}/"
|
||||
|
||||
# Create an event that our charm can use to decide it's okay to
|
||||
# configure the ingress.
|
||||
self.charm.on.ingress_available.emit()
|
||||
elif relation_name == INGRESS_PROXY_RELATION_NAME:
|
||||
self.charm.on.ingress_proxy_available.emit()
|
||||
|
||||
|
||||
class IngressProvides(IngressBaseProvides):
|
||||
"""Class containing the functionality for the 'provides' side of the 'ingress' relation.
|
||||
|
||||
Hook events observed:
|
||||
- relation-changed
|
||||
"""
|
||||
|
||||
def __init__(self, charm: CharmBase) -> None:
|
||||
"""Init function for the IngressProvides class.
|
||||
|
||||
Args:
|
||||
charm: The charm that provides the ingress relation.
|
||||
"""
|
||||
super().__init__(charm, INGRESS_RELATION_NAME)
|
||||
# Observe the relation-changed hook event and bind
|
||||
# self.on_relation_changed() to handle the event.
|
||||
self.framework.observe(
|
||||
charm.on[INGRESS_RELATION_NAME].relation_changed, self._on_relation_changed
|
||||
)
|
||||
self.framework.observe(
|
||||
charm.on[INGRESS_RELATION_NAME].relation_broken, self._on_relation_broken
|
||||
)
|
||||
|
||||
def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
|
||||
"""Handle a relation-broken event in the ingress relation.
|
||||
|
||||
Args:
|
||||
event: Event triggering the relation-broken hook for the relation.
|
||||
"""
|
||||
if not self.model.unit.is_leader():
|
||||
return
|
||||
|
||||
# Create an event that our charm can use to remove the ingress resource.
|
||||
self.charm.on.ingress_broken.emit(event.relation)
|
||||
|
||||
|
||||
class IngressProxyProvides(IngressBaseProvides):
|
||||
"""Class containing the functionality for the 'provides' side of the 'ingress-proxy' relation.
|
||||
|
||||
Hook events observed:
|
||||
- relation-changed
|
||||
"""
|
||||
|
||||
def __init__(self, charm: CharmBase) -> None:
|
||||
"""Init function for the IngressProxyProvides class.
|
||||
|
||||
Args:
|
||||
charm: The charm that provides the ingress-proxy relation.
|
||||
"""
|
||||
super().__init__(charm, INGRESS_PROXY_RELATION_NAME)
|
||||
# Observe the relation-changed hook event and bind
|
||||
# self.on_relation_changed() to handle the event.
|
||||
self.framework.observe(
|
||||
charm.on[INGRESS_PROXY_RELATION_NAME].relation_changed, self._on_relation_changed
|
||||
)
|
206
ops-sunbeam/tests/lib/charms/ovn_central_k8s/v0/ovsdb.py
Normal file
@@ -0,0 +1,206 @@
|
||||
"""TODO: Add a proper docstring here.
|
||||
|
||||
This is a placeholder docstring for this charm library. Docstrings are
|
||||
presented on Charmhub and updated whenever you push a new version of the
|
||||
library.
|
||||
|
||||
Complete documentation about creating and documenting libraries can be found
|
||||
in the SDK docs at https://juju.is/docs/sdk/libraries.
|
||||
|
||||
See `charmcraft publish-lib` and `charmcraft fetch-lib` for details of how to
|
||||
share and consume charm libraries. They serve to enhance collaboration
|
||||
between charmers. Use a charmer's libraries for classes that handle
|
||||
integration with their charm.
|
||||
|
||||
Bear in mind that new revisions of the different major API versions (v0, v1,
|
||||
v2 etc) are maintained independently. You can continue to update v0 and v1
|
||||
after you have pushed v3.
|
||||
|
||||
Markdown is supported, following the CommonMark specification.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import typing
|
||||
from ops.framework import (
|
||||
StoredState,
|
||||
EventBase,
|
||||
ObjectEvents,
|
||||
EventSource,
|
||||
Object,
|
||||
)
|
||||
|
||||
# The unique Charmhub library identifier, never change it
|
||||
LIBID = "114b7bb1970445daa61650e451f9da62"
|
||||
|
||||
# Increment this major API version when introducing breaking changes
|
||||
LIBAPI = 0
|
||||
|
||||
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||
# to 0 if you are raising the major API version
|
||||
LIBPATCH = 3
|
||||
|
||||
|
||||
|
||||
class OVSDBCMSConnectedEvent(EventBase):
|
||||
"""OVSDBCMS connected Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class OVSDBCMSReadyEvent(EventBase):
|
||||
"""OVSDBCMS ready for use Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class OVSDBCMSGoneAwayEvent(EventBase):
|
||||
"""OVSDBCMS relation has gone-away Event"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class OVSDBCMSServerEvents(ObjectEvents):
|
||||
"""Events class for `on`"""
|
||||
|
||||
connected = EventSource(OVSDBCMSConnectedEvent)
|
||||
ready = EventSource(OVSDBCMSReadyEvent)
|
||||
goneaway = EventSource(OVSDBCMSGoneAwayEvent)
|
||||
|
||||
|
||||
class OVSDBCMSRequires(Object):
|
||||
"""
|
||||
OVSDBCMSRequires class
|
||||
"""
|
||||
|
||||
on = OVSDBCMSServerEvents()
|
||||
_stored = StoredState()
|
||||
|
||||
def __init__(self, charm, relation_name: str):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_joined,
|
||||
self._on_ovsdb_cms_relation_joined,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_ovsdb_cms_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_departed,
|
||||
self._on_ovsdb_cms_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_broken,
|
||||
self._on_ovsdb_cms_relation_broken,
|
||||
)
|
||||
|
||||
def _on_ovsdb_cms_relation_joined(self, event):
|
||||
"""OVSDBCMS relation joined."""
|
||||
logging.debug("OVSDBCMSRequires on_joined")
|
||||
self.on.connected.emit()
|
||||
|
||||
def bound_hostnames(self):
|
||||
return self.get_all_unit_values("bound-hostname")
|
||||
|
||||
def bound_addresses(self):
|
||||
return self.get_all_unit_values("bound-address")
|
||||
|
||||
def remote_ready(self):
|
||||
return all(self.bound_hostnames()) or all(self.bound_addresses())
|
||||
|
||||
def _on_ovsdb_cms_relation_changed(self, event):
|
||||
"""OVSDBCMS relation changed."""
|
||||
logging.debug("OVSDBCMSRequires on_changed")
|
||||
if self.remote_ready():
|
||||
self.on.ready.emit()
|
||||
|
||||
def _on_ovsdb_cms_relation_broken(self, event):
|
||||
"""OVSDBCMS relation broken."""
|
||||
logging.debug("OVSDBCMSRequires on_broken")
|
||||
self.on.goneaway.emit()
|
||||
|
||||
def get_all_unit_values(self, key: str) -> typing.List[str]:
|
||||
"""Retrieve value for key from all related units."""
|
||||
values = []
|
||||
relation = self.framework.model.get_relation(self.relation_name)
|
||||
if relation:
|
||||
for unit in relation.units:
|
||||
values.append(relation.data[unit].get(key))
|
||||
return values
|
||||
|
||||
|
||||
|
||||
class OVSDBCMSClientConnectedEvent(EventBase):
|
||||
"""OVSDBCMS connected Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class OVSDBCMSClientReadyEvent(EventBase):
|
||||
"""OVSDBCMS ready for use Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class OVSDBCMSClientGoneAwayEvent(EventBase):
|
||||
"""OVSDBCMS relation has gone-away Event"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class OVSDBCMSClientEvents(ObjectEvents):
|
||||
"""Events class for `on`"""
|
||||
|
||||
connected = EventSource(OVSDBCMSClientConnectedEvent)
|
||||
ready = EventSource(OVSDBCMSClientReadyEvent)
|
||||
goneaway = EventSource(OVSDBCMSClientGoneAwayEvent)
|
||||
|
||||
|
||||
class OVSDBCMSProvides(Object):
|
||||
"""
|
||||
OVSDBCMSProvides class
|
||||
"""
|
||||
|
||||
on = OVSDBCMSClientEvents()
|
||||
_stored = StoredState()
|
||||
|
||||
def __init__(self, charm, relation_name):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_joined,
|
||||
self._on_ovsdb_cms_relation_joined,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_ovsdb_cms_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_broken,
|
||||
self._on_ovsdb_cms_relation_broken,
|
||||
)
|
||||
|
||||
def _on_ovsdb_cms_relation_joined(self, event):
|
||||
"""Handle ovsdb-cms joined."""
|
||||
logging.debug("OVSDBCMSProvides on_joined")
|
||||
self.on.connected.emit()
|
||||
|
||||
def _on_ovsdb_cms_relation_changed(self, event):
|
||||
"""Handle ovsdb-cms changed."""
|
||||
logging.debug("OVSDBCMSProvides on_changed")
|
||||
self.on.ready.emit()
|
||||
|
||||
def _on_ovsdb_cms_relation_broken(self, event):
|
||||
"""Handle ovsdb-cms broken."""
|
||||
logging.debug("OVSDBCMSProvides on_departed")
|
||||
self.on.goneaway.emit()
|
||||
|
||||
def set_unit_data(self, settings: typing.Dict[str, str]) -> None:
|
||||
"""Publish settings on the peer unit data bag."""
|
||||
relations = self.framework.model.relations[self.relation_name]
|
||||
for relation in relations:
|
||||
for k, v in settings.items():
|
||||
relation.data[self.model.unit][k] = v
|
286
ops-sunbeam/tests/lib/charms/rabbitmq_k8s/v0/rabbitmq.py
Normal file
@@ -0,0 +1,286 @@
|
||||
"""RabbitMQProvides and Requires module.
|
||||
|
||||
This library contains the Requires and Provides classes for handling
|
||||
the rabbitmq interface.
|
||||
|
||||
Import `RabbitMQRequires` in your charm, with the charm object and the
|
||||
relation name:
|
||||
- self
|
||||
- "amqp"
|
||||
|
||||
Also provide two additional parameters to the charm object:
|
||||
- username
|
||||
- vhost
|
||||
|
||||
Three events are also available to respond to:
|
||||
- connected
|
||||
- ready
|
||||
- goneaway
|
||||
|
||||
A basic example showing the usage of this relation follows:
|
||||
|
||||
```
|
||||
from charms.rabbitmq_k8s.v0.rabbitmq import RabbitMQRequires
|
||||
|
||||
class RabbitMQClientCharm(CharmBase):
|
||||
def __init__(self, *args):
|
||||
super().__init__(*args)
|
||||
# RabbitMQ Requires
|
||||
self.amqp = RabbitMQRequires(
|
||||
self, "amqp",
|
||||
username="myusername",
|
||||
vhost="vhostname"
|
||||
)
|
||||
self.framework.observe(
|
||||
self.amqp.on.connected, self._on_amqp_connected)
|
||||
self.framework.observe(
|
||||
self.amqp.on.ready, self._on_amqp_ready)
|
||||
self.framework.observe(
|
||||
self.amqp.on.goneaway, self._on_amqp_goneaway)
|
||||
|
||||
def _on_amqp_connected(self, event):
|
||||
'''React to the RabbitMQ connected event.
|
||||
|
||||
This event happens when a RabbitMQ relation is added to the
|
||||
model before credentials etc have been provided.
|
||||
'''
|
||||
# Do something before the relation is complete
|
||||
pass
|
||||
|
||||
def _on_amqp_ready(self, event):
|
||||
'''React to the RabbitMQ ready event.
|
||||
|
||||
The RabbitMQ interface will use the provided username and vhost for the
|
||||
request to the rabbitmq server.
|
||||
'''
|
||||
# RabbitMQ Relation is ready. Do something with the completed relation.
|
||||
pass
|
||||
|
||||
def _on_amqp_goneaway(self, event):
|
||||
'''React to the RabbitMQ goneaway event.
|
||||
|
||||
This event happens when a RabbitMQ relation is removed.
|
||||
'''
|
||||
# RabbitMQ Relation has goneaway. shutdown services or suchlike
|
||||
pass
|
||||
```
|
||||
"""
|
||||
|
||||
# The unique Charmhub library identifier, never change it
|
||||
LIBID = "45622352791142fd9cf87232e3bd6f2a"
|
||||
|
||||
# Increment this major API version when introducing breaking changes
|
||||
LIBAPI = 0
|
||||
|
||||
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||
# to 0 if you are raising the major API version
|
||||
LIBPATCH = 1
|
||||
|
||||
import logging
|
||||
|
||||
from ops.framework import (
|
||||
StoredState,
|
||||
EventBase,
|
||||
ObjectEvents,
|
||||
EventSource,
|
||||
Object,
|
||||
)
|
||||
|
||||
from ops.model import Relation
|
||||
|
||||
from typing import List
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RabbitMQConnectedEvent(EventBase):
|
||||
"""RabbitMQ connected Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class RabbitMQReadyEvent(EventBase):
|
||||
"""RabbitMQ ready for use Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class RabbitMQGoneAwayEvent(EventBase):
|
||||
"""RabbitMQ relation has gone-away Event"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class RabbitMQServerEvents(ObjectEvents):
|
||||
"""Events class for `on`"""
|
||||
|
||||
connected = EventSource(RabbitMQConnectedEvent)
|
||||
ready = EventSource(RabbitMQReadyEvent)
|
||||
goneaway = EventSource(RabbitMQGoneAwayEvent)
|
||||
|
||||
|
||||
class RabbitMQRequires(Object):
|
||||
"""
|
||||
RabbitMQRequires class
|
||||
"""
|
||||
|
||||
on = RabbitMQServerEvents()
|
||||
|
||||
def __init__(self, charm, relation_name: str, username: str, vhost: str):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self.username = username
|
||||
self.vhost = vhost
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_joined,
|
||||
self._on_amqp_relation_joined,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_amqp_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_departed,
|
||||
self._on_amqp_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_broken,
|
||||
self._on_amqp_relation_broken,
|
||||
)
|
||||
|
||||
def _on_amqp_relation_joined(self, event):
|
||||
"""RabbitMQ relation joined."""
|
||||
logging.debug("RabbitMQRabbitMQRequires on_joined")
|
||||
self.on.connected.emit()
|
||||
self.request_access(self.username, self.vhost)
|
||||
|
||||
def _on_amqp_relation_changed(self, event):
|
||||
"""RabbitMQ relation changed."""
|
||||
logging.debug("RabbitMQRabbitMQRequires on_changed/departed")
|
||||
if self.password:
|
||||
self.on.ready.emit()
|
||||
|
||||
def _on_amqp_relation_broken(self, event):
|
||||
"""RabbitMQ relation broken."""
|
||||
logging.debug("RabbitMQRabbitMQRequires on_broken")
|
||||
self.on.goneaway.emit()
|
||||
|
||||
@property
|
||||
def _amqp_rel(self) -> Relation:
|
||||
"""The RabbitMQ relation."""
|
||||
return self.framework.model.get_relation(self.relation_name)
|
||||
|
||||
@property
|
||||
def password(self) -> str:
|
||||
"""Return the RabbitMQ password from the server side of the relation."""
|
||||
return self._amqp_rel.data[self._amqp_rel.app].get("password")
|
||||
|
||||
@property
|
||||
def hostname(self) -> str:
|
||||
"""Return the hostname from the RabbitMQ relation"""
|
||||
return self._amqp_rel.data[self._amqp_rel.app].get("hostname")
|
||||
|
||||
@property
|
||||
def ssl_port(self) -> str:
|
||||
"""Return the SSL port from the RabbitMQ relation"""
|
||||
return self._amqp_rel.data[self._amqp_rel.app].get("ssl_port")
|
||||
|
||||
@property
|
||||
def ssl_ca(self) -> str:
|
||||
"""Return the SSL port from the RabbitMQ relation"""
|
||||
return self._amqp_rel.data[self._amqp_rel.app].get("ssl_ca")
|
||||
|
||||
@property
|
||||
def hostnames(self) -> List[str]:
|
||||
"""Return a list of remote RMQ hosts from the RabbitMQ relation"""
|
||||
_hosts = []
|
||||
for unit in self._amqp_rel.units:
|
||||
_hosts.append(self._amqp_rel.data[unit].get("ingress-address"))
|
||||
return _hosts
|
||||
|
||||
def request_access(self, username: str, vhost: str) -> None:
|
||||
"""Request access to the RabbitMQ server."""
|
||||
if self.model.unit.is_leader():
|
||||
logging.debug("Requesting RabbitMQ user and vhost")
|
||||
self._amqp_rel.data[self.charm.app]["username"] = username
|
||||
self._amqp_rel.data[self.charm.app]["vhost"] = vhost
|
||||
|
||||
|
||||
class HasRabbitMQClientsEvent(EventBase):
|
||||
"""Has RabbitMQClients Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class ReadyRabbitMQClientsEvent(EventBase):
|
||||
"""RabbitMQClients Ready Event."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class RabbitMQClientEvents(ObjectEvents):
|
||||
"""Events class for `on`"""
|
||||
|
||||
has_amqp_clients = EventSource(HasRabbitMQClientsEvent)
|
||||
ready_amqp_clients = EventSource(ReadyRabbitMQClientsEvent)
|
||||
|
||||
|
||||
class RabbitMQProvides(Object):
|
||||
"""
|
||||
RabbitMQProvides class
|
||||
"""
|
||||
|
||||
on = RabbitMQClientEvents()
|
||||
|
||||
def __init__(self, charm, relation_name, callback):
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm = charm
|
||||
self.relation_name = relation_name
|
||||
self.callback = callback
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_joined,
|
||||
self._on_amqp_relation_joined,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_changed,
|
||||
self._on_amqp_relation_changed,
|
||||
)
|
||||
self.framework.observe(
|
||||
self.charm.on[relation_name].relation_broken,
|
||||
self._on_amqp_relation_broken,
|
||||
)
|
||||
|
||||
def _on_amqp_relation_joined(self, event):
|
||||
"""Handle RabbitMQ joined."""
|
||||
logging.debug("RabbitMQRabbitMQProvides on_joined data={}"
|
||||
.format(event.relation.data[event.relation.app]))
|
||||
self.on.has_amqp_clients.emit()
|
||||
|
||||
def _on_amqp_relation_changed(self, event):
|
||||
"""Handle RabbitMQ changed."""
|
||||
logging.debug("RabbitMQRabbitMQProvides on_changed data={}"
|
||||
.format(event.relation.data[event.relation.app]))
|
||||
# Validate data on the relation
|
||||
if self.username(event) and self.vhost(event):
|
||||
self.on.ready_amqp_clients.emit()
|
||||
if self.charm.unit.is_leader():
|
||||
self.callback(event, self.username(event), self.vhost(event))
|
||||
else:
|
||||
logging.warning("Received RabbitMQ changed event without the "
|
||||
"expected keys ('username', 'vhost') in the "
|
||||
"application data bag. Incompatible charm in "
|
||||
"other end of relation?")
|
||||
|
||||
def _on_amqp_relation_broken(self, event):
|
||||
"""Handle RabbitMQ broken."""
|
||||
logging.debug("RabbitMQRabbitMQProvides on_departed")
|
||||
# TODO clear data on the relation
|
||||
|
||||
def username(self, event):
|
||||
"""Return the RabbitMQ username from the client side of the relation."""
|
||||
return event.relation.data[event.relation.app].get("username")
|
||||
|
||||
def vhost(self, event):
|
||||
"""Return the RabbitMQ vhost from the client side of the relation."""
|
||||
return event.relation.data[event.relation.app].get("vhost")
|
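For orientation, a minimal usage sketch of the requires side of this interface follows. It is not part of the library: the constructor signature shown for the requires class (assumed here to be `RabbitMQRequires(charm, relation_name, username, vhost)`) and the import path are assumptions; check the class definition earlier in this file for the real API.

```python
# Hypothetical sketch of a charm consuming the RabbitMQ interface above.
from ops.charm import CharmBase

# from charms.rabbitmq_k8s.v0.rabbitmq import RabbitMQRequires  # assumed import path


class ExampleConsumerCharm(CharmBase):
    """Toy charm wiring up the requires side of the amqp relation."""

    def __init__(self, framework):
        super().__init__(framework)
        # Assumed constructor signature; see the class definition above.
        self.amqp = RabbitMQRequires(self, "amqp", username="my-svc", vhost="openstack")
        self.framework.observe(self.amqp.on.ready, self._on_amqp_ready)
        self.framework.observe(self.amqp.on.goneaway, self._on_amqp_goneaway)

    def _on_amqp_ready(self, event):
        # Credentials appear in relation data after request_access() has run
        # and the provider has granted the user/vhost.
        url = "rabbit://{}:{}@{}:5672/openstack".format(
            self.amqp.username, self.amqp.password, self.amqp.hostname
        )
        # ...render `url` into the service configuration...

    def _on_amqp_goneaway(self, event):
        # ...drop broker configuration...
        pass
```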
734
ops-sunbeam/tests/lib/charms/traefik_k8s/v2/ingress.py
Normal file
@@ -0,0 +1,734 @@
|
||||
# Copyright 2023 Canonical Ltd.
|
||||
# See LICENSE file for licensing details.
|
||||
|
||||
r"""# Interface Library for ingress.
|
||||
|
||||
This library wraps relation endpoints using the `ingress` interface
|
||||
and provides a Python API for both requesting and providing per-application
|
||||
ingress, with load-balancing occurring across all units.
|
||||
|
||||
## Getting Started
|
||||
|
||||
To get started using the library, you just need to fetch the library using `charmcraft`.
|
||||
|
||||
```shell
|
||||
cd some-charm
|
||||
charmcraft fetch-lib charms.traefik_k8s.v2.ingress
|
||||
```
|
||||
|
||||
In the `metadata.yaml` of the charm, add the following:
|
||||
|
||||
```yaml
|
||||
requires:
|
||||
ingress:
|
||||
interface: ingress
|
||||
limit: 1
|
||||
```
|
||||
|
||||
Then, to initialise the library:
|
||||
|
||||
```python
|
||||
from charms.traefik_k8s.v2.ingress import (IngressPerAppRequirer,
|
||||
IngressPerAppReadyEvent, IngressPerAppRevokedEvent)
|
||||
|
||||
class SomeCharm(CharmBase):
|
||||
def __init__(self, *args):
|
||||
# ...
|
||||
self.ingress = IngressPerAppRequirer(self, port=80)
|
||||
# The following event is triggered when the ingress URL to be used
|
||||
# by this deployment of the `SomeCharm` is ready (or changes).
|
||||
self.framework.observe(
|
||||
self.ingress.on.ready, self._on_ingress_ready
|
||||
)
|
||||
self.framework.observe(
|
||||
self.ingress.on.revoked, self._on_ingress_revoked
|
||||
)
|
||||
|
||||
def _on_ingress_ready(self, event: IngressPerAppReadyEvent):
|
||||
logger.info("This app's ingress URL: %s", event.url)
|
||||
|
||||
def _on_ingress_revoked(self, event: IngressPerAppRevokedEvent):
|
||||
logger.info("This app no longer has ingress")
|
||||
"""
|
||||
import json
|
||||
import logging
|
||||
import socket
|
||||
import typing
|
||||
from dataclasses import dataclass
|
||||
from typing import (
|
||||
Any,
|
||||
Dict,
|
||||
List,
|
||||
MutableMapping,
|
||||
Optional,
|
||||
Sequence,
|
||||
Tuple,
|
||||
)
|
||||
|
||||
import pydantic
|
||||
from ops.charm import CharmBase, RelationBrokenEvent, RelationEvent
|
||||
from ops.framework import EventSource, Object, ObjectEvents, StoredState
|
||||
from ops.model import ModelError, Relation, Unit
|
||||
from pydantic import AnyHttpUrl, BaseModel, Field, validator
|
||||
|
||||
# The unique Charmhub library identifier, never change it
|
||||
LIBID = "e6de2a5cd5b34422a204668f3b8f90d2"
|
||||
|
||||
# Increment this major API version when introducing breaking changes
|
||||
LIBAPI = 2
|
||||
|
||||
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||
# to 0 if you are raising the major API version
|
||||
LIBPATCH = 6
|
||||
|
||||
PYDEPS = ["pydantic<2.0"]
|
||||
|
||||
DEFAULT_RELATION_NAME = "ingress"
|
||||
RELATION_INTERFACE = "ingress"
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
BUILTIN_JUJU_KEYS = {"ingress-address", "private-address", "egress-subnets"}
|
||||
|
||||
|
||||
class DatabagModel(BaseModel):
|
||||
"""Base databag model."""
|
||||
|
||||
class Config:
|
||||
"""Pydantic config."""
|
||||
|
||||
allow_population_by_field_name = True
|
||||
"""Allow instantiating this class by field name (instead of forcing alias)."""
|
||||
|
||||
_NEST_UNDER = None
|
||||
|
||||
@classmethod
|
||||
def load(cls, databag: MutableMapping):
|
||||
"""Load this model from a Juju databag."""
|
||||
if cls._NEST_UNDER:
|
||||
return cls.parse_obj(json.loads(databag[cls._NEST_UNDER]))
|
||||
|
||||
try:
|
||||
data = {k: json.loads(v) for k, v in databag.items() if k not in BUILTIN_JUJU_KEYS}
|
||||
except json.JSONDecodeError as e:
|
||||
msg = f"invalid databag contents: expecting json. {databag}"
|
||||
log.error(msg)
|
||||
raise DataValidationError(msg) from e
|
||||
|
||||
try:
|
||||
return cls.parse_raw(json.dumps(data)) # type: ignore
|
||||
except pydantic.ValidationError as e:
|
||||
msg = f"failed to validate databag: {databag}"
|
||||
log.error(msg, exc_info=True)
|
||||
raise DataValidationError(msg) from e
|
||||
|
||||
def dump(self, databag: Optional[MutableMapping] = None, clear: bool = True):
|
||||
"""Write the contents of this model to Juju databag.
|
||||
|
||||
:param databag: the databag to write the data to.
|
||||
:param clear: ensure the databag is cleared before writing it.
|
||||
"""
|
||||
if clear and databag:
|
||||
databag.clear()
|
||||
|
||||
if databag is None:
|
||||
databag = {}
|
||||
|
||||
if self._NEST_UNDER:
|
||||
databag[self._NEST_UNDER] = self.json()
|
||||
|
||||
dct = self.dict()
|
||||
for key, field in self.__fields__.items(): # type: ignore
|
||||
value = dct[key]
|
||||
databag[field.alias or key] = json.dumps(value)
|
||||
|
||||
return databag
|
||||
|
||||
|
||||
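A minimal sketch of the `load`/`dump` round trip implemented above, using a hypothetical `ExampleAppData` model (the concrete models this library actually uses are declared below); with `_NEST_UNDER` unset, each field is stored JSON-encoded under its own key.

```python
from typing import MutableMapping


class ExampleAppData(DatabagModel):
    """Hypothetical model, used only to illustrate DatabagModel."""

    name: str
    port: int


databag: MutableMapping = {}  # stands in for relation.data[app]
ExampleAppData(name="my-app", port=8080).dump(databag)
# databag now holds JSON-encoded values: {'name': '"my-app"', 'port': '8080'}

restored = ExampleAppData.load(databag)
assert restored.port == 8080
```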
# todo: import these models from charm-relation-interfaces/ingress/v2 instead of redeclaring them
|
||||
class IngressUrl(BaseModel):
|
||||
"""Ingress url schema."""
|
||||
|
||||
url: AnyHttpUrl
|
||||
|
||||
|
||||
class IngressProviderAppData(DatabagModel):
|
||||
"""Ingress application databag schema."""
|
||||
|
||||
ingress: IngressUrl
|
||||
|
||||
|
||||
class ProviderSchema(BaseModel):
|
||||
"""Provider schema for Ingress."""
|
||||
|
||||
app: IngressProviderAppData
|
||||
|
||||
|
||||
class IngressRequirerAppData(DatabagModel):
|
||||
"""Ingress requirer application databag model."""
|
||||
|
||||
model: str = Field(description="The model the application is in.")
|
||||
name: str = Field(description="the name of the app requesting ingress.")
|
||||
port: int = Field(description="The port the app wishes to be exposed.")
|
||||
|
||||
# fields on top of vanilla 'ingress' interface:
|
||||
strip_prefix: Optional[bool] = Field(
|
||||
description="Whether to strip the prefix from the ingress url.", alias="strip-prefix"
|
||||
)
|
||||
redirect_https: Optional[bool] = Field(
|
||||
description="Whether to redirect http traffic to https.", alias="redirect-https"
|
||||
)
|
||||
|
||||
scheme: Optional[str] = Field(
|
||||
default="http", description="What scheme to use in the generated ingress url"
|
||||
)
|
||||
|
||||
@validator("scheme", pre=True)
|
||||
def validate_scheme(cls, scheme): # noqa: N805 # pydantic wants 'cls' as first arg
|
||||
"""Validate scheme arg."""
|
||||
if scheme not in {"http", "https", "h2c"}:
|
||||
raise ValueError("invalid scheme: should be one of `http|https|h2c`")
|
||||
return scheme
|
||||
|
||||
@validator("port", pre=True)
|
||||
def validate_port(cls, port): # noqa: N805 # pydantic wants 'cls' as first arg
|
||||
"""Validate port."""
|
||||
assert isinstance(port, int), type(port)
|
||||
assert 0 < port < 65536, "port out of TCP range"
|
||||
return port
|
||||
|
||||
|
||||
class IngressRequirerUnitData(DatabagModel):
|
||||
"""Ingress requirer unit databag model."""
|
||||
|
||||
host: str = Field(description="Hostname the unit wishes to be exposed.")
|
||||
|
||||
@validator("host", pre=True)
|
||||
def validate_host(cls, host): # noqa: N805 # pydantic wants 'cls' as first arg
|
||||
"""Validate host."""
|
||||
assert isinstance(host, str), type(host)
|
||||
return host
|
||||
|
||||
|
||||
class RequirerSchema(BaseModel):
|
||||
"""Requirer schema for Ingress."""
|
||||
|
||||
app: IngressRequirerAppData
|
||||
unit: IngressRequirerUnitData
|
||||
|
||||
|
||||
class IngressError(RuntimeError):
|
||||
"""Base class for custom errors raised by this library."""
|
||||
|
||||
|
||||
class NotReadyError(IngressError):
|
||||
"""Raised when a relation is not ready."""
|
||||
|
||||
|
||||
class DataValidationError(IngressError):
|
||||
"""Raised when data validation fails on IPU relation data."""
|
||||
|
||||
|
||||
class _IngressPerAppBase(Object):
|
||||
"""Base class for IngressPerUnit interface classes."""
|
||||
|
||||
def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME):
|
||||
super().__init__(charm, relation_name)
|
||||
|
||||
self.charm: CharmBase = charm
|
||||
self.relation_name = relation_name
|
||||
self.app = self.charm.app
|
||||
self.unit = self.charm.unit
|
||||
|
||||
observe = self.framework.observe
|
||||
rel_events = charm.on[relation_name]
|
||||
observe(rel_events.relation_created, self._handle_relation)
|
||||
observe(rel_events.relation_joined, self._handle_relation)
|
||||
observe(rel_events.relation_changed, self._handle_relation)
|
||||
observe(rel_events.relation_broken, self._handle_relation_broken)
|
||||
observe(charm.on.leader_elected, self._handle_upgrade_or_leader) # type: ignore
|
||||
observe(charm.on.upgrade_charm, self._handle_upgrade_or_leader) # type: ignore
|
||||
|
||||
@property
|
||||
def relations(self):
|
||||
"""The list of Relation instances associated with this endpoint."""
|
||||
return list(self.charm.model.relations[self.relation_name])
|
||||
|
||||
def _handle_relation(self, event):
|
||||
"""Subclasses should implement this method to handle a relation update."""
|
||||
pass
|
||||
|
||||
def _handle_relation_broken(self, event):
|
||||
"""Subclasses should implement this method to handle a relation breaking."""
|
||||
pass
|
||||
|
||||
def _handle_upgrade_or_leader(self, event):
|
||||
"""Subclasses should implement this method to handle upgrades or leadership change."""
|
||||
pass
|
||||
|
||||
|
||||
class _IPAEvent(RelationEvent):
|
||||
__args__: Tuple[str, ...] = ()
|
||||
__optional_kwargs__: Dict[str, Any] = {}
|
||||
|
||||
@classmethod
|
||||
def __attrs__(cls):
|
||||
return cls.__args__ + tuple(cls.__optional_kwargs__.keys())
|
||||
|
||||
def __init__(self, handle, relation, *args, **kwargs):
|
||||
super().__init__(handle, relation)
|
||||
|
||||
if not len(self.__args__) == len(args):
|
||||
raise TypeError("expected {} args, got {}".format(len(self.__args__), len(args)))
|
||||
|
||||
for attr, obj in zip(self.__args__, args):
|
||||
setattr(self, attr, obj)
|
||||
for attr, default in self.__optional_kwargs__.items():
|
||||
obj = kwargs.get(attr, default)
|
||||
setattr(self, attr, obj)
|
||||
|
||||
def snapshot(self):
|
||||
dct = super().snapshot()
|
||||
for attr in self.__attrs__():
|
||||
obj = getattr(self, attr)
|
||||
try:
|
||||
dct[attr] = obj
|
||||
except ValueError as e:
|
||||
raise ValueError(
|
||||
"cannot automagically serialize {}: "
|
||||
"override this method and do it "
|
||||
"manually.".format(obj)
|
||||
) from e
|
||||
|
||||
return dct
|
||||
|
||||
def restore(self, snapshot) -> None:
|
||||
super().restore(snapshot)
|
||||
for attr, obj in snapshot.items():
|
||||
setattr(self, attr, obj)
|
||||
|
||||
|
||||
class IngressPerAppDataProvidedEvent(_IPAEvent):
|
||||
"""Event representing that ingress data has been provided for an app."""
|
||||
|
||||
__args__ = ("name", "model", "hosts", "strip_prefix", "redirect_https")
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
name: Optional[str] = None
|
||||
model: Optional[str] = None
|
||||
# sequence of hostname, port dicts
|
||||
hosts: Sequence["IngressRequirerUnitData"] = ()
|
||||
strip_prefix: bool = False
|
||||
redirect_https: bool = False
|
||||
|
||||
|
||||
class IngressPerAppDataRemovedEvent(RelationEvent):
|
||||
"""Event representing that ingress data has been removed for an app."""
|
||||
|
||||
|
||||
class IngressPerAppProviderEvents(ObjectEvents):
|
||||
"""Container for IPA Provider events."""
|
||||
|
||||
data_provided = EventSource(IngressPerAppDataProvidedEvent)
|
||||
data_removed = EventSource(IngressPerAppDataRemovedEvent)
|
||||
|
||||
|
||||
@dataclass
|
||||
class IngressRequirerData:
|
||||
"""Data exposed by the ingress requirer to the provider."""
|
||||
|
||||
app: "IngressRequirerAppData"
|
||||
units: List["IngressRequirerUnitData"]
|
||||
|
||||
|
||||
class TlsProviderType(typing.Protocol):
|
||||
"""Placeholder."""
|
||||
|
||||
@property
|
||||
def enabled(self) -> bool: # type: ignore
|
||||
"""Placeholder."""
|
||||
|
||||
|
||||
class IngressPerAppProvider(_IngressPerAppBase):
|
||||
"""Implementation of the provider of ingress."""
|
||||
|
||||
on = IngressPerAppProviderEvents() # type: ignore
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
charm: CharmBase,
|
||||
relation_name: str = DEFAULT_RELATION_NAME,
|
||||
):
|
||||
"""Constructor for IngressPerAppProvider.
|
||||
|
||||
Args:
|
||||
charm: The charm that is instantiating the instance.
|
||||
relation_name: The name of the relation endpoint to bind to
|
||||
(defaults to "ingress").
|
||||
"""
|
||||
super().__init__(charm, relation_name)
|
||||
|
||||
def _handle_relation(self, event):
|
||||
# created, joined or changed: if remote side has sent the required data:
|
||||
# notify listeners.
|
||||
if self.is_ready(event.relation):
|
||||
data = self.get_data(event.relation)
|
||||
self.on.data_provided.emit( # type: ignore
|
||||
event.relation,
|
||||
data.app.name,
|
||||
data.app.model,
|
||||
[unit.dict() for unit in data.units],
|
||||
data.app.strip_prefix or False,
|
||||
data.app.redirect_https or False,
|
||||
)
|
||||
|
||||
def _handle_relation_broken(self, event):
|
||||
self.on.data_removed.emit(event.relation) # type: ignore
|
||||
|
||||
def wipe_ingress_data(self, relation: Relation):
|
||||
"""Clear ingress data from relation."""
|
||||
assert self.unit.is_leader(), "only leaders can do this"
|
||||
try:
|
||||
relation.data
|
||||
except ModelError as e:
|
||||
log.warning(
|
||||
"error {} accessing relation data for {!r}. "
|
||||
"Probably a ghost of a dead relation is still "
|
||||
"lingering around.".format(e, relation.name)
|
||||
)
|
||||
return
|
||||
del relation.data[self.app]["ingress"]
|
||||
|
||||
def _get_requirer_units_data(self, relation: Relation) -> List["IngressRequirerUnitData"]:
|
||||
"""Fetch and validate the requirer's app databag."""
|
||||
out: List["IngressRequirerUnitData"] = []
|
||||
|
||||
unit: Unit
|
||||
for unit in relation.units:
|
||||
databag = relation.data[unit]
|
||||
try:
|
||||
data = IngressRequirerUnitData.load(databag)
|
||||
out.append(data)
|
||||
except pydantic.ValidationError:
|
||||
log.info(f"failed to validate remote unit data for {unit}")
|
||||
raise
|
||||
return out
|
||||
|
||||
@staticmethod
|
||||
def _get_requirer_app_data(relation: Relation) -> "IngressRequirerAppData":
|
||||
"""Fetch and validate the requirer's app databag."""
|
||||
app = relation.app
|
||||
if app is None:
|
||||
raise NotReadyError(relation)
|
||||
|
||||
databag = relation.data[app]
|
||||
return IngressRequirerAppData.load(databag)
|
||||
|
||||
def get_data(self, relation: Relation) -> IngressRequirerData:
|
||||
"""Fetch the remote (requirer) app and units' databags."""
|
||||
try:
|
||||
return IngressRequirerData(
|
||||
self._get_requirer_app_data(relation), self._get_requirer_units_data(relation)
|
||||
)
|
||||
except (pydantic.ValidationError, DataValidationError) as e:
|
||||
raise DataValidationError("failed to validate ingress requirer data") from e
|
||||
|
||||
def is_ready(self, relation: Optional[Relation] = None):
|
||||
"""The Provider is ready if the requirer has sent valid data."""
|
||||
if not relation:
|
||||
return any(map(self.is_ready, self.relations))
|
||||
|
||||
try:
|
||||
self.get_data(relation)
|
||||
except (DataValidationError, NotReadyError) as e:
|
||||
log.debug("Provider not ready; validation error encountered: %s" % str(e))
|
||||
return False
|
||||
return True
|
||||
|
||||
def _published_url(self, relation: Relation) -> Optional["IngressProviderAppData"]:
|
||||
"""Fetch and validate this app databag; return the ingress url."""
|
||||
if not self.is_ready(relation) or not self.unit.is_leader():
|
||||
# Handle edge case where remote app name can be missing, e.g.,
|
||||
# relation_broken events.
|
||||
# Also, only leader units can read own app databags.
|
||||
# FIXME https://github.com/canonical/traefik-k8s-operator/issues/34
|
||||
return None
|
||||
|
||||
# fetch the provider's app databag
|
||||
databag = relation.data[self.app]
|
||||
if not databag.get("ingress"):
|
||||
raise NotReadyError("This application did not `publish_url` yet.")
|
||||
|
||||
return IngressProviderAppData.load(databag)
|
||||
|
||||
def publish_url(self, relation: Relation, url: str):
|
||||
"""Publish to the app databag the ingress url."""
|
||||
ingress_url = {"url": url}
|
||||
IngressProviderAppData.parse_obj({"ingress": ingress_url}).dump(relation.data[self.app])
|
||||
|
||||
@property
|
||||
def proxied_endpoints(self) -> Dict[str, str]:
|
||||
"""Returns the ingress settings provided to applications by this IngressPerAppProvider.
|
||||
|
||||
For example, when this IngressPerAppProvider has provided the
|
||||
`http://foo.bar/my-model.my-app` URL to the my-app application, the returned dictionary
|
||||
will be:
|
||||
|
||||
```
|
||||
{
|
||||
"my-app": {
|
||||
"url": "http://foo.bar/my-model.my-app"
|
||||
}
|
||||
}
|
||||
```
|
||||
"""
|
||||
results = {}
|
||||
|
||||
for ingress_relation in self.relations:
|
||||
if not ingress_relation.app:
|
||||
log.warning(
|
||||
f"no app in relation {ingress_relation} when fetching proxied endpoints: skipping"
|
||||
)
|
||||
continue
|
||||
try:
|
||||
ingress_data = self._published_url(ingress_relation)
|
||||
except NotReadyError:
|
||||
log.warning(
|
||||
f"no published url found in {ingress_relation}: "
|
||||
f"traefik didn't publish_url yet to this relation."
|
||||
)
|
||||
continue
|
||||
|
||||
if not ingress_data:
|
||||
log.warning(f"relation {ingress_relation} not ready yet: try again in some time.")
|
||||
continue
|
||||
|
||||
results[ingress_relation.app.name] = ingress_data.ingress.dict()
|
||||
return results
|
||||
|
||||
|
||||
class IngressPerAppReadyEvent(_IPAEvent):
|
||||
"""Event representing that ingress for an app is ready."""
|
||||
|
||||
__args__ = ("url",)
|
||||
if typing.TYPE_CHECKING:
|
||||
url: Optional[str] = None
|
||||
|
||||
|
||||
class IngressPerAppRevokedEvent(RelationEvent):
|
||||
"""Event representing that ingress for an app has been revoked."""
|
||||
|
||||
|
||||
class IngressPerAppRequirerEvents(ObjectEvents):
|
||||
"""Container for IPA Requirer events."""
|
||||
|
||||
ready = EventSource(IngressPerAppReadyEvent)
|
||||
revoked = EventSource(IngressPerAppRevokedEvent)
|
||||
|
||||
|
||||
class IngressPerAppRequirer(_IngressPerAppBase):
|
||||
"""Implementation of the requirer of the ingress relation."""
|
||||
|
||||
on = IngressPerAppRequirerEvents() # type: ignore
|
||||
|
||||
# used to prevent spurious urls to be sent out if the event we're currently
|
||||
# handling is a relation-broken one.
|
||||
_stored = StoredState()
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
charm: CharmBase,
|
||||
relation_name: str = DEFAULT_RELATION_NAME,
|
||||
*,
|
||||
host: Optional[str] = None,
|
||||
port: Optional[int] = None,
|
||||
strip_prefix: bool = False,
|
||||
redirect_https: bool = False,
|
||||
# fixme: this is horrible UX.
|
||||
# shall we switch to manually calling provide_ingress_requirements with all args when ready?
|
||||
scheme: typing.Callable[[], str] = lambda: "http",
|
||||
):
|
||||
"""Constructor for IngressRequirer.
|
||||
|
||||
The request args can be used to specify the ingress properties when the
|
||||
instance is created. If any are set, at least `port` is required, and
|
||||
they will be sent to the ingress provider as soon as it is available.
|
||||
All request args must be given as keyword args.
|
||||
|
||||
Args:
|
||||
charm: the charm that is instantiating the library.
|
||||
relation_name: the name of the relation endpoint to bind to (defaults to `ingress`);
|
||||
relation must be of interface type `ingress` and have "limit: 1")
|
||||
host: Hostname to be used by the ingress provider to address the requiring
|
||||
application; if unspecified, the default Kubernetes service name will be used.
|
||||
strip_prefix: configure Traefik to strip the path prefix.
|
||||
redirect_https: redirect incoming requests to HTTPS.
|
||||
scheme: callable returning the scheme to use when constructing the ingress url.
|
||||
|
||||
Request Args:
|
||||
port: the port of the service
|
||||
"""
|
||||
super().__init__(charm, relation_name)
|
||||
self.charm: CharmBase = charm
|
||||
self.relation_name = relation_name
|
||||
self._strip_prefix = strip_prefix
|
||||
self._redirect_https = redirect_https
|
||||
self._get_scheme = scheme
|
||||
|
||||
self._stored.set_default(current_url=None) # type: ignore
|
||||
|
||||
# if instantiated with a port, and we are related, then
|
||||
# we immediately publish our ingress data to speed up the process.
|
||||
if port:
|
||||
self._auto_data = host, port
|
||||
else:
|
||||
self._auto_data = None
|
||||
|
||||
def _handle_relation(self, event):
|
||||
# created, joined or changed: if we have auto data: publish it
|
||||
self._publish_auto_data()
|
||||
|
||||
if self.is_ready():
|
||||
# Avoid spurious events, emit only when there is a NEW URL available
|
||||
new_url = (
|
||||
None
|
||||
if isinstance(event, RelationBrokenEvent)
|
||||
else self._get_url_from_relation_data()
|
||||
)
|
||||
if self._stored.current_url != new_url: # type: ignore
|
||||
self._stored.current_url = new_url # type: ignore
|
||||
self.on.ready.emit(event.relation, new_url) # type: ignore
|
||||
|
||||
def _handle_relation_broken(self, event):
|
||||
self._stored.current_url = None # type: ignore
|
||||
self.on.revoked.emit(event.relation) # type: ignore
|
||||
|
||||
def _handle_upgrade_or_leader(self, event):
|
||||
"""On upgrade/leadership change: ensure we publish the data we have."""
|
||||
self._publish_auto_data()
|
||||
|
||||
def is_ready(self):
|
||||
"""The Requirer is ready if the Provider has sent valid data."""
|
||||
try:
|
||||
return bool(self._get_url_from_relation_data())
|
||||
except DataValidationError as e:
|
||||
log.debug("Requirer not ready; validation error encountered: %s" % str(e))
|
||||
return False
|
||||
|
||||
def _publish_auto_data(self):
|
||||
if self._auto_data:
|
||||
host, port = self._auto_data
|
||||
self.provide_ingress_requirements(host=host, port=port)
|
||||
|
||||
def provide_ingress_requirements(
|
||||
self,
|
||||
*,
|
||||
scheme: Optional[str] = None,
|
||||
host: Optional[str] = None,
|
||||
port: int,
|
||||
):
|
||||
"""Publishes the data that Traefik needs to provide ingress.
|
||||
|
||||
Args:
|
||||
scheme: Scheme to be used; if unspecified, use the one used by __init__.
|
||||
host: Hostname to be used by the ingress provider to address the
|
||||
requirer unit; if unspecified, FQDN will be used instead
|
||||
port: the port of the service (required)
|
||||
"""
|
||||
for relation in self.relations:
|
||||
self._provide_ingress_requirements(scheme, host, port, relation)
|
||||
|
||||
def _provide_ingress_requirements(
|
||||
self,
|
||||
scheme: Optional[str],
|
||||
host: Optional[str],
|
||||
port: int,
|
||||
relation: Relation,
|
||||
):
|
||||
if self.unit.is_leader():
|
||||
self._publish_app_data(scheme, port, relation)
|
||||
|
||||
self._publish_unit_data(host, relation)
|
||||
|
||||
def _publish_unit_data(
|
||||
self,
|
||||
host: Optional[str],
|
||||
relation: Relation,
|
||||
):
|
||||
if not host:
|
||||
host = socket.getfqdn()
|
||||
|
||||
unit_databag = relation.data[self.unit]
|
||||
try:
|
||||
IngressRequirerUnitData(host=host).dump(unit_databag)
|
||||
except pydantic.ValidationError as e:
|
||||
msg = "failed to validate unit data"
|
||||
log.info(msg, exc_info=True) # log to INFO because this might be expected
|
||||
raise DataValidationError(msg) from e
|
||||
|
||||
def _publish_app_data(
|
||||
self,
|
||||
scheme: Optional[str],
|
||||
port: int,
|
||||
relation: Relation,
|
||||
):
|
||||
# assumes leadership!
|
||||
app_databag = relation.data[self.app]
|
||||
|
||||
if not scheme:
|
||||
# If scheme was not provided, use the one given to the constructor.
|
||||
scheme = self._get_scheme()
|
||||
|
||||
try:
|
||||
IngressRequirerAppData( # type: ignore # pyright does not like aliases
|
||||
model=self.model.name,
|
||||
name=self.app.name,
|
||||
scheme=scheme,
|
||||
port=port,
|
||||
strip_prefix=self._strip_prefix, # type: ignore # pyright does not like aliases
|
||||
redirect_https=self._redirect_https, # type: ignore # pyright does not like aliases
|
||||
).dump(app_databag)
|
||||
except pydantic.ValidationError as e:
|
||||
msg = "failed to validate app data"
|
||||
log.info(msg, exc_info=True) # log to INFO because this might be expected
|
||||
raise DataValidationError(msg) from e
|
||||
|
||||
@property
|
||||
def relation(self):
|
||||
"""The established Relation instance, or None."""
|
||||
return self.relations[0] if self.relations else None
|
||||
|
||||
def _get_url_from_relation_data(self) -> Optional[str]:
|
||||
"""The full ingress URL to reach the current unit.
|
||||
|
||||
Returns None if the URL isn't available yet.
|
||||
"""
|
||||
relation = self.relation
|
||||
if not relation or not relation.app:
|
||||
return None
|
||||
|
||||
# fetch the provider's app databag
|
||||
try:
|
||||
databag = relation.data[relation.app]
|
||||
except ModelError as e:
|
||||
log.debug(
|
||||
f"Error {e} attempting to read remote app data; "
|
||||
f"probably we are in a relation_departed hook"
|
||||
)
|
||||
return None
|
||||
|
||||
if not databag: # not ready yet
|
||||
return None
|
||||
|
||||
return str(IngressProviderAppData.load(databag).ingress.url)
|
||||
|
||||
@property
|
||||
def url(self) -> Optional[str]:
|
||||
"""The full ingress URL to reach the current unit.
|
||||
|
||||
Returns None if the URL isn't available yet.
|
||||
"""
|
||||
data = (
|
||||
typing.cast(Optional[str], self._stored.current_url) # type: ignore
|
||||
or self._get_url_from_relation_data()
|
||||
)
|
||||
return data
|
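The module docstring above covers the requirer side; for completeness, here is a hedged sketch of the provider side of the same interface. The charm and URL shown are illustrative only; in practice Traefik derives the published URL from its own configuration.

```python
# Hypothetical provider-side sketch (Traefik plays this role in practice).
from ops.charm import CharmBase

from charms.traefik_k8s.v2.ingress import IngressPerAppProvider


class ExampleIngressProviderCharm(CharmBase):
    """Toy provider charm publishing ingress URLs."""

    def __init__(self, framework):
        super().__init__(framework)
        self.ingress = IngressPerAppProvider(self)
        self.framework.observe(self.ingress.on.data_provided, self._on_data_provided)
        self.framework.observe(self.ingress.on.data_removed, self._on_data_removed)

    def _on_data_provided(self, event):
        if not self.unit.is_leader():
            return
        data = self.ingress.get_data(event.relation)
        # Illustrative URL; a real provider builds this from its external address.
        url = f"http://ingress.example/{data.app.model}-{data.app.name}"
        self.ingress.publish_url(event.relation, url)

    def _on_data_removed(self, event):
        if self.unit.is_leader():
            self.ingress.wipe_ingress_data(event.relation)
```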
18
ops-sunbeam/tests/scenario_tests/__init__.py
Normal file
@@ -0,0 +1,18 @@
|
||||
# Copyright 2021 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Unit tests for aso."""
|
||||
import ops.testing
|
||||
|
||||
ops.testing.SIMULATE_CAN_CONNECT = True
|
142
ops-sunbeam/tests/scenario_tests/scenario_utils.py
Normal file
@@ -0,0 +1,142 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Copyright 2023 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Utilities for writing sunbeam scenario tests."""
|
||||
|
||||
import functools
|
||||
import itertools
|
||||
|
||||
from scenario import (
|
||||
Relation,
|
||||
Secret,
|
||||
)
|
||||
|
||||
# Data used to create Relation objects. If an incomplete relation is being
|
||||
# created only the 'endpoint', 'interface' and 'remote_app_name' key are
|
||||
# used.
|
||||
default_relations = {
|
||||
"amqp": {
|
||||
"endpoint": "amqp",
|
||||
"interface": "rabbitmq",
|
||||
"remote_app_name": "rabbitmq",
|
||||
"remote_app_data": {"password": "foo"},
|
||||
"remote_units_data": {0: {"ingress-address": "host1"}},
|
||||
},
|
||||
"identity-credentials": {
|
||||
"endpoint": "identity-credentials",
|
||||
"interface": "keystone-credentials",
|
||||
"remote_app_name": "keystone",
|
||||
"remote_app_data": {
|
||||
"api-version": "3",
|
||||
"auth-host": "keystone.local",
|
||||
"auth-port": "12345",
|
||||
"auth-protocol": "http",
|
||||
"internal-host": "keystone.internal",
|
||||
"internal-port": "5000",
|
||||
"internal-protocol": "http",
|
||||
"credentials": "foo",
|
||||
"project-name": "user-project",
|
||||
"project-id": "uproj-id",
|
||||
"user-domain-name": "udomain-name",
|
||||
"user-domain-id": "udomain-id",
|
||||
"project-domain-name": "pdomain_-ame",
|
||||
"project-domain-id": "pdomain-id",
|
||||
"region": "region12",
|
||||
"public-endpoint": "http://10.20.21.11:80/openstack-keystone",
|
||||
"internal-endpoint": "http://10.153.2.45:80/openstack-keystone",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def relation_combinations(
|
||||
metadata, one_missing=False, incomplete_relation=False
|
||||
):
|
||||
"""Based on a charms metadata generate tuples of relations.
|
||||
|
||||
:param metadata: Dict of charm metadata
|
||||
:param one_missing: Bool if set then each unique relations tuple will be
|
||||
missing one relation.
|
||||
:param incomplete_relation: Bool if set then each unique relations tuple will
|
||||
include one relation that has missing relation
|
||||
data
|
||||
"""
|
||||
_incomplete_relations = []
|
||||
_complete_relations = []
|
||||
_relation_pairs = []
|
||||
for rel_name in metadata.get("requires", {}):
|
||||
rel = default_relations[rel_name]
|
||||
complete_relation = Relation(
|
||||
endpoint=rel["endpoint"],
|
||||
interface=rel["interface"],
|
||||
remote_app_name=rel["remote_app_name"],
|
||||
local_unit_data=rel.get("local_unit_data", {}),
|
||||
remote_app_data=rel.get("remote_app_data", {}),
|
||||
remote_units_data=rel.get("remote_units_data", {}),
|
||||
)
|
||||
relation_missing_data = Relation(
|
||||
endpoint=rel["endpoint"],
|
||||
interface=rel["interface"],
|
||||
remote_app_name=rel["remote_app_name"],
|
||||
)
|
||||
_incomplete_relations.append(relation_missing_data)
|
||||
_complete_relations.append(complete_relation)
|
||||
_relation_pairs.append([relation_missing_data, complete_relation])
|
||||
|
||||
if not (one_missing or incomplete_relation):
|
||||
return [tuple(_complete_relations)]
|
||||
if incomplete_relation:
|
||||
relations = list(itertools.product(*_relation_pairs))
|
||||
relations.remove(tuple(_complete_relations))
|
||||
return relations
|
||||
if one_missing:
|
||||
event_count = range(len(_incomplete_relations))
|
||||
else:
|
||||
event_count = range(len(_incomplete_relations) + 1)
|
||||
combinations = []
|
||||
for i in event_count:
|
||||
combinations.extend(
|
||||
list(itertools.combinations(_incomplete_relations, i))
|
||||
)
|
||||
return combinations
|
||||
|
||||
|
||||
missing_relation = functools.partial(
|
||||
relation_combinations, one_missing=True, incomplete_relation=False
|
||||
)
|
||||
incomplete_relation = functools.partial(
|
||||
relation_combinations, one_missing=False, incomplete_relation=True
|
||||
)
|
||||
complete_relation = functools.partial(
|
||||
relation_combinations, one_missing=False, incomplete_relation=False
|
||||
)
|
||||
|
||||
|
||||
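A short sketch of how these helpers feed pytest parametrisation (the metadata dict is a trimmed, hypothetical example; its relation names must exist in `default_relations` above):

```python
import pytest

example_metadata = {
    "requires": {
        "amqp": {"interface": "rabbitmq"},
        "identity-credentials": {"interface": "keystone-credentials"},
    },
}


@pytest.mark.parametrize("relations", missing_relation(example_metadata))
def test_blocked_when_relation_missing(relations):
    # Each `relations` tuple is missing (at least) one required relation,
    # so the charm under test is expected to end up blocked.
    ...
```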
def get_keystone_secret_definition(relations):
|
||||
"""Create the keystone identity secret."""
|
||||
ident_rel_id = None
|
||||
secret = None
|
||||
for relation in relations:
|
||||
if relation.remote_app_name == "keystone":
|
||||
ident_rel_id = relation.relation_id
|
||||
if ident_rel_id:
|
||||
secret = Secret(
|
||||
id="foo",
|
||||
contents={0: {"username": "svcuser1", "password": "svcpass1"}},
|
||||
owner="keystone", # or 'app'
|
||||
remote_grants={ident_rel_id: {"my-service/0"}},
|
||||
)
|
||||
return secret
|
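Taken together, the relation tuples and the keystone secret are what the scenario tests below feed into a `State`; a condensed sketch of that pattern (the charm fixture arguments are placeholders):

```python
from scenario import Context, State


def run_config_changed(charm_type, charm_meta, relations):
    """Drive a config-changed event against the given relations (sketch)."""
    ctxt = Context(charm_type=charm_type, meta=charm_meta)
    state = State(
        leader=True,
        relations=list(relations),
        secrets=[get_keystone_secret_definition(relations)],
    )
    return ctxt.run("config-changed", state)
```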
192
ops-sunbeam/tests/scenario_tests/test_fixtures.py
Normal file
@@ -0,0 +1,192 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Copyright 2023 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Charm definitions for scenatio tests."""
|
||||
|
||||
import ops_sunbeam.charm as sunbeam_charm
|
||||
import ops_sunbeam.container_handlers as sunbeam_chandlers
|
||||
import ops_sunbeam.core as sunbeam_core
|
||||
|
||||
|
||||
class MyCharm(sunbeam_charm.OSBaseOperatorCharm):
|
||||
"""Test charm for testing OSBaseOperatorCharm."""
|
||||
|
||||
service_name = "my-service"
|
||||
|
||||
|
||||
MyCharm_Metadata = {
|
||||
"name": "my-service",
|
||||
"version": "3",
|
||||
"bases": {"name": "ubuntu", "channel": "20.04/stable"},
|
||||
"tags": ["openstack", "identity", "misc"],
|
||||
"subordinate": False,
|
||||
}
|
||||
|
||||
|
||||
class MyCharmMulti(sunbeam_charm.OSBaseOperatorCharm):
|
||||
"""Test charm for testing OSBaseOperatorCharm."""
|
||||
|
||||
# mandatory_relations = {"amqp", "database", "identity-credentials"}
|
||||
mandatory_relations = {"amqp", "identity-credentials"}
|
||||
service_name = "my-service"
|
||||
|
||||
|
||||
MyCharmMulti_Metadata = {
|
||||
"name": "my-service",
|
||||
"version": "3",
|
||||
"bases": {"name": "ubuntu", "channel": "20.04/stable"},
|
||||
"tags": ["openstack", "identity", "misc"],
|
||||
"subordinate": False,
|
||||
"requires": {
|
||||
# "database": {"interface": "mysql_client", "limit": 1},
|
||||
"amqp": {"interface": "rabbitmq"},
|
||||
"identity-credentials": {
|
||||
"interface": "keystone-credentials",
|
||||
"limit": 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class NovaSchedulerPebbleHandler(sunbeam_chandlers.ServicePebbleHandler):
|
||||
"""Pebble handler for Nova scheduler."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.enable_service_check = True
|
||||
|
||||
def get_layer(self) -> dict:
|
||||
"""Nova Scheduler service layer.
|
||||
|
||||
:returns: pebble layer configuration for scheduler service
|
||||
:rtype: dict
|
||||
"""
|
||||
return {
|
||||
"summary": "nova scheduler layer",
|
||||
"description": "pebble configuration for nova services",
|
||||
"services": {
|
||||
"nova-scheduler": {
|
||||
"override": "replace",
|
||||
"summary": "Nova Scheduler",
|
||||
"command": "nova-scheduler",
|
||||
"startup": "enabled",
|
||||
"user": "nova",
|
||||
"group": "nova",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class NovaConductorPebbleHandler(sunbeam_chandlers.ServicePebbleHandler):
|
||||
"""Pebble handler for Nova Conductor container."""
|
||||
|
||||
def get_layer(self):
|
||||
"""Nova Conductor service.
|
||||
|
||||
:returns: pebble service layer configuration for conductor service
|
||||
:rtype: dict
|
||||
"""
|
||||
return {
|
||||
"summary": "nova conductor layer",
|
||||
"description": "pebble configuration for nova services",
|
||||
"services": {
|
||||
"nova-conductor": {
|
||||
"override": "replace",
|
||||
"summary": "Nova Conductor",
|
||||
"command": "nova-conductor",
|
||||
"startup": "enabled",
|
||||
"user": "nova",
|
||||
"group": "nova",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class MyCharmK8S(sunbeam_charm.OSBaseOperatorCharmK8S):
|
||||
"""Test charm for testing OSBaseOperatorCharm."""
|
||||
|
||||
# mandatory_relations = {"amqp", "database", "identity-credentials"}
|
||||
mandatory_relations = {"amqp", "identity-credentials"}
|
||||
service_name = "my-service"
|
||||
|
||||
def get_pebble_handlers(self):
|
||||
"""Pebble handlers for the operator."""
|
||||
return [
|
||||
NovaSchedulerPebbleHandler(
|
||||
self,
|
||||
"container1",
|
||||
"container1-svc",
|
||||
self.container_configs,
|
||||
"/tmp",
|
||||
self.configure_charm,
|
||||
),
|
||||
NovaConductorPebbleHandler(
|
||||
self,
|
||||
"container2",
|
||||
"container2-svc",
|
||||
self.container_configs,
|
||||
"/tmp",
|
||||
self.configure_charm,
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
MyCharmK8S_Metadata = {
|
||||
"name": "my-service",
|
||||
"version": "3",
|
||||
"bases": {"name": "ubuntu", "channel": "20.04/stable"},
|
||||
"tags": ["openstack", "identity", "misc"],
|
||||
"subordinate": False,
|
||||
"containers": {
|
||||
"container1": {"resource": "container1-image"},
|
||||
"container2": {"resource": "container2-image"},
|
||||
},
|
||||
"requires": {
|
||||
# "database": {"interface": "mysql_client", "limit": 1},
|
||||
"amqp": {"interface": "rabbitmq"},
|
||||
"identity-credentials": {
|
||||
"interface": "keystone-credentials",
|
||||
"limit": 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class MyCharmK8SAPI(sunbeam_charm.OSBaseOperatorCharmK8S):
|
||||
"""Test charm for testing OSBaseOperatorCharm."""
|
||||
|
||||
# mandatory_relations = {"amqp", "database", "identity-credentials"}
|
||||
mandatory_relations = {"amqp", "identity-credentials"}
|
||||
service_name = "my-service"
|
||||
|
||||
|
||||
MyCharmK8SAPI_Metadata = {
|
||||
"name": "my-service",
|
||||
"version": "3",
|
||||
"bases": {"name": "ubuntu", "channel": "20.04/stable"},
|
||||
"tags": ["openstack", "identity", "misc"],
|
||||
"subordinate": False,
|
||||
"containers": {
|
||||
"my-service": {"resource": "container1-image"},
|
||||
},
|
||||
"requires": {
|
||||
# "database": {"interface": "mysql_client", "limit": 1},
|
||||
"amqp": {"interface": "rabbitmq"},
|
||||
"identity-credentials": {
|
||||
"interface": "keystone-credentials",
|
||||
},
|
||||
},
|
||||
}
|
383
ops-sunbeam/tests/scenario_tests/test_scenario.py
Normal file
@@ -0,0 +1,383 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Copyright 2023 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Test charms for unit tests."""
|
||||
from . import test_fixtures
|
||||
from . import scenario_utils as utils
|
||||
import re
|
||||
import sys
|
||||
|
||||
sys.path.append("tests/lib") # noqa
|
||||
sys.path.append("src") # noqa
|
||||
|
||||
import pytest
|
||||
from scenario import (
|
||||
State,
|
||||
Context,
|
||||
Container,
|
||||
Mount,
|
||||
)
|
||||
from ops.model import (
|
||||
ActiveStatus,
|
||||
MaintenanceStatus,
|
||||
)
|
||||
|
||||
|
||||
class TestOSBaseOperatorCharmScenarios:
|
||||
@pytest.mark.parametrize("leader", (True, False))
|
||||
def test_no_relations(self, leader):
|
||||
"""Check charm with no relations becomes active."""
|
||||
state = State(leader=leader, config={}, containers=[])
|
||||
ctxt = Context(
|
||||
charm_type=test_fixtures.MyCharm,
|
||||
meta=test_fixtures.MyCharm_Metadata,
|
||||
)
|
||||
out = ctxt.run("install", state)
|
||||
assert out.unit_status == MaintenanceStatus(
|
||||
"(bootstrap) Service not bootstrapped"
|
||||
)
|
||||
out = ctxt.run("config-changed", state)
|
||||
assert out.unit_status == ActiveStatus("")
|
||||
|
||||
@pytest.mark.parametrize("leader", (True, False))
|
||||
@pytest.mark.parametrize(
|
||||
"relations",
|
||||
utils.missing_relation(test_fixtures.MyCharmMulti_Metadata),
|
||||
)
|
||||
def test_relation_missing(self, relations, leader):
|
||||
"""Check charm with a missing relation is blocked."""
|
||||
ctxt = Context(
|
||||
charm_type=test_fixtures.MyCharmMulti,
|
||||
meta=test_fixtures.MyCharmMulti_Metadata,
|
||||
)
|
||||
state = State(
|
||||
leader=leader,
|
||||
config={},
|
||||
containers=[],
|
||||
relations=list(relations),
|
||||
secrets=[utils.get_keystone_secret_definition(relations)],
|
||||
)
|
||||
out = ctxt.run("config-changed", state)
|
||||
assert out.unit_status.name == "blocked"
|
||||
assert re.match(r".*integration missing", out.unit_status.message)
|
||||
|
||||
@pytest.mark.parametrize("leader", (True, False))
|
||||
@pytest.mark.parametrize(
|
||||
"relations",
|
||||
utils.incomplete_relation(test_fixtures.MyCharmMulti_Metadata),
|
||||
)
|
||||
def test_relation_incomplete(self, relations, leader):
|
||||
"""Check charm with an incomplete relation is waiting."""
|
||||
ctxt = Context(
|
||||
charm_type=test_fixtures.MyCharmMulti,
|
||||
meta=test_fixtures.MyCharmMulti_Metadata,
|
||||
)
|
||||
state = State(
|
||||
leader=leader,
|
||||
config={},
|
||||
containers=[],
|
||||
relations=list(relations),
|
||||
secrets=[utils.get_keystone_secret_definition(relations)],
|
||||
)
|
||||
out = ctxt.run("config-changed", state)
|
||||
assert out.unit_status.name == "waiting"
|
||||
assert re.match(
|
||||
r".*Not all relations are ready", out.unit_status.message
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize("leader", (True, False))
|
||||
@pytest.mark.parametrize(
|
||||
"relations",
|
||||
utils.complete_relation(test_fixtures.MyCharmMulti_Metadata),
|
||||
)
|
||||
def test_relations_complete(self, relations, leader):
|
||||
"""Check charm with complete relations is active."""
|
||||
ctxt = Context(
|
||||
charm_type=test_fixtures.MyCharmMulti,
|
||||
meta=test_fixtures.MyCharmMulti_Metadata,
|
||||
)
|
||||
state = State(
|
||||
leader=leader,
|
||||
config={},
|
||||
containers=[],
|
||||
relations=list(relations),
|
||||
secrets=[utils.get_keystone_secret_definition(relations)],
|
||||
)
|
||||
out = ctxt.run("config-changed", state)
|
||||
assert out.unit_status == ActiveStatus("")
|
||||
|
||||
|
||||
class TestOSBaseOperatorCharmK8SScenarios:
|
||||
@pytest.mark.parametrize("leader", (True, False))
|
||||
@pytest.mark.parametrize(
|
||||
"relations", utils.missing_relation(test_fixtures.MyCharmK8S_Metadata)
|
||||
)
|
||||
def test_relation_missing(self, tmp_path, relations, leader):
|
||||
"""Check k8s charm with a missing relation is blocked."""
|
||||
ctxt = Context(
|
||||
charm_type=test_fixtures.MyCharmK8S,
|
||||
meta=test_fixtures.MyCharmK8S_Metadata,
|
||||
)
|
||||
p1 = tmp_path / "c1"
|
||||
p2 = tmp_path / "c2"
|
||||
state = State(
|
||||
leader=leader,
|
||||
config={},
|
||||
containers=[
|
||||
Container(
|
||||
name="container1",
|
||||
can_connect=True,
|
||||
mounts={"local": Mount("/etc", p1)},
|
||||
),
|
||||
Container(
|
||||
name="container2",
|
||||
can_connect=True,
|
||||
mounts={"local": Mount("/etc", p2)},
|
||||
),
|
||||
],
|
||||
relations=list(relations),
|
||||
secrets=[utils.get_keystone_secret_definition(relations)],
|
||||
)
|
||||
out = ctxt.run("config-changed", state)
|
||||
assert re.match(r".*integration missing", out.unit_status.message)
|
||||
assert out.unit_status.name == "blocked"
|
||||
|
||||
@pytest.mark.parametrize("leader", (True, False))
|
||||
@pytest.mark.parametrize(
|
||||
"relations",
|
||||
utils.incomplete_relation(test_fixtures.MyCharmK8S_Metadata),
|
||||
)
|
||||
def test_relation_incomplete(self, tmp_path, relations, leader):
|
||||
"""Check k8s charm with an incomplete relation is waiting."""
|
||||
ctxt = Context(
|
||||
charm_type=test_fixtures.MyCharmK8S,
|
||||
meta=test_fixtures.MyCharmK8S_Metadata,
|
||||
)
|
||||
p1 = tmp_path / "c1"
|
||||
p2 = tmp_path / "c2"
|
||||
state = State(
|
||||
leader=leader,
|
||||
config={},
|
||||
containers=[
|
||||
Container(
|
||||
name="container1",
|
||||
can_connect=True,
|
||||
mounts={"local": Mount("/etc", p1)},
|
||||
),
|
||||
Container(
|
||||
name="container2",
|
||||
can_connect=True,
|
||||
mounts={"local": Mount("/etc", p2)},
|
||||
),
|
||||
],
|
||||
relations=list(relations),
|
||||
secrets=[utils.get_keystone_secret_definition(relations)],
|
||||
)
|
||||
out = ctxt.run("config-changed", state)
|
||||
assert out.unit_status.name == "waiting"
|
||||
assert re.match(
|
||||
r".*Not all relations are ready", out.unit_status.message
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize("leader", (True, False))
|
||||
@pytest.mark.parametrize(
|
||||
"relations", utils.complete_relation(test_fixtures.MyCharmK8S_Metadata)
|
||||
)
|
||||
def test_relation_container_not_ready(self, tmp_path, relations, leader):
|
||||
"""Check k8s charm with container is cannot connect to it waiting ."""
|
||||
ctxt = Context(
|
||||
charm_type=test_fixtures.MyCharmK8S,
|
||||
meta=test_fixtures.MyCharmK8S_Metadata,
|
||||
)
|
||||
p1 = tmp_path / "c1"
|
||||
p2 = tmp_path / "c2"
|
||||
state = State(
|
||||
leader=leader,
|
||||
config={},
|
||||
containers=[
|
||||
Container(
|
||||
name="container1",
|
||||
can_connect=False,
|
||||
mounts={"local": Mount("/etc", p1)},
|
||||
),
|
||||
Container(
|
||||
name="container2",
|
||||
can_connect=True,
|
||||
mounts={"local": Mount("/etc", p2)},
|
||||
),
|
||||
],
|
||||
relations=list(relations),
|
||||
secrets=[utils.get_keystone_secret_definition(relations)],
|
||||
)
|
||||
out = ctxt.run("config-changed", state)
|
||||
assert out.unit_status.name == "waiting"
|
||||
assert re.match(
|
||||
r".*Payload container not ready", out.unit_status.message
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize("leader", (True, False))
|
||||
@pytest.mark.parametrize(
|
||||
"relations", utils.complete_relation(test_fixtures.MyCharmK8S_Metadata)
|
||||
)
|
||||
def test_relation_all_complete(self, tmp_path, relations, leader):
|
||||
"""Check k8s charm with complete rels & ready containers is active."""
|
||||
ctxt = Context(
|
||||
charm_type=test_fixtures.MyCharmK8S,
|
||||
meta=test_fixtures.MyCharmK8S_Metadata,
|
||||
)
|
||||
p1 = tmp_path / "c1"
|
||||
p2 = tmp_path / "c2"
|
||||
state = State(
|
||||
leader=leader,
|
||||
config={},
|
||||
containers=[
|
||||
Container(
|
||||
name="container1",
|
||||
can_connect=True,
|
||||
mounts={"local": Mount("/etc", p1)},
|
||||
),
|
||||
Container(
|
||||
name="container2",
|
||||
can_connect=True,
|
||||
mounts={"local": Mount("/etc", p2)},
|
||||
),
|
||||
],
|
||||
relations=list(relations),
|
||||
secrets=[utils.get_keystone_secret_definition(relations)],
|
||||
)
|
||||
out = ctxt.run("config-changed", state)
|
||||
assert out.unit_status == ActiveStatus("")
|
||||
|
||||
|
||||
class TestOSBaseOperatorCharmK8SAPIScenarios:
|
||||
@pytest.mark.parametrize("leader", (True, False))
|
||||
@pytest.mark.parametrize(
|
||||
"relations",
|
||||
utils.missing_relation(test_fixtures.MyCharmK8SAPI_Metadata),
|
||||
)
|
||||
def test_relation_missing(self, tmp_path, relations, leader):
|
||||
"""Check k8s API charm with a missing relation is blocked."""
|
||||
ctxt = Context(
|
||||
charm_type=test_fixtures.MyCharmK8SAPI,
|
||||
meta=test_fixtures.MyCharmK8SAPI_Metadata,
|
||||
)
|
||||
p1 = tmp_path / "c1"
|
||||
state = State(
|
||||
leader=leader,
|
||||
config={},
|
||||
containers=[
|
||||
Container(
|
||||
name="my-service",
|
||||
can_connect=True,
|
||||
mounts={"local": Mount("/etc", p1)},
|
||||
)
|
||||
],
|
||||
relations=list(relations),
|
||||
secrets=[utils.get_keystone_secret_definition(relations)],
|
||||
)
|
||||
out = ctxt.run("config-changed", state)
|
||||
assert re.match(r".*integration missing", out.unit_status.message)
|
||||
assert out.unit_status.name == "blocked"
|
||||
|
||||
@pytest.mark.parametrize("leader", (True, False))
|
||||
@pytest.mark.parametrize(
|
||||
"relations",
|
||||
utils.incomplete_relation(test_fixtures.MyCharmK8SAPI_Metadata),
|
||||
)
|
||||
def test_relation_incomplete(self, tmp_path, relations, leader):
|
||||
"""Check k8s API charm with an incomplete relation is waiting."""
|
||||
ctxt = Context(
|
||||
charm_type=test_fixtures.MyCharmK8SAPI,
|
||||
meta=test_fixtures.MyCharmK8SAPI_Metadata,
|
||||
)
|
||||
p1 = tmp_path / "c1"
|
||||
state = State(
|
||||
leader=leader,
|
||||
config={},
|
||||
containers=[
|
||||
Container(
|
||||
name="my-service",
|
||||
can_connect=True,
|
||||
mounts={"local": Mount("/etc", p1)},
|
||||
)
|
||||
],
|
||||
relations=list(relations),
|
||||
secrets=[utils.get_keystone_secret_definition(relations)],
|
||||
)
|
||||
out = ctxt.run("config-changed", state)
|
||||
assert out.unit_status.name == "waiting"
|
||||
assert re.match(
|
||||
r".*Not all relations are ready", out.unit_status.message
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize("leader", (True, False))
|
||||
@pytest.mark.parametrize(
|
||||
"relations",
|
||||
utils.complete_relation(test_fixtures.MyCharmK8SAPI_Metadata),
|
||||
)
|
||||
def test_relation_container_not_ready(self, tmp_path, relations, leader):
|
||||
"""Check k8s API charm with stopped container is waiting."""
|
||||
ctxt = Context(
|
||||
charm_type=test_fixtures.MyCharmK8SAPI,
|
||||
meta=test_fixtures.MyCharmK8SAPI_Metadata,
|
||||
)
|
||||
p1 = tmp_path / "c1"
|
||||
state = State(
|
||||
leader=leader,
|
||||
config={},
|
||||
containers=[
|
||||
Container(
|
||||
name="my-service",
|
||||
can_connect=False,
|
||||
mounts={"local": Mount("/etc", p1)},
|
||||
)
|
||||
],
|
||||
relations=list(relations),
|
||||
secrets=[utils.get_keystone_secret_definition(relations)],
|
||||
)
|
||||
out = ctxt.run("config-changed", state)
|
||||
assert out.unit_status.name == "waiting"
|
||||
assert re.match(
|
||||
r".*Payload container not ready", out.unit_status.message
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize("leader", (True, False))
|
||||
@pytest.mark.parametrize(
|
||||
"relations",
|
||||
utils.complete_relation(test_fixtures.MyCharmK8SAPI_Metadata),
|
||||
)
|
||||
def test_relation_all_complete(self, tmp_path, relations, leader):
|
||||
"""Check k8s API charm all rels and containers are ready."""
|
||||
ctxt = Context(
|
||||
charm_type=test_fixtures.MyCharmK8SAPI,
|
||||
meta=test_fixtures.MyCharmK8SAPI_Metadata,
|
||||
)
|
||||
p1 = tmp_path / "c1"
|
||||
state = State(
|
||||
leader=leader,
|
||||
config={},
|
||||
containers=[
|
||||
Container(
|
||||
name="my-service",
|
||||
can_connect=True,
|
||||
mounts={"local": Mount("/etc", p1)},
|
||||
)
|
||||
],
|
||||
relations=list(relations),
|
||||
secrets=[utils.get_keystone_secret_definition(relations)],
|
||||
)
|
||||
out = ctxt.run("config-changed", state)
|
||||
assert out.unit_status == ActiveStatus("")
|
18
ops-sunbeam/tests/unit_tests/__init__.py
Normal file
@@ -0,0 +1,18 @@
|
||||
# Copyright 2021 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Unit tests for aso."""
|
||||
import ops.testing
|
||||
|
||||
ops.testing.SIMULATE_CAN_CONNECT = True
|
360
ops-sunbeam/tests/unit_tests/test_charms.py
Normal file
@@ -0,0 +1,360 @@
#!/usr/bin/env python3

# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test charms for unit tests."""

import os
import sys
import tempfile
from typing import (
    TYPE_CHECKING,
)

if TYPE_CHECKING:
    import ops.framework

from typing import (
    List,
)

sys.path.append("tests/unit_tests/lib")  # noqa
sys.path.append("src")  # noqa

import ops_sunbeam.charm as sunbeam_charm
import ops_sunbeam.container_handlers as sunbeam_chandlers

CHARM_CONFIG = """
options:
  debug:
    default: True
    description: Enable debug logging.
    type: boolean
  region:
    default: RegionOne
    description: Region
    type: string
"""

INITIAL_CHARM_CONFIG = {"debug": "true", "region": "RegionOne"}

CHARM_METADATA = """
name: my-service
version: 3
bases:
  - name: ubuntu
    channel: 20.04/stable
tags:
  - openstack
  - identity
  - misc

subordinate: false
"""

CHARM_METADATA_K8S = (
    CHARM_METADATA
    + """
containers:
  my-service:
    resource: mysvc-image
    mounts:
      - storage: db
        location: /var/lib/mysvc

storage:
  logs:
    type: filesystem
  db:
    type: filesystem

resources:
  mysvc-image:
    type: oci-image
"""
)

API_CHARM_METADATA = """
name: my-service
version: 3
bases:
  - name: ubuntu
    channel: 20.04/stable
tags:
  - openstack
  - identity
  - misc

subordinate: false

requires:
  database:
    interface: mysql_client
    limit: 1
  ingress-internal:
    interface: ingress
    limit: 1
  ingress-public:
    interface: ingress
    limit: 1
  amqp:
    interface: rabbitmq
  identity-service:
    interface: keystone
  identity-credentials:
    interface: keystone-credentials
    limit: 1
  ceph-access:
    interface: cinder-ceph-key

peers:
  peers:
    interface: mysvc-peer

containers:
  my-service:
    resource: mysvc-image
    mounts:
      - storage: db
        location: /var/lib/mysvc

storage:
  logs:
    type: filesystem
  db:
    type: filesystem

resources:
  mysvc-image:
    type: oci-image
"""


class MyCharm(sunbeam_charm.OSBaseOperatorCharm):
    """Test charm for testing OSBaseOperatorCharm."""

    service_name = "my-service"

    def __init__(self, framework: "ops.framework.Framework") -> None:
        """Run constructor."""
        self.seen_events = []
        self.render_calls = []
        self._template_dir = self._setup_templates()
        super().__init__(framework)

    def _log_event(self, event: "ops.framework.EventBase") -> None:
        """Log events."""
        self.seen_events.append(type(event).__name__)

    def _on_config_changed(self, event: "ops.framework.EventBase") -> None:
        """Log config changed event."""
        self._log_event(event)
        super()._on_config_changed(event)

    def configure_charm(self, event: "ops.framework.EventBase") -> None:
        """Log configure_charm call."""
        self._log_event(event)
        super().configure_charm(event)

    @property
    def public_ingress_port(self) -> int:
        """Charms default port."""
        return 789

    def _setup_templates(self) -> str:
        """Run temp templates dir setup."""
        tmpdir = tempfile.mkdtemp()
        _template_dir = f"{tmpdir}/templates"
        os.mkdir(_template_dir)
        with open(f"{_template_dir}/my-service.conf.j2", "w") as f:
            f.write("")
        return _template_dir

    @property
    def template_dir(self) -> str:
        """Temp templates dir."""
        return self._template_dir


TEMPLATE_CONTENTS = """
{{ wsgi_config.wsgi_admin_script }}
{{ database.database_password }}
{{ options.debug }}
{{ amqp.transport_url }}
{{ amqp.hostname }}
{{ identity_service.service_password }}
{{ peers.foo }}
"""


class MyCharmK8S(sunbeam_charm.OSBaseOperatorCharmK8S):
    """Test charm for k8s."""

    service_name = "my-service"

    def __init__(self, framework: "ops.framework.Framework") -> None:
        """Run constructor."""
        self.seen_events = []
        self.render_calls = []
        self._template_dir = self._setup_templates()
        super().__init__(framework)

    def _log_event(self, event: "ops.framework.EventBase") -> None:
        """Log events."""
        self.seen_events.append(type(event).__name__)

    def _on_config_changed(self, event: "ops.framework.EventBase") -> None:
        """Log config changed event."""
        self._log_event(event)
        super()._on_config_changed(event)

    def configure_charm(self, event: "ops.framework.EventBase") -> None:
        """Log configure_charm call."""
        self._log_event(event)
        super().configure_charm(event)

    @property
    def public_ingress_port(self) -> int:
        """Charms default port."""
        return 789

    def _setup_templates(self) -> str:
        """Run temp templates dir setup."""
        tmpdir = tempfile.mkdtemp()
        _template_dir = f"{tmpdir}/templates"
        os.mkdir(_template_dir)
        with open(f"{_template_dir}/my-service.conf.j2", "w") as f:
            f.write("")
        return _template_dir

    def _on_service_pebble_ready(
        self, event: "ops.framework.EventBase"
    ) -> None:
        """Log pebble ready event."""
        self._log_event(event)
        super()._on_service_pebble_ready(event)


class MyAPICharm(sunbeam_charm.OSBaseOperatorAPICharm):
    """Test charm for testing OSBaseOperatorAPICharm."""

    service_name = "my-service"
    wsgi_admin_script = "/bin/wsgi_admin"
    wsgi_public_script = "/bin/wsgi_public"
    mandatory_relations = {
        "database",
        "amqp",
        "identity-service",
        "ingress-public",
    }

    def __init__(self, framework: "ops.framework.Framework") -> None:
        """Run constructor."""
        self.seen_events = []
        self.render_calls = []
        self._template_dir = self._setup_templates()
        super().__init__(framework)

    def _setup_templates(self) -> str:
        """Run temp templates dir setup."""
        tmpdir = tempfile.mkdtemp()
        _template_dir = f"{tmpdir}/templates"
        os.mkdir(_template_dir)
        with open(f"{_template_dir}/my-service.conf.j2", "w") as f:
            f.write(TEMPLATE_CONTENTS)
        with open(f"{_template_dir}/wsgi-my-service.conf.j2", "w") as f:
            f.write(TEMPLATE_CONTENTS)
        return _template_dir

    def _log_event(self, event: "ops.framework.EventBase") -> None:
        """Log events."""
        self.seen_events.append(type(event).__name__)

    def _on_service_pebble_ready(
        self, event: "ops.framework.EventBase"
    ) -> None:
        """Log pebble ready event."""
        self._log_event(event)
        super()._on_service_pebble_ready(event)

    def _on_config_changed(self, event: "ops.framework.EventBase") -> None:
        """Log config changed event."""
        self._log_event(event)
        super()._on_config_changed(event)

    @property
    def default_public_ingress_port(self) -> int:
        """Charms default port."""
        return 789

    @property
    def template_dir(self) -> str:
        """Templates dir."""
        return self._template_dir

    @property
    def healthcheck_http_url(self) -> str:
        """Healthcheck HTTP URL for the service."""
        return f"http://localhost:{self.default_public_ingress_port}/v3"

    @property
    def healthcheck_http_timeout(self) -> str:
        """Healthcheck HTTP timeout for the service."""
        return "5s"


class MultiSvcPebbleHandler(sunbeam_chandlers.ServicePebbleHandler):
    """Test pebble handler for multi service charm."""

    def get_layer(self) -> dict:
        """Glance API service pebble layer.

        :returns: pebble layer configuration for glance api service
        """
        return {
            "summary": f"{self.service_name} layer",
            "description": "pebble config layer for glance api service",
            "services": {
                f"{self.service_name}": {
                    "override": "replace",
                    "summary": f"{self.service_name} standalone",
                    "command": "/usr/bin/glance-api",
                    "startup": "disabled",
                },
                "apache forwarder": {
                    "override": "replace",
                    "summary": "apache",
                    "command": "/usr/sbin/apache2ctl -DFOREGROUND",
                    "startup": "disabled",
                },
            },
        }


class TestMultiSvcCharm(MyAPICharm):
    """Test class of multi service charm."""

    def get_pebble_handlers(self) -> List[sunbeam_chandlers.PebbleHandler]:
        """Pebble handlers for the service."""
        return [
            MultiSvcPebbleHandler(
                self,
                self.service_name,
                self.service_name,
                self.container_configs,
                self.template_dir,
                self.configure_charm,
            )
        ]
189
ops-sunbeam/tests/unit_tests/test_compound_status.py
Normal file
@@ -0,0 +1,189 @@
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test compound_status."""

import sys

import mock

sys.path.append("lib")  # noqa
sys.path.append("src")  # noqa

from ops.model import (
    ActiveStatus,
    BlockedStatus,
    UnknownStatus,
    WaitingStatus,
)

import ops_sunbeam.charm as sunbeam_charm
import ops_sunbeam.compound_status as compound_status
import ops_sunbeam.test_utils as test_utils

from . import (
    test_charms,
)


class TestCompoundStatus(test_utils.CharmTestCase):
    """Test for the compound_status module."""

    PATCHES = []

    def setUp(self) -> None:
        """Charm test class setup."""
        self.container_calls = test_utils.ContainerCalls()
        super().setUp(sunbeam_charm, self.PATCHES)
        self.harness = test_utils.get_harness(
            test_charms.MyCharmK8S,
            test_charms.CHARM_METADATA_K8S,
            self.container_calls,
            charm_config=test_charms.CHARM_CONFIG,
            initial_charm_config=test_charms.INITIAL_CHARM_CONFIG,
        )
        self.harness.begin()
        self.addCleanup(self.harness.cleanup)

    def test_status_triggering_on_set(self) -> None:
        """Updating a status should call the on_update function if set."""
        status = compound_status.Status("test")

        # this shouldn't fail, even though it's not connected to a pool yet,
        # and thus has no on_update set.
        status.set(WaitingStatus("test"))

        # manually set the on_update hook and verify it is called
        on_update_mock = mock.Mock()
        status.on_update = on_update_mock
        status.set(ActiveStatus("test"))
        on_update_mock.assert_called_once_with()

    def test_status_new_unknown_message(self) -> None:
        """New status should be unknown status and empty message."""
        status = compound_status.Status("test")
        self.assertIsInstance(status.status, UnknownStatus)
        self.assertEqual(status.message(), "")

    def test_serializing_status(self) -> None:
        """Serialising a status should work as expected."""
        status = compound_status.Status("mylabel")
        self.assertEqual(
            status._serialize(),
            {
                "status": "unknown",
                "message": "",
            },
        )

        # now with a message and new status
        status.set(WaitingStatus("still waiting..."))
        self.assertEqual(
            status._serialize(),
            {
                "status": "waiting",
                "message": "still waiting...",
            },
        )

        # with a custom priority
        status = compound_status.Status("mylabel", priority=12)
        self.assertEqual(
            status._serialize(),
            {
                "status": "unknown",
                "message": "",
            },
        )

    def test_status_pool_priority(self) -> None:
        """A status pool should display the highest priority status."""
        pool = self.harness.charm.status_pool

        status1 = compound_status.Status("test1")
        pool.add(status1)
        status2 = compound_status.Status("test2", priority=100)
        pool.add(status2)
        status3 = compound_status.Status("test3", priority=30)
        pool.add(status3)

        status1.set(WaitingStatus(""))
        status2.set(WaitingStatus(""))
        status3.set(WaitingStatus(""))

        # status2 has highest priority
        self.assertEqual(
            self.harness.charm.unit.status, WaitingStatus("(test2)")
        )

        # status3 will now be displayed,
        # since blocked is more severe than waiting
        status3.set(BlockedStatus(":("))
        self.assertEqual(
            self.harness.charm.unit.status, BlockedStatus("(test3) :(")
        )

    def test_add_status_idempotency(self) -> None:
        """Adding the same status twice should not cause issues."""
        pool = self.harness.charm.status_pool

        status1 = compound_status.Status("test1", priority=200)
        pool.add(status1)

        status1.set(WaitingStatus("test"))
        self.assertEqual(
            self.harness.charm.unit.status,
            WaitingStatus("(test1) test"),
        )

        new_status1 = compound_status.Status("test1", priority=201)
        new_status1.set(BlockedStatus(""))
        pool.add(new_status1)

        # should be the new object in the pool
        self.assertIs(new_status1, pool._pool["test1"])
        self.assertEqual(new_status1.priority(), (1, -201))
        self.assertEqual(
            self.harness.charm.unit.status,
            BlockedStatus("(test1)"),
        )

    def test_all_active_status(self) -> None:
        """All statuses being active should yield an active unit status."""
        pool = self.harness.charm.status_pool
        self.harness.charm.bootstrap_status.set(ActiveStatus())

        status1 = compound_status.Status("test1")
        pool.add(status1)
        status2 = compound_status.Status("test2", priority=150)
        pool.add(status2)
        status3 = compound_status.Status("test3", priority=30)
        pool.add(status3)

        status1.set(ActiveStatus(""))
        status2.set(ActiveStatus(""))
        status3.set(ActiveStatus(""))

        # also need to manually activate other default statuses
        pool._pool["container:my-service"].set(ActiveStatus(""))

        # all empty messages should end up as an empty unit status
        self.assertEqual(self.harness.charm.unit.status, ActiveStatus(""))

        # if there's a message (on the highest priority status),
        # it should also show the status prefix
        status2.set(ActiveStatus("a message"))
        self.assertEqual(
            self.harness.charm.unit.status, ActiveStatus("(test2) a message")
        )
505
ops-sunbeam/tests/unit_tests/test_core.py
Normal file
@@ -0,0 +1,505 @@
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test ops_sunbeam core charm classes."""

import os
import sys

import mock

sys.path.append("tests/lib")  # noqa
sys.path.append("src")  # noqa

import ops.model

import ops_sunbeam.charm as sunbeam_charm
import ops_sunbeam.test_utils as test_utils

from . import (
    test_charms,
)


class TestOSBaseOperatorCharm(test_utils.CharmTestCase):
    """Test for the OSBaseOperatorCharm class."""

    PATCHES = []

    def setUp(self) -> None:
        """Charm test class setup."""
        self.container_calls = test_utils.ContainerCalls()
        super().setUp(sunbeam_charm, self.PATCHES)
        self.mock_event = mock.MagicMock()
        self.harness = test_utils.get_harness(
            test_charms.MyCharm,
            test_charms.CHARM_METADATA,
            self.container_calls,
            charm_config=test_charms.CHARM_CONFIG,
            initial_charm_config=test_charms.INITIAL_CHARM_CONFIG,
        )
        self.harness.begin()
        self.addCleanup(self.harness.cleanup)

    def test_write_config(self) -> None:
        """Test writing config when charm is ready."""
        self.assertEqual(self.container_calls.push["my-service"], [])

    def test_relation_handlers_ready(self) -> None:
        """Test relation handlers are ready."""
        self.assertSetEqual(
            self.harness.charm.get_mandatory_relations_not_ready(
                self.mock_event
            ),
            set(),
        )


class TestOSBaseOperatorCharmK8S(test_utils.CharmTestCase):
    """Test for the OSBaseOperatorCharmK8S class."""

    PATCHES = []

    def setUp(self) -> None:
        """Charm test class setup."""
        self.container_calls = test_utils.ContainerCalls()
        super().setUp(sunbeam_charm, self.PATCHES)
        self.harness = test_utils.get_harness(
            test_charms.MyCharmK8S,
            test_charms.CHARM_METADATA_K8S,
            self.container_calls,
            charm_config=test_charms.CHARM_CONFIG,
            initial_charm_config=test_charms.INITIAL_CHARM_CONFIG,
        )
        self.mock_event = mock.MagicMock()
        self.harness.begin()
        self.addCleanup(self.harness.cleanup)

    def set_pebble_ready(self) -> None:
        """Set pebble ready event."""
        self.harness.container_pebble_ready("my-service")

    def test_pebble_ready_handler(self) -> None:
        """Test pebble ready event is raised and observed."""
        self.assertEqual(self.harness.charm.seen_events, [])
        self.set_pebble_ready()
        self.assertEqual(self.harness.charm.seen_events, ["PebbleReadyEvent"])

    def test_write_config(self) -> None:
        """Test writing config when charm is ready."""
        self.set_pebble_ready()
        self.assertEqual(self.container_calls.push["my-service"], [])

    def test_container_names(self) -> None:
        """Test container name list is correct."""
        self.assertEqual(self.harness.charm.container_names, ["my-service"])

    def test_relation_handlers_ready(self) -> None:
        """Test relation handlers are ready."""
        self.assertSetEqual(
            self.harness.charm.get_mandatory_relations_not_ready(
                self.mock_event
            ),
            set(),
        )


class _TestOSBaseOperatorAPICharm(test_utils.CharmTestCase):
    """Test for the OSBaseOperatorAPICharm class."""

    PATCHES = []

    def setUp(self, charm_to_test: test_charms.MyAPICharm) -> None:
        """Charm test class setup."""
        self.container_calls = test_utils.ContainerCalls()

        super().setUp(sunbeam_charm, self.PATCHES)
        self.mock_event = mock.MagicMock()
        self.harness = test_utils.get_harness(
            charm_to_test,
            test_charms.API_CHARM_METADATA,
            self.container_calls,
            charm_config=test_charms.CHARM_CONFIG,
            initial_charm_config=test_charms.INITIAL_CHARM_CONFIG,
        )

        # clean up events that were dynamically defined,
        # otherwise we get issues because they'll be redefined,
        # which is not allowed.
        from charms.data_platform_libs.v0.database_requires import (
            DatabaseEvents,
        )

        for attr in (
            "database_database_created",
            "database_endpoints_changed",
            "database_read_only_endpoints_changed",
        ):
            try:
                delattr(DatabaseEvents, attr)
            except AttributeError:
                pass

        self.addCleanup(self.harness.cleanup)
        self.harness.begin()

    def set_pebble_ready(self) -> None:
        """Set pebble ready event."""
        self.harness.container_pebble_ready("my-service")


class TestOSBaseOperatorAPICharm(_TestOSBaseOperatorAPICharm):
    """Test Charm with services."""

    def setUp(self) -> None:
        """Run test class setup."""
        super().setUp(test_charms.MyAPICharm)

    def test_write_config(self) -> None:
        """Test when charm is ready configs are written correctly."""
        test_utils.add_complete_ingress_relation(self.harness)
        self.harness.set_leader()
        test_utils.add_complete_peer_relation(self.harness)
        self.set_pebble_ready()
        self.harness.charm.leader_set({"foo": "bar"})
        test_utils.add_api_relations(self.harness)
        test_utils.add_complete_identity_credentials_relation(self.harness)
        expect_entries = [
            "/bin/wsgi_admin",
            "hardpassword",
            "True",
            "rabbit://my-service:rabbit.pass@10.0.0.13:5672/openstack",
            "rabbithost1.local",
            "svcpass1",
            "bar",
        ]
        expect_string = "\n" + "\n".join(expect_entries)
        self.harness.set_can_connect("my-service", True)
        effective_user_id = os.geteuid()
        effective_group_id = os.getegid()
        self.check_file(
            "my-service",
            "/etc/my-service/my-service.conf",
            contents=expect_string,
            user=effective_user_id,
            group=effective_group_id,
        )
        self.check_file(
            "my-service",
            "/etc/apache2/sites-available/wsgi-my-service.conf",
            contents=expect_string,
            user=effective_user_id,
            group=effective_group_id,
        )

    def test_assess_status(self) -> None:
        """Test charm is setting status correctly."""
        test_utils.add_complete_ingress_relation(self.harness)
        self.harness.set_leader()
        test_utils.add_complete_peer_relation(self.harness)
        self.harness.charm.leader_set({"foo": "bar"})
        test_utils.add_api_relations(self.harness)
        test_utils.add_complete_identity_credentials_relation(self.harness)
        self.harness.set_can_connect("my-service", True)
        self.assertNotEqual(
            self.harness.charm.status.status, ops.model.ActiveStatus()
        )
        self.set_pebble_ready()
        for ph in self.harness.charm.pebble_handlers:
            self.assertTrue(ph.service_ready)

        self.assertEqual(
            self.harness.charm.status.status, ops.model.ActiveStatus()
        )

    def test_start_services(self) -> None:
        """Test service is started."""
        test_utils.add_complete_ingress_relation(self.harness)
        self.harness.set_leader()
        test_utils.add_complete_peer_relation(self.harness)
        self.set_pebble_ready()
        self.harness.charm.leader_set({"foo": "bar"})
        test_utils.add_api_relations(self.harness)
        test_utils.add_complete_identity_credentials_relation(self.harness)
        self.harness.set_can_connect("my-service", True)
        self.assertEqual(
            self.container_calls.started_services("my-service"),
            ["wsgi-my-service"],
        )

    def test__on_database_changed(self) -> None:
        """Test database is requested."""
        rel_id = self.harness.add_relation("peers", "my-service")
        self.harness.add_relation_unit(rel_id, "my-service/1")
        self.harness.set_leader()
        self.set_pebble_ready()
        db_rel_id = test_utils.add_base_db_relation(self.harness)
        test_utils.add_db_relation_credentials(self.harness, db_rel_id)
        rel_data = self.harness.get_relation_data(db_rel_id, "my-service")
        requested_db = rel_data["database"]
        self.assertEqual(requested_db, "my_service")

    def test_contexts(self) -> None:
        """Test contexts are correctly populated."""
        rel_id = self.harness.add_relation("peers", "my-service")
        self.harness.add_relation_unit(rel_id, "my-service/1")
        self.harness.set_leader()
        self.set_pebble_ready()
        db_rel_id = test_utils.add_base_db_relation(self.harness)
        test_utils.add_db_relation_credentials(self.harness, db_rel_id)
        contexts = self.harness.charm.contexts()
        self.assertEqual(
            contexts.wsgi_config.wsgi_admin_script, "/bin/wsgi_admin"
        )
        self.assertEqual(contexts.database.database_password, "hardpassword")
        self.assertEqual(contexts.options.debug, True)

    def test_peer_leader_db(self) -> None:
        """Test interacting with peer app db."""
        rel_id = self.harness.add_relation("peers", "my-service")
        self.harness.add_relation_unit(rel_id, "my-service/1")
        self.harness.set_leader()
        self.harness.charm.leader_set({"ready": "true"})
        self.harness.charm.leader_set({"foo": "bar"})
        self.harness.charm.leader_set(ginger="biscuit")
        rel_data = self.harness.get_relation_data(rel_id, "my-service")
        self.assertEqual(
            rel_data, {"ready": "true", "foo": "bar", "ginger": "biscuit"}
        )
        self.assertEqual(self.harness.charm.leader_get("ready"), "true")
        self.assertEqual(self.harness.charm.leader_get("foo"), "bar")
        self.assertEqual(self.harness.charm.leader_get("ginger"), "biscuit")

    def test_peer_unit_data(self) -> None:
        """Test interacting with peer unit data."""
        rel_id = self.harness.add_relation("peers", "my-service")
        self.harness.add_relation_unit(rel_id, "my-service/1")
        self.harness.update_relation_data(
            rel_id, "my-service/1", {"today": "monday"}
        )
        self.assertEqual(
            self.harness.charm.peers.get_all_unit_values(
                "today",
                include_local_unit=False,
            ),
            ["monday"],
        )
        self.assertEqual(
            self.harness.charm.peers.get_all_unit_values(
                "today",
                include_local_unit=True,
            ),
            ["monday"],
        )
        self.harness.charm.peers.set_unit_data({"today": "friday"})
        self.assertEqual(
            self.harness.charm.peers.get_all_unit_values(
                "today",
                include_local_unit=False,
            ),
            ["monday"],
        )
        self.assertEqual(
            self.harness.charm.peers.get_all_unit_values(
                "today",
                include_local_unit=True,
            ),
            ["monday", "friday"],
        )

    def test_peer_leader_ready(self) -> None:
        """Test peer leader ready methods."""
        rel_id = self.harness.add_relation("peers", "my-service")
        self.harness.add_relation_unit(rel_id, "my-service/1")
        self.harness.set_leader()
        self.assertFalse(self.harness.charm.is_leader_ready())
        self.harness.charm.set_leader_ready()
        self.assertTrue(self.harness.charm.is_leader_ready())

    def test_endpoint_urls(self) -> None:
        """Test public_url and internal_url properties."""
        # Add ingress relation
        test_utils.add_complete_ingress_relation(self.harness)
        self.assertEqual(
            self.harness.charm.internal_url, "http://internal-url:80"
        )
        self.assertEqual(self.harness.charm.public_url, "http://public-url:80")

    @mock.patch("ops_sunbeam.charm.Client")
    def test_endpoint_urls_no_ingress(self, mock_client: mock.patch) -> None:
        """Test public_url and internal_url with no ingress defined."""

        class MockService:
            """Mock lightkube client service object."""

            def __init__(self) -> None:
                self.status = None

        mock_client.return_value = mock.MagicMock()
        mock_client.return_value.get.return_value = MockService()
        self.assertEqual(
            self.harness.charm.internal_url, "http://10.0.0.10:789"
        )
        self.assertEqual(self.harness.charm.public_url, "http://10.0.0.10:789")

    def test_relation_handlers_ready(self) -> None:
        """Test relation handlers are ready."""
        # Add all mandatory relations and test relation_handlers_ready
        db_rel_id = test_utils.add_base_db_relation(self.harness)
        test_utils.add_db_relation_credentials(self.harness, db_rel_id)
        self.assertSetEqual(
            self.harness.charm.get_mandatory_relations_not_ready(
                self.mock_event
            ),
            {"identity-service", "ingress-public", "amqp"},
        )

        amqp_rel_id = test_utils.add_base_amqp_relation(self.harness)
        test_utils.add_amqp_relation_credentials(self.harness, amqp_rel_id)
        self.assertSetEqual(
            self.harness.charm.get_mandatory_relations_not_ready(
                self.mock_event
            ),
            {"ingress-public", "identity-service"},
        )

        identity_rel_id = test_utils.add_base_identity_service_relation(
            self.harness
        )
        test_utils.add_identity_service_relation_response(
            self.harness, identity_rel_id
        )
        self.assertSetEqual(
            self.harness.charm.get_mandatory_relations_not_ready(
                self.mock_event
            ),
            {"ingress-public"},
        )

        ingress_rel_id = test_utils.add_ingress_relation(
            self.harness, "public"
        )
        test_utils.add_ingress_relation_data(
            self.harness, ingress_rel_id, "public"
        )

        ceph_access_rel_id = test_utils.add_base_ceph_access_relation(
            self.harness
        )
        test_utils.add_ceph_access_relation_response(
            self.harness, ceph_access_rel_id
        )
        self.assertSetEqual(
            self.harness.charm.get_mandatory_relations_not_ready(
                self.mock_event
            ),
            set(),
        )

        # Add an optional relation and test if relation_handlers_ready
        # returns True
        optional_rel_id = test_utils.add_ingress_relation(
            self.harness, "internal"
        )
        test_utils.add_ingress_relation_data(
            self.harness, optional_rel_id, "internal"
        )
        self.assertSetEqual(
            self.harness.charm.get_mandatory_relations_not_ready(
                self.mock_event
            ),
            set(),
        )

        # Remove a mandatory relation and test if relation_handlers_ready
        # returns False
        self.harness.remove_relation(ingress_rel_id)
        self.assertSetEqual(
            self.harness.charm.get_mandatory_relations_not_ready(
                self.mock_event
            ),
            {"ingress-public"},
        )

        # Add the mandatory relation back and retest relation_handlers_ready
        ingress_rel_id = test_utils.add_ingress_relation(
            self.harness, "public"
        )
        test_utils.add_ingress_relation_data(
            self.harness, ingress_rel_id, "public"
        )
        self.assertSetEqual(
            self.harness.charm.get_mandatory_relations_not_ready(
                self.mock_event
            ),
            set(),
        )

    def test_add_explicit_port(self):
        """Test add_explicit_port method."""
        self.assertEqual(
            self.harness.charm.add_explicit_port("http://test.org/something"),
            "http://test.org:80/something",
        )
        self.assertEqual(
            self.harness.charm.add_explicit_port(
                "http://test.org:80/something"
            ),
            "http://test.org:80/something",
        )
        self.assertEqual(
            self.harness.charm.add_explicit_port("https://test.org/something"),
            "https://test.org:443/something",
        )
        self.assertEqual(
            self.harness.charm.add_explicit_port(
                "https://test.org:443/something"
            ),
            "https://test.org:443/something",
        )
        self.assertEqual(
            self.harness.charm.add_explicit_port(
                "http://test.org:8080/something"
            ),
            "http://test.org:8080/something",
        )
        self.assertEqual(
            self.harness.charm.add_explicit_port(
                "https://test.org:8443/something"
            ),
            "https://test.org:8443/something",
        )


class TestOSBaseOperatorMultiSVCAPICharm(_TestOSBaseOperatorAPICharm):
    """Test Charm with multiple services."""

    def setUp(self) -> None:
        """Charm test class setup."""
        super().setUp(test_charms.TestMultiSvcCharm)

    def test_start_services(self) -> None:
        """Test multiple services are started."""
        test_utils.add_complete_ingress_relation(self.harness)
        self.harness.set_leader()
        test_utils.add_complete_peer_relation(self.harness)
        self.set_pebble_ready()
        self.harness.charm.leader_set({"foo": "bar"})
        test_utils.add_api_relations(self.harness)
        test_utils.add_complete_identity_credentials_relation(self.harness)
        self.harness.set_can_connect("my-service", True)
        self.assertEqual(
            sorted(self.container_calls.started_services("my-service")),
            sorted(["apache forwarder", "my-service"]),
        )
95
ops-sunbeam/tests/unit_tests/test_job_ctrl.py
Normal file
@@ -0,0 +1,95 @@
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test job ctrl code."""

import sys

sys.path.append("lib")  # noqa
sys.path.append("src")  # noqa

import ops_sunbeam.job_ctrl as sunbeam_job_ctrl
import ops_sunbeam.test_utils as test_utils

from . import (
    test_charms,
)


class JobCtrlCharm(test_charms.MyAPICharm):
    """Test charm that uses job ctrl code."""

    unit_job_counter = 1

    @sunbeam_job_ctrl.run_once_per_unit("unit-job")
    def unit_specific_job(self):
        """Run a dummy once per unit job."""
        self.unit_job_counter = self.unit_job_counter + 1


class TestJobCtrl(test_utils.CharmTestCase):
    """Test for the job_ctrl module."""

    PATCHES = ["time"]

    def setUp(self) -> None:
        """Charm test class setup."""
        self.container_calls = test_utils.ContainerCalls()
        super().setUp(sunbeam_job_ctrl, self.PATCHES)
        self.harness = test_utils.get_harness(
            JobCtrlCharm,
            test_charms.API_CHARM_METADATA,
            self.container_calls,
            charm_config=test_charms.CHARM_CONFIG,
            initial_charm_config=test_charms.INITIAL_CHARM_CONFIG,
        )
        # clean up events that were dynamically defined,
        # otherwise we get issues because they'll be redefined,
        # which is not allowed.
        from charms.data_platform_libs.v0.database_requires import (
            DatabaseEvents,
        )

        for attr in (
            "database_database_created",
            "database_endpoints_changed",
            "database_read_only_endpoints_changed",
        ):
            try:
                delattr(DatabaseEvents, attr)
            except AttributeError:
                pass
        self.addCleanup(self.harness.cleanup)
        self.harness.begin()

    def test_local_job_storage(self) -> None:
        """Test local job storage."""
        local_job_storage = sunbeam_job_ctrl.LocalJobStorage(
            self.harness.charm._state
        )
        self.assertEqual(dict(local_job_storage.get_labels()), {})
        local_job_storage.add("my-job")
        self.assertIn("my-job", local_job_storage.get_labels())

    def test_run_once_per_unit(self) -> None:
        """Test run_once_per_unit decorator."""
        self.harness.charm._state.run_once = {}
        call_counter = self.harness.charm.unit_job_counter
        self.harness.charm.unit_specific_job()
        expected_count = call_counter + 1
        self.assertEqual(expected_count, self.harness.charm.unit_job_counter)
        self.harness.charm.unit_specific_job()
        # The call count should be unchanged as the job should not have
        # run
        self.assertEqual(expected_count, self.harness.charm.unit_job_counter)
82
ops-sunbeam/tests/unit_tests/test_templating.py
Normal file
@@ -0,0 +1,82 @@
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test ops_sunbeam.templating."""

import sys
from io import (
    BytesIO,
    TextIOWrapper,
)

import jinja2
import mock

sys.path.append("lib")  # noqa
sys.path.append("src")  # noqa

import ops_sunbeam.core as sunbeam_core
import ops_sunbeam.templating as sunbeam_templating
import ops_sunbeam.test_utils as test_utils


class TestTemplating(test_utils.CharmTestCase):
    """Tests for ops_sunbeam.templating."""

    PATCHES = []

    def setUp(self) -> None:
        """Charm test class setup."""
        super().setUp(sunbeam_templating, self.PATCHES)

    @mock.patch("jinja2.FileSystemLoader")
    def test_render(self, fs_loader: "jinja2.FileSystemLoader") -> None:
        """Check rendering templates."""
        container_mock = mock.MagicMock()
        config = sunbeam_core.ContainerConfigFile(
            "/tmp/testfile.txt", "myuser", "mygrp"
        )
        fs_loader.return_value = jinja2.DictLoader(
            {"testfile.txt": "debug = {{ debug }}"}
        )
        sunbeam_templating.sidecar_config_render(
            container_mock, config, "/tmp/templates", {"debug": True}
        )
        container_mock.push.assert_called_once_with(
            "/tmp/testfile.txt",
            "debug = True",
            user="myuser",
            group="mygrp",
            permissions=None,
        )

    @mock.patch("jinja2.FileSystemLoader")
    def test_render_no_change(
        self, fs_loader: "jinja2.FileSystemLoader"
    ) -> None:
        """Check rendering template with no content change."""
        container_mock = mock.MagicMock()
        container_mock.pull.return_value = TextIOWrapper(
            BytesIO(b"debug = True")
        )
        config = sunbeam_core.ContainerConfigFile(
            "/tmp/testfile.txt", "myuser", "mygrp"
        )
        fs_loader.return_value = jinja2.DictLoader(
            {"testfile.txt": "debug = {{ debug }}"}
        )
        sunbeam_templating.sidecar_config_render(
            container_mock, config, "/tmp/templates", {"debug": True}
        )
        self.assertFalse(container_mock.push.called)
135
ops-sunbeam/tox.ini
Normal file
@@ -0,0 +1,135 @@
# Operator charm helper: tox.ini

[tox]
skipsdist = True
envlist = lint, py3
sitepackages = False
skip_missing_interpreters = False
minversion = 3.18.0

[vars]
src_path = {toxinidir}/ops_sunbeam
tst_path = {toxinidir}/tests/unit_tests/
scenario_tst_path = {toxinidir}/tests/scenario_tests/
tst_lib_path = {toxinidir}/tests/lib/
pyproject_toml = {toxinidir}/pyproject.toml
cookie_cutter_path = {toxinidir}/shared_code/sunbeam_charm/\{\{cookiecutter.service_name\}\}
all_path = {[vars]src_path} {[vars]tst_path}

[testenv]
basepython = python3
install_command =
  pip install {opts} {packages}
commands =
  stestr run --slowest {posargs}
  pytest -v --tb native {[vars]scenario_tst_path} --log-cli-level=INFO
allowlist_externals =
  git
  charmcraft
  fetch-libs.sh
deps =
  -r{toxinidir}/test-requirements.txt

[testenv:fmt]
description = Apply coding style standards to code
deps =
  black
  isort
commands =
  isort {[vars]all_path} --skip-glob {[vars]tst_lib_path} --skip {toxinidir}/.tox
  black --config {[vars]pyproject_toml} {[vars]all_path} --exclude {[vars]tst_lib_path}

[testenv:fetch]
basepython = python3
deps =
commands =
  {toxinidir}/fetch-libs.sh

[testenv:cookie]
basepython = python3
deps = -r{toxinidir}/cookie-requirements.txt
commands = /bin/true

[testenv:py3]
basepython = python3
deps =
  {[testenv]deps}
  -r{toxinidir}/requirements.txt

[testenv:py38]
basepython = python3.8
deps = {[testenv:py3]deps}

[testenv:py39]
basepython = python3.9
deps = {[testenv:py3]deps}

[testenv:py310]
basepython = python3.10
deps = {[testenv:py3]deps}

[testenv:py311]
basepython = python3.11
deps = {[testenv:py3]deps}

[testenv:pep8]
description = Alias for lint
deps = {[testenv:lint]deps}
commands = {[testenv:lint]commands}

[testenv:lint]
description = Check code against coding style standards
deps =
  black
  flake8<6
  flake8-docstrings
  flake8-copyright
  flake8-builtins
  pyproject-flake8
  pep8-naming
  isort
  codespell
commands =
  codespell {[vars]all_path}
  # pflake8 wrapper supports config from pyproject.toml
  pflake8 --exclude {[vars]tst_lib_path} --config {toxinidir}/pyproject.toml {[vars]all_path}
  isort --check-only --diff {[vars]all_path} --skip-glob {[vars]tst_lib_path}
  black --config {[vars]pyproject_toml} --check --diff {[vars]all_path} --exclude {[vars]tst_lib_path}

[testenv:cover]
basepython = python3
deps = {[testenv:py3]deps}
setenv =
  PYTHON=coverage run
commands =
  coverage erase
  stestr run --slowest {posargs}
  coverage combine
  coverage html -d cover
  coverage xml -o cover/coverage.xml
  coverage report

[testenv:scenario]
description = Scenario tests
deps =
  -r{toxinidir}/requirements.txt
  -r{toxinidir}/test-requirements.txt
commands =
  pytest -v --tb native {[vars]scenario_tst_path} --log-cli-level=INFO

[coverage:run]
branch = True
concurrency = multiprocessing
parallel = True
source =
  .
omit =
  .tox/*
  unit_tests/*

[testenv:venv]
basepython = python3
commands = {posargs}

[flake8]
ignore = E226,E402,ANN101,ANN003,W504