From 74dc1566601c56ba696de4c7a49dd83f76a150ed Mon Sep 17 00:00:00 2001
From: Aurelien Lourot <aurelien.lourot@canonical.com>
Date: Fri, 28 Aug 2020 14:05:05 +0200
Subject: [PATCH] Add Victoria to the test gate

Also sync charm-helpers libraries.
Also take trusty-mitaka out of the gate because of the linked bug.
Also fix the Victoria bundles, which were still trying to deploy
percona-cluster.

Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/434
Change-Id: I575d00b993fbff33d80956278b01e87e434713e0
Related-Bug: #1877076
---
 Makefile                                      |   4 -
 .../contrib/openstack/cert_utils.py           | 124 ++++++++++----
 hooks/charmhelpers/contrib/openstack/ip.py    |  16 ++
 hooks/charmhelpers/contrib/openstack/utils.py |   2 +-
 .../contrib/storage/linux/ceph.py             |  26 ++-
 test-requirements.txt                         |   2 +
 tests/bundles/focal-victoria.yaml             | 162 +++++++++++++-----
 tests/bundles/groovy-victoria.yaml            | 162 +++++++++++++-----
 tests/tests.yaml                              |   9 +-
 9 files changed, 385 insertions(+), 122 deletions(-)

diff --git a/Makefile b/Makefile
index dd69e16..b2f6479 100644
--- a/Makefile
+++ b/Makefile
@@ -19,7 +19,3 @@ bin/charm_helpers_sync.py:
 
 sync: bin/charm_helpers_sync.py
 	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
-
-publish: lint unit_test
-	bzr push lp:charms/cinder-backup
-	bzr push lp:charms/trusty/cinder-backup
\ No newline at end of file
diff --git a/hooks/charmhelpers/contrib/openstack/cert_utils.py b/hooks/charmhelpers/contrib/openstack/cert_utils.py
index b494af6..1eb2154 100644
--- a/hooks/charmhelpers/contrib/openstack/cert_utils.py
+++ b/hooks/charmhelpers/contrib/openstack/cert_utils.py
@@ -34,12 +34,14 @@ from charmhelpers.core.hookenv import (
     WARNING,
 )
 from charmhelpers.contrib.openstack.ip import (
-    ADMIN,
     resolve_address,
     get_vip_in_network,
-    INTERNAL,
-    PUBLIC,
-    ADDRESS_MAP)
+    ADDRESS_MAP,
+    get_default_api_bindings,
+)
+from charmhelpers.contrib.network.ip import (
+    get_relation_ip,
+)
 
 from charmhelpers.core.host import (
     mkdir,
@@ -113,44 +115,118 @@ class CertRequest(object):
         return req
 
 
-def get_certificate_request(json_encode=True):
-    """Generate a certificatee requests based on the network confioguration
+def get_certificate_request(json_encode=True, bindings=None):
+    """Generate a certificate requests based on the network configuration
 
+    :param json_encode: Encode request in JSON or not. Used for setting
+                        directly on a relation.
+    :type json_encode: boolean
+    :param bindings: List of bindings to check in addition to default api
+                     bindings.
+    :type bindings: list of strings
+    :returns: CertRequest request as dictionary or JSON string.
+    :rtype: Union[dict, json]
     """
+    if bindings:
+        # Add default API bindings to bindings list
+        bindings = set(bindings + get_default_api_bindings())
+    else:
+        # Use default API bindings
+        bindings = get_default_api_bindings()
     req = CertRequest(json_encode=json_encode)
     req.add_hostname_cn()
     # Add os-hostname entries
-    for net_type in [INTERNAL, ADMIN, PUBLIC]:
-        net_config = config(ADDRESS_MAP[net_type]['override'])
+    _sans = get_certificate_sans()
+
+    # Handle specific hostnames per binding
+    for binding in bindings:
+        hostname_override = config(ADDRESS_MAP[binding]['override'])
         try:
-            net_addr = resolve_address(endpoint_type=net_type)
+            net_addr = resolve_address(endpoint_type=binding)
             ip = network_get_primary_address(
-                ADDRESS_MAP[net_type]['binding'])
+                ADDRESS_MAP[binding]['binding'])
             addresses = [net_addr, ip]
             vip = get_vip_in_network(resolve_network_cidr(ip))
             if vip:
                 addresses.append(vip)
-            if net_config:
+            # Add hostname certificate request
+            if hostname_override:
                 req.add_entry(
-                    net_type,
-                    net_config,
+                    binding,
+                    hostname_override,
                     addresses)
-            else:
-                # There is network address with no corresponding hostname.
-                # Add the ip to the hostname cert to allow for this.
-                req.add_hostname_cn_ip(addresses)
+            # Remove hostname-specific addresses from _sans
+            for addr in addresses:
+                try:
+                    _sans.remove(addr)
+                except (ValueError, KeyError):
+                    pass
+
         except NoNetworkBinding:
             log("Skipping request for certificate for ip in {} space, no "
-                "local address found".format(net_type), WARNING)
+                "local address found".format(binding), WARNING)
+    # Guarantee all SANs are covered
+    # These are network addresses with no corresponding hostname.
+    # Add the IPs to the hostname cert to allow for this.
+    req.add_hostname_cn_ip(_sans)
     return req.get_request()
 
 
+def get_certificate_sans(bindings=None):
+    """Get all possible IP addresses for certificate SANs.
+    """
+    _sans = [unit_get('private-address')]
+    if bindings:
+        # Add default API bindings to bindings list
+        bindings = set(bindings + get_default_api_bindings())
+    else:
+        # Use default API bindings
+        bindings = get_default_api_bindings()
+
+    for binding in bindings:
+        # Check for config override
+        try:
+            net_config = config(ADDRESS_MAP[binding]['config'])
+        except KeyError:
+            # There is no configuration network for this binding name
+            net_config = None
+        # Using resolve_address is likely redundant. Keeping it here in
+        # case there is an edge case it handles.
+        net_addr = resolve_address(endpoint_type=binding)
+        ip = get_relation_ip(binding, cidr_network=net_config)
+        _sans = _sans + [net_addr, ip]
+        vip = get_vip_in_network(resolve_network_cidr(ip))
+        if vip:
+            _sans.append(vip)
+    return set(_sans)
+
+
 def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
     """Create symlinks for SAN records
 
     :param ssl_dir: str Directory to create symlinks in
     :param custom_hostname_link: str Additional link to be created
     """
+
+    # This includes the hostname cert and any specific binding certs:
+    # admin, internal, public
+    req = get_certificate_request(json_encode=False)["cert_requests"]
+    # Specific certs
+    for cert_req in req.keys():
+        requested_cert = os.path.join(
+            ssl_dir,
+            'cert_{}'.format(cert_req))
+        requested_key = os.path.join(
+            ssl_dir,
+            'key_{}'.format(cert_req))
+        for addr in req[cert_req]['sans']:
+            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
+            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
+            if os.path.isfile(requested_cert) and not os.path.isfile(cert):
+                os.symlink(requested_cert, cert)
+                os.symlink(requested_key, key)
+
+    # Handle custom hostnames
     hostname = get_hostname(unit_get('private-address'))
     hostname_cert = os.path.join(
         ssl_dir,
@@ -158,18 +234,6 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
     hostname_key = os.path.join(
         ssl_dir,
         'key_{}'.format(hostname))
-    # Add links to hostname cert, used if os-hostname vars not set
-    for net_type in [INTERNAL, ADMIN, PUBLIC]:
-        try:
-            addr = resolve_address(endpoint_type=net_type)
-            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
-            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
-            if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
-                os.symlink(hostname_cert, cert)
-                os.symlink(hostname_key, key)
-        except NoNetworkBinding:
-            log("Skipping creating cert symlink for ip in {} space, no "
-                "local address found".format(net_type), WARNING)
     if custom_hostname_link:
         custom_cert = os.path.join(
             ssl_dir,
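
The hunks above change get_certificate_request() to build a single request
covering the default API bindings plus any extra bindings the caller passes,
and create_ip_cert_links() to lay down per-address symlinks for whatever that
request returns. As a rough usage sketch (not part of this patch; it assumes a
Juju hook environment with a 'certificates' relation, and the extra 'cluster'
binding is purely illustrative):

    from charmhelpers.core.hookenv import relation_ids, relation_set
    from charmhelpers.contrib.openstack.cert_utils import (
        get_certificate_request,
    )

    def request_server_certs():
        # The default API bindings (internal, admin, public) are always
        # included; 'cluster' is merged in on top of them.
        cert_req = get_certificate_request(json_encode=True,
                                           bindings=['cluster'])
        for rid in relation_ids('certificates'):
            # With json_encode=True the result is a flat dict of strings,
            # so it can be set directly on the relation.
            relation_set(relation_id=rid, relation_settings=cert_req)
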
diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py
index 723aebc..89cf276 100644
--- a/hooks/charmhelpers/contrib/openstack/ip.py
+++ b/hooks/charmhelpers/contrib/openstack/ip.py
@@ -33,6 +33,7 @@ INTERNAL = 'int'
 ADMIN = 'admin'
 ACCESS = 'access'
 
+# TODO: reconcile 'int' vs 'internal' binding names
 ADDRESS_MAP = {
     PUBLIC: {
         'binding': 'public',
@@ -58,6 +59,14 @@ ADDRESS_MAP = {
         'fallback': 'private-address',
         'override': 'os-access-hostname',
     },
+    # Note (thedac): bridge to begin the reconciliation between the
+    # 'int' and 'internal' binding names
+    'internal': {
+        'binding': 'internal',
+        'config': 'os-internal-network',
+        'fallback': 'private-address',
+        'override': 'os-internal-hostname',
+    },
 }
 
 
@@ -195,3 +204,10 @@ def get_vip_in_network(network):
             if is_address_in_network(network, vip):
                 matching_vip = vip
     return matching_vip
+
+
+def get_default_api_bindings():
+    """Return the list of default API binding names.
+
+    :returns: Binding names for the internal, admin and public endpoints.
+    :rtype: list of strings
+    """
+    _default_bindings = []
+    for binding in [INTERNAL, ADMIN, PUBLIC]:
+        _default_bindings.append(ADDRESS_MAP[binding]['binding'])
+    return _default_bindings
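
The new 'internal' ADDRESS_MAP entry and get_default_api_bindings() let
callers work with binding names rather than the legacy 'int' short code. A
small illustration of what the helper resolves to, given the map above (not
part of this patch; it only assumes charm-helpers is importable):

    from charmhelpers.contrib.openstack.ip import get_default_api_bindings

    # INTERNAL ('int'), ADMIN and PUBLIC map to the binding names
    # 'internal', 'admin' and 'public' respectively.
    assert get_default_api_bindings() == ['internal', 'admin', 'public']
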
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 0aa797c..23e4adf 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -230,7 +230,7 @@ SWIFT_CODENAMES = OrderedDict([
     ('ussuri',
         ['2.24.0', '2.25.0']),
     ('victoria',
-        ['2.25.0']),
+        ['2.25.0', '2.26.0']),
 ])
 
 # >= Liberty version->codename mapping
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index 526b95a..0f69631 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -41,6 +41,7 @@ from subprocess import (
 )
 from charmhelpers import deprecate
 from charmhelpers.core.hookenv import (
+    application_name,
     config,
     service_name,
     local_unit,
@@ -162,6 +163,17 @@ def get_osd_settings(relation_name):
     return _order_dict_by_key(osd_settings)
 
 
+def send_application_name(relid=None):
+    """Send the application name down the relation.
+
+    :param relid: Relation id to set application name in.
+    :type relid: str
+    """
+    relation_set(
+        relation_id=relid,
+        relation_settings={'application-name': application_name()})
+
+
 def send_osd_settings():
     """Pass on requested OSD settings to osd units."""
     try:
@@ -1074,7 +1086,10 @@ def create_erasure_profile(service, profile_name,
                            erasure_plugin_technique=None):
     """Create a new erasure code profile if one does not already exist for it.
 
-    Updates the profile if it exists. Please refer to [0] for more details.
+    Profiles are considered immutable so will not be updated if the named
+    profile already exists.
+
+    Please refer to [0] for more details.
 
     0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
 
@@ -1110,6 +1125,11 @@ def create_erasure_profile(service, profile_name,
     :type erasure_plugin_technique: str
     :return: None.  Can raise CalledProcessError, ValueError or AssertionError
     """
+    if erasure_profile_exists(service, profile_name):
+        log('EC profile {} exists, skipping update'.format(profile_name),
+            level=WARNING)
+        return
+
     plugin_techniques = {
         'jerasure': [
             'reed_sol_van',
@@ -1209,9 +1229,6 @@ def create_erasure_profile(service, profile_name,
         if scalar_mds:
             cmd.append('scalar-mds={}'.format(scalar_mds))
 
-    if erasure_profile_exists(service, profile_name):
-        cmd.append('--force')
-
     check_call(cmd)
 
 
@@ -2198,6 +2215,7 @@ def send_request_if_needed(request, relation='ceph'):
         for rid in relation_ids(relation):
             log('Sending request {}'.format(request.request_id), level=DEBUG)
             relation_set(relation_id=rid, broker_req=request.request)
+            relation_set(relation_id=rid, relation_settings={'unit-name': local_unit()})
 
 
 def has_broker_rsp(rid=None, unit=None):
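
For context, a ceph client charm would typically call the new
send_application_name() helper when the relation is joined and keep using
send_request_if_needed() for broker requests, which now also publishes the
unit name. A minimal sketch (not part of this patch; it assumes a Juju hook
environment and a relation named 'ceph', and the pool name is illustrative):

    from charmhelpers.core.hookenv import relation_ids
    from charmhelpers.contrib.storage.linux.ceph import (
        CephBrokerRq,
        send_application_name,
        send_request_if_needed,
    )

    def ceph_joined():
        for rid in relation_ids('ceph'):
            # Advertise the application name on the relation so the
            # provider side can key data on it.
            send_application_name(relid=rid)

    def ceph_changed():
        rq = CephBrokerRq()
        rq.add_op_create_replicated_pool(name='cinder-backup',
                                         replica_count=3)
        # As of this sync, this also sets 'unit-name' on the relation.
        send_request_if_needed(rq, relation='ceph')
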
diff --git a/test-requirements.txt b/test-requirements.txt
index 56fbf92..8d51d55 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -15,5 +15,7 @@ flake8>=2.2.4
 stestr>=2.2.0
 coverage>=4.5.2
 pyudev              # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
+
+juju!=2.8.3         # this version causes spurious JujuAPIErrors
 git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0'
 git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
diff --git a/tests/bundles/focal-victoria.yaml b/tests/bundles/focal-victoria.yaml
index 3db6388..c0bcc59 100644
--- a/tests/bundles/focal-victoria.yaml
+++ b/tests/bundles/focal-victoria.yaml
@@ -1,55 +1,73 @@
+variables:
+  openstack-origin: &openstack-origin cloud:focal-victoria
+
 series: focal
-relations:
-- - cinder-backup:ceph
-  - ceph-mon:client
-- - cinder-ceph:ceph
-  - ceph-mon:client
-- - ceph-osd:mon
-  - ceph-mon:osd
-- - cinder:storage-backend
-  - cinder-ceph:storage-backend
-- - cinder:backup-backend
-  - cinder-backup:backup-backend
-- - keystone:shared-db
-  - percona-cluster:shared-db
-- - cinder:shared-db
-  - percona-cluster:shared-db
-- - cinder:identity-service
-  - keystone:identity-service
-- - cinder:amqp
-  - rabbitmq-server:amqp
-- - glance:image-service
-  - nova-compute:image-service
-- - glance:identity-service
-  - keystone:identity-service
-- - glance:shared-db
-  - percona-cluster:shared-db
-- - nova-compute:ceph-access
-  - cinder-ceph:ceph-access
-- - nova-compute:amqp
-  - rabbitmq-server:amqp
+
+comment:
+- 'machines section to decide order of deployment. database sooner = faster'
+machines:
+  '0':
+    constraints: mem=3072M
+  '1':
+    constraints: mem=3072M
+  '2':
+    constraints: mem=3072M
+  '3':
+  '4':
+  '5':
+  '6':
+  '7':
+  '8':
+  '9':
+  '10':
+  '11':
+  '12':
+  '13':
+
 applications:
-  percona-cluster:
-    charm: cs:~openstack-charmers-next/percona-cluster
-    num_units: 1
+
+  keystone-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  cinder-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  glance-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+
+  mysql-innodb-cluster:
+    charm: cs:~openstack-charmers-next/mysql-innodb-cluster
+    num_units: 3
     options:
-      innodb-buffer-pool-size: 256M
-      max-connections: 1000
-      source: cloud:focal-victoria
+      source: *openstack-origin
+    to:
+      - '0'
+      - '1'
+      - '2'
+
   keystone:
     charm: cs:~openstack-charmers-next/keystone
     num_units: 1
     options:
-      openstack-origin: cloud:focal-victoria
+      openstack-origin: *openstack-origin
+    to:
+      - '3'
+
   rabbitmq-server:
     charm: cs:~openstack-charmers-next/rabbitmq-server
     num_units: 1
+    to:
+      - '4'
+
   ceph-mon:
     charm: cs:~openstack-charmers-next/ceph-mon
     num_units: 3
     options:
       monitor-count: '3'
-      source: cloud:focal-victoria
+      source: *openstack-origin
+    to:
+      - '5'
+      - '6'
+      - '7'
+
   ceph-osd:
     charm: cs:~openstack-charmers-next/ceph-osd
     num_units: 3
@@ -57,26 +75,90 @@ applications:
       osd-devices: 'cinder,10G'
     options:
       osd-devices: '/dev/test-non-existent'
-      source: cloud:focal-victoria
+      source: *openstack-origin
+    to:
+      - '8'
+      - '9'
+      - '10'
+
   cinder:
     charm: cs:~openstack-charmers-next/cinder
     num_units: 1
     options:
       block-device: 'None'
       glance-api-version: '2'
-      openstack-origin: cloud:focal-victoria
+      openstack-origin: *openstack-origin
+    to:
+      - '11'
+
   cinder-backup:
     charm: ../../../cinder-backup
-    series: focal
     options:
       ceph-osd-replication-count: 3
+
   cinder-ceph:
     charm: cs:~openstack-charmers-next/cinder-ceph
     options:
       ceph-osd-replication-count: 3
+
   glance:
     charm: cs:~openstack-charmers-next/glance
     num_units: 1
+    to:
+      - '12'
+
   nova-compute:
     charm: cs:~openstack-charmers-next/nova-compute
     num_units: 1
+    to:
+      - '13'
+
+relations:
+
+  - - 'cinder-backup:ceph'
+    - 'ceph-mon:client'
+
+  - - 'cinder-ceph:ceph'
+    - 'ceph-mon:client'
+
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+
+  - - 'cinder:storage-backend'
+    - 'cinder-ceph:storage-backend'
+
+  - - 'cinder:backup-backend'
+    - 'cinder-backup:backup-backend'
+
+  - - 'keystone:shared-db'
+    - 'keystone-mysql-router:shared-db'
+  - - 'keystone-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'cinder:shared-db'
+    - 'cinder-mysql-router:shared-db'
+  - - 'cinder-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'glance:shared-db'
+    - 'glance-mysql-router:shared-db'
+  - - 'glance-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/bundles/groovy-victoria.yaml b/tests/bundles/groovy-victoria.yaml
index 84b22f8..0b4f85e 100644
--- a/tests/bundles/groovy-victoria.yaml
+++ b/tests/bundles/groovy-victoria.yaml
@@ -1,55 +1,73 @@
+variables:
+  openstack-origin: &openstack-origin distro
+
 series: groovy
-relations:
-- - cinder-backup:ceph
-  - ceph-mon:client
-- - cinder-ceph:ceph
-  - ceph-mon:client
-- - ceph-osd:mon
-  - ceph-mon:osd
-- - cinder:storage-backend
-  - cinder-ceph:storage-backend
-- - cinder:backup-backend
-  - cinder-backup:backup-backend
-- - keystone:shared-db
-  - percona-cluster:shared-db
-- - cinder:shared-db
-  - percona-cluster:shared-db
-- - cinder:identity-service
-  - keystone:identity-service
-- - cinder:amqp
-  - rabbitmq-server:amqp
-- - glance:image-service
-  - nova-compute:image-service
-- - glance:identity-service
-  - keystone:identity-service
-- - glance:shared-db
-  - percona-cluster:shared-db
-- - nova-compute:ceph-access
-  - cinder-ceph:ceph-access
-- - nova-compute:amqp
-  - rabbitmq-server:amqp
+
+comment:
+- 'machines section to decide order of deployment. database sooner = faster'
+machines:
+  '0':
+    constraints: mem=3072M
+  '1':
+    constraints: mem=3072M
+  '2':
+    constraints: mem=3072M
+  '3':
+  '4':
+  '5':
+  '6':
+  '7':
+  '8':
+  '9':
+  '10':
+  '11':
+  '12':
+  '13':
+
 applications:
-  percona-cluster:
-    charm: cs:~openstack-charmers-next/percona-cluster
-    num_units: 1
+
+  keystone-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  cinder-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  glance-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+
+  mysql-innodb-cluster:
+    charm: cs:~openstack-charmers-next/mysql-innodb-cluster
+    num_units: 3
     options:
-      innodb-buffer-pool-size: 256M
-      max-connections: 1000
-      source: distro
+      source: *openstack-origin
+    to:
+      - '0'
+      - '1'
+      - '2'
+
   keystone:
     charm: cs:~openstack-charmers-next/keystone
     num_units: 1
     options:
-      openstack-origin: distro
+      openstack-origin: *openstack-origin
+    to:
+      - '3'
+
   rabbitmq-server:
     charm: cs:~openstack-charmers-next/rabbitmq-server
     num_units: 1
+    to:
+      - '4'
+
   ceph-mon:
     charm: cs:~openstack-charmers-next/ceph-mon
     num_units: 3
     options:
       monitor-count: '3'
-      source: distro
+      source: *openstack-origin
+    to:
+      - '5'
+      - '6'
+      - '7'
+
   ceph-osd:
     charm: cs:~openstack-charmers-next/ceph-osd
     num_units: 3
@@ -57,26 +75,90 @@ applications:
       osd-devices: 'cinder,10G'
     options:
       osd-devices: '/dev/test-non-existent'
-      source: distro
+      source: *openstack-origin
+    to:
+      - '8'
+      - '9'
+      - '10'
+
   cinder:
     charm: cs:~openstack-charmers-next/cinder
     num_units: 1
     options:
       block-device: 'None'
       glance-api-version: '2'
-      openstack-origin: distro
+      openstack-origin: *openstack-origin
+    to:
+      - '11'
+
   cinder-backup:
     charm: ../../../cinder-backup
-    series: groovy
     options:
       ceph-osd-replication-count: 3
+
   cinder-ceph:
     charm: cs:~openstack-charmers-next/cinder-ceph
     options:
       ceph-osd-replication-count: 3
+
   glance:
     charm: cs:~openstack-charmers-next/glance
     num_units: 1
+    to:
+      - '12'
+
   nova-compute:
     charm: cs:~openstack-charmers-next/nova-compute
     num_units: 1
+    to:
+      - '13'
+
+relations:
+
+  - - 'cinder-backup:ceph'
+    - 'ceph-mon:client'
+
+  - - 'cinder-ceph:ceph'
+    - 'ceph-mon:client'
+
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+
+  - - 'cinder:storage-backend'
+    - 'cinder-ceph:storage-backend'
+
+  - - 'cinder:backup-backend'
+    - 'cinder-backup:backup-backend'
+
+  - - 'keystone:shared-db'
+    - 'keystone-mysql-router:shared-db'
+  - - 'keystone-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'cinder:shared-db'
+    - 'cinder-mysql-router:shared-db'
+  - - 'cinder-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'glance:shared-db'
+    - 'glance-mysql-router:shared-db'
+  - - 'glance-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/tests.yaml b/tests/tests.yaml
index 92b2dd7..3575f5a 100644
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -2,7 +2,6 @@ charm_name: cinder-backup
 smoke_bundles:
   - bionic-train
 gate_bundles:
-  - trusty-mitaka
   - xenial-mitaka
   - xenial-ocata
   - xenial-pike
@@ -12,9 +11,13 @@ gate_bundles:
   - bionic-stein
   - bionic-train
   - bionic-ussuri
-dev_bundles:
-  - focal-ussuri
   - focal-victoria
   - groovy-victoria
+dev_bundles:
+  - trusty-mitaka  # fails often because of lp:1877076
+  - focal-ussuri   # disabled because of lp:1891626
 tests:
   - zaza.openstack.charm_tests.cinder_backup.tests.CinderBackupTest
+tests_options:
+  force_deploy:
+    - groovy-victoria