Merge "Add proxysql support for database"

Zuul 2022-07-29 18:04:31 +00:00 committed by Gerrit Code Review
commit 6deebac611
23 changed files with 984 additions and 11 deletions


@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
#
# Copyright 2022 Michal Arbet (kevko)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kolla_ansible.database_shards import database_shards_info
class FilterModule(object):
"""Database shards filters"""
def filters(self):
return {
'database_shards_info': database_shards_info,
}


@ -373,13 +373,17 @@ mariadb_wsrep_port: "4567"
mariadb_ist_port: "4568"
mariadb_sst_port: "4444"
mariadb_clustercheck_port: "4569"
-mariadb_monitor_user: "haproxy"
+mariadb_monitor_user: "{{ 'monitor' if enable_proxysql | bool else 'haproxy' }}"
mariadb_default_database_shard_id: 0
mariadb_default_database_shard_hosts: "{% set default_shard = [] %}{% for host in groups['mariadb'] %}{% if hostvars[host]['mariadb_shard_id'] is not defined or hostvars[host]['mariadb_shard_id'] == mariadb_default_database_shard_id %}{{ default_shard.append(host) }}{% endif %}{% endfor %}{{ default_shard }}"
mariadb_shard_id: "{{ mariadb_default_database_shard_id }}"
mariadb_shard_name: "shard_{{ mariadb_shard_id }}"
mariadb_shard_group: "mariadb_{{ mariadb_shard_name }}"
-mariadb_loadbalancer: "haproxy"
+mariadb_loadbalancer: "{{ 'proxysql' if enable_proxysql | bool else 'haproxy' }}"
mariadb_shard_root_user_prefix: "root_shard_"
mariadb_shard_backup_user_prefix: "backup_shard_"
mariadb_shards_info: "{{ groups['mariadb'] | database_shards_info() }}"
masakari_api_port: "15868"
@ -465,6 +469,8 @@ prometheus_elasticsearch_exporter_port: "9108"
# Prometheus blackbox-exporter ports
prometheus_blackbox_exporter_port: "9115"
proxysql_admin_port: "6032"
rabbitmq_port: "{{ '5671' if rabbitmq_enable_tls | bool else '5672' }}"
rabbitmq_management_port: "15672"
rabbitmq_cluster_port: "25672"
@ -586,7 +592,7 @@ enable_openstack_core: "yes"
enable_glance: "{{ enable_openstack_core | bool }}"
enable_haproxy: "yes"
enable_keepalived: "{{ enable_haproxy | bool }}"
-enable_loadbalancer: "{{ enable_haproxy | bool or enable_keepalived | bool }}"
+enable_loadbalancer: "{{ enable_haproxy | bool or enable_keepalived | bool or enable_proxysql | bool }}"
enable_keystone: "{{ enable_openstack_core | bool }}"
enable_keystone_federation: "{{ (keystone_identity_providers | length > 0) and (keystone_identity_mappings | length > 0) }}"
enable_mariadb: "yes"
@ -706,6 +712,7 @@ enable_ovs_dpdk: "no"
enable_osprofiler: "no"
enable_placement: "{{ enable_nova | bool or enable_zun | bool }}"
enable_prometheus: "no"
enable_proxysql: "no"
enable_redis: "no"
enable_sahara: "no"
enable_senlin: "no"
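For context, the per-host mariadb_shard_id consumed by the new defaults above normally comes from the inventory. A minimal sketch in YAML inventory format (host names and shard numbers are hypothetical) could look like:

    mariadb:
      hosts:
        controller0:
          mariadb_shard_id: 0
        controller1:
          mariadb_shard_id: 0
        controller2:
          mariadb_shard_id: 1

Hosts that do not set mariadb_shard_id fall back to mariadb_default_database_shard_id, and the database_shards_info filter groups the hosts into shards accordingly.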


@ -9,6 +9,15 @@ loadbalancer_services:
volumes: "{{ haproxy_default_volumes + haproxy_extra_volumes }}"
dimensions: "{{ haproxy_dimensions }}"
healthcheck: "{{ haproxy_healthcheck }}"
proxysql:
container_name: proxysql
group: loadbalancer
enabled: "{{ enable_proxysql | bool }}"
image: "{{ proxysql_image_full }}"
privileged: False
volumes: "{{ proxysql_default_volumes + proxysql_extra_volumes }}"
dimensions: "{{ proxysql_dimensions }}"
healthcheck: "{{ proxysql_healthcheck }}"
keepalived:
container_name: keepalived
group: loadbalancer
@ -30,6 +39,10 @@ haproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_
haproxy_tag: "{{ openstack_tag }}"
haproxy_image_full: "{{ haproxy_image }}:{{ haproxy_tag }}"
proxysql_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/proxysql"
proxysql_tag: "{{ openstack_tag }}"
proxysql_image_full: "{{ proxysql_image }}:{{ proxysql_tag }}"
syslog_server: "{{ api_interface_address }}"
syslog_haproxy_facility: "local1"
@ -44,6 +57,7 @@ haproxy_process_cpu_map: "no"
haproxy_defaults_max_connections: 10000
haproxy_dimensions: "{{ default_container_dimensions }}"
proxysql_dimensions: "{{ default_container_dimensions }}"
keepalived_dimensions: "{{ default_container_dimensions }}"
haproxy_enable_healthchecks: "{{ enable_container_healthchecks }}"
@ -59,21 +73,58 @@ haproxy_healthcheck:
test: "{% if haproxy_enable_healthchecks | bool %}{{ haproxy_healthcheck_test }}{% else %}NONE{% endif %}"
timeout: "{{ haproxy_healthcheck_timeout }}"
proxysql_enable_healthchecks: "{{ enable_container_healthchecks }}"
proxysql_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
proxysql_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
proxysql_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
proxysql_healthcheck_test: ["CMD-SHELL", "healthcheck_listen proxysql {{ proxysql_admin_port }}"]
proxysql_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
proxysql_healthcheck:
interval: "{{ proxysql_healthcheck_interval }}"
retries: "{{ proxysql_healthcheck_retries }}"
start_period: "{{ proxysql_healthcheck_start_period }}"
test: "{% if proxysql_enable_healthchecks | bool %}{{ proxysql_healthcheck_test }}{% else %}NONE{% endif %}"
timeout: "{{ proxysql_healthcheck_timeout }}"
haproxy_default_volumes:
- "{{ node_config_directory }}/haproxy/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "haproxy_socket:/var/lib/kolla/haproxy/"
proxysql_default_volumes:
- "{{ node_config_directory }}/proxysql/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- "proxysql:/var/lib/proxysql/"
- "proxysql_socket:/var/lib/kolla/proxysql/"
keepalived_default_volumes:
- "{{ node_config_directory }}/keepalived/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
-- "haproxy_socket:/var/lib/kolla/haproxy/"
+- "{{ 'haproxy_socket:/var/lib/kolla/haproxy/' if enable_haproxy | bool else '' }}"
- "{{ 'proxysql_socket:/var/lib/kolla/proxysql/' if enable_proxysql | bool else '' }}"
haproxy_extra_volumes: "{{ default_extra_volumes }}"
proxysql_extra_volumes: "{{ default_extra_volumes }}"
keepalived_extra_volumes: "{{ default_extra_volumes }}"
# Default proxysql values
proxysql_workers: "{{ openstack_service_workers }}"
# The maximum number of client connections that the proxy can handle.
# After this number is reached, new connections will be rejected with
# error #HY000 and the message 'Too many connections'.
#
# As proxysql can route queries to several mariadb clusters, this
# value is set to 4x {{ proxysql_backend_max_connections }}
proxysql_max_connections: 40000
# The maximum number of connections to mariadb backends.
proxysql_backend_max_connections: 10000
proxysql_admin_user: "kolla-admin"
proxysql_stats_user: "kolla-stats"
# Default timeout values
haproxy_http_request_timeout: "10s"
haproxy_http_keep_alive_timeout: "10s"
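As a rough illustration of how the new connection limits could be tuned, an operator might override them in globals.yml; the values below are hypothetical and simply keep the 4x ratio described in the comments above:

    enable_proxysql: "yes"
    proxysql_backend_max_connections: 20000
    proxysql_max_connections: 80000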


@ -23,6 +23,7 @@
- kolla_action != "config"
listen:
- Restart haproxy container
- Restart proxysql container
- Restart keepalived container
- name: Group HA nodes by status
@ -35,6 +36,7 @@
- kolla_action != "config"
listen:
- Restart haproxy container
- Restart proxysql container
- Restart keepalived container
- name: Stop backup keepalived container
@ -71,6 +73,26 @@
- Restart haproxy container
- Restart keepalived container
- name: Stop backup proxysql container
become: true
kolla_docker:
action: "stop_container"
# NOTE(kevko): backup node might not have proxysql yet - ignore
ignore_missing: true
common_options: "{{ docker_common_options }}"
name: "proxysql"
when:
- kolla_action != "config"
- groups.kolla_ha_is_master_False is defined
- inventory_hostname in groups.kolla_ha_is_master_False
listen:
# NOTE(kevko): We need the following "Restart haproxy container" as
# there is nothing to trigger "Restart proxysql container" when
# proxysql is deconfigured.
- Restart haproxy container
- Restart proxysql container
- Restart keepalived container
- name: Start backup haproxy container
vars:
service_name: "haproxy"
@ -101,6 +123,40 @@
host: "{{ api_interface_address }}"
port: "{{ haproxy_monitor_port }}"
- name: Start backup proxysql container
vars:
service_name: "proxysql"
service: "{{ loadbalancer_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- kolla_action != "config"
- groups.kolla_ha_is_master_False is defined
- inventory_hostname in groups.kolla_ha_is_master_False
- service.enabled | bool
listen:
# NOTE(kevko): We need the following "Restart haproxy container" as
# there is nothing to trigger "Restart proxysql container" when
# proxysql is configured.
- Restart haproxy container
- Restart proxysql container
- Restart keepalived container
notify:
- Wait for backup proxysql to start
- name: Wait for backup proxysql to start
wait_for:
host: "{{ api_interface_address }}"
port: "{{ proxysql_admin_port }}"
- name: Start backup keepalived container
vars:
service_name: "keepalived"
@ -124,7 +180,7 @@
notify:
- Wait for virtual IP to appear
-# NOTE(yoctozepto): This is to ensure haproxy can close any open connections
+# NOTE(yoctozepto): This is to ensure haproxy, proxysql can close any open connections
# to the VIP address.
- name: Stop master haproxy container
become: true
@ -139,6 +195,22 @@
- groups.kolla_ha_is_master_True is defined
- inventory_hostname in groups.kolla_ha_is_master_True
listen:
- Restart haproxy container
- Restart keepalived container
- name: Stop master proxysql container
become: true
kolla_docker:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "proxysql"
ignore_missing: true
when:
- kolla_action != "config"
- groups.kolla_ha_is_master_True is defined
- inventory_hostname in groups.kolla_ha_is_master_True
listen:
- Restart proxysql container
- Restart keepalived container
- name: Stop master keepalived container
@ -184,6 +256,36 @@
host: "{{ api_interface_address }}"
port: "{{ haproxy_monitor_port }}"
- name: Start master proxysql container
vars:
service_name: "proxysql"
service: "{{ loadbalancer_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- kolla_action != "config"
- groups.kolla_ha_is_master_True is defined
- inventory_hostname in groups.kolla_ha_is_master_True
- service.enabled | bool
listen:
- Restart proxysql container
- Restart keepalived container
notify:
- Wait for master proxysql to start
- name: Wait for master proxysql to start
wait_for:
host: "{{ api_interface_address }}"
port: "{{ proxysql_admin_port }}"
- name: Start master keepalived container
vars:
service_name: "keepalived"
@ -218,3 +320,15 @@
- service.enabled | bool
listen:
- Wait for virtual IP to appear
- name: Wait for proxysql to listen on VIP
vars:
service_name: "proxysql"
service: "{{ loadbalancer_services[service_name] }}"
wait_for:
host: "{{ kolla_internal_vip_address }}"
port: "{{ proxysql_admin_port }}"
when:
- service.enabled | bool
listen:
- Wait for virtual IP to appear


@ -26,6 +26,86 @@
- inventory_hostname in groups[service.group]
- service.enabled | bool
- name: Ensuring proxysql service config subdirectories exist
vars:
service: "{{ loadbalancer_services['proxysql'] }}"
file:
path: "{{ node_config_directory }}/proxysql/{{ item }}"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
with_items:
- "users"
- "rules"
when:
- inventory_hostname in groups[service.group]
- service.enabled | bool
- name: Ensuring keepalived checks subdir exists
vars:
service: "{{ loadbalancer_services['keepalived'] }}"
file:
path: "{{ node_config_directory }}/keepalived/checks"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
when:
- inventory_hostname in groups[service.group]
- service.enabled | bool
- name: Remove mariadb.cfg if proxysql enabled
vars:
service: "{{ loadbalancer_services['keepalived'] }}"
file:
path: "{{ node_config_directory }}/haproxy/services.d/mariadb.cfg"
state: absent
become: true
when:
- inventory_hostname in groups[service.group]
- service.enabled | bool
- loadbalancer_services.proxysql.enabled | bool
notify:
- Restart haproxy container
- name: Removing checks for services which are disabled
vars:
service: "{{ loadbalancer_services['keepalived'] }}"
file:
path: "{{ node_config_directory }}/keepalived/checks/check_alive_{{ item.key }}.sh"
state: absent
become: true
with_dict: "{{ loadbalancer_services }}"
when:
- inventory_hostname in groups[service.group]
- item.key != 'keepalived'
- not item.value.enabled | bool
or not inventory_hostname in groups[item.value.group]
- service.enabled | bool
notify:
- Restart keepalived container
- name: Copying checks for services which are enabled
vars:
service: "{{ loadbalancer_services['keepalived'] }}"
template:
src: "keepalived/check_alive_{{ item.key }}.sh.j2"
dest: "{{ node_config_directory }}/keepalived/checks/check_alive_{{ item.key }}.sh"
mode: "0770"
become: true
with_dict: "{{ loadbalancer_services }}"
when:
- inventory_hostname in groups[service.group]
- inventory_hostname in groups[item.value.group]
- item.key != 'keepalived'
- item.value.enabled | bool
- service.enabled | bool
notify:
- Restart keepalived container
- name: Copying over config.json files for services
template:
src: "{{ item.key }}/{{ item.key }}.json.j2"
@ -57,6 +137,24 @@
notify:
- Restart haproxy container
- name: Copying over proxysql config
vars:
service: "{{ loadbalancer_services['proxysql'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/proxysql/proxysql.yaml"
mode: "0660"
become: true
when:
- inventory_hostname in groups[service.group]
- service.enabled | bool
with_first_found:
- "{{ node_custom_config }}/proxysql/{{ inventory_hostname }}/proxysql.yaml"
- "{{ node_custom_config }}/proxysql/proxysql.yaml"
- "proxysql/proxysql.yaml.j2"
notify:
- Restart proxysql container
- name: Copying over custom haproxy services configuration
vars:
service: "{{ loadbalancer_services['haproxy'] }}"
@ -148,3 +246,21 @@
- "haproxy/haproxy_run.sh.j2" - "haproxy/haproxy_run.sh.j2"
notify: notify:
- Restart haproxy container - Restart haproxy container
- name: Copying over proxysql start script
vars:
service: "{{ loadbalancer_services['proxysql'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/proxysql/proxysql_run.sh"
mode: "0770"
become: true
when:
- inventory_hostname in groups[service.group]
- service.enabled | bool
with_first_found:
- "{{ node_custom_config }}/proxysql/{{ inventory_hostname }}/proxysql_run.sh"
- "{{ node_custom_config }}/proxysql/proxysql_run.sh"
- "proxysql/proxysql_run.sh.j2"
notify:
- Restart proxysql container


@ -10,6 +10,7 @@
kolla_container_facts:
name:
- haproxy
- proxysql
- keepalived
register: container_facts
@ -29,6 +30,14 @@
- enable_haproxy | bool
- inventory_hostname in groups['loadbalancer']
- name: Group hosts by whether they are running ProxySQL
group_by:
key: "proxysql_running_{{ container_facts['proxysql'] is defined }}"
changed_when: false
when:
- enable_proxysql | bool
- inventory_hostname in groups['loadbalancer']
- name: Set facts about whether we can run HAProxy and keepalived VIP prechecks
vars:
# NOTE(mgoddard): We can only reliably run this precheck if all hosts in
@ -38,6 +47,7 @@
set_fact:
keepalived_vip_prechecks: "{{ all_hosts_in_batch and groups['keepalived_running_True'] is not defined }}"
haproxy_vip_prechecks: "{{ all_hosts_in_batch and groups['haproxy_running_True'] is not defined }}"
proxysql_vip_prechecks: "{{ all_hosts_in_batch and groups['proxysql_running_True'] is not defined }}"
- name: Checking if external haproxy certificate exists
run_once: true
@ -143,6 +153,31 @@
- inventory_hostname in groups['loadbalancer']
- api_interface_address != kolla_internal_vip_address
- name: Checking free port for ProxySQL admin (api interface)
wait_for:
host: "{{ api_interface_address }}"
port: "{{ proxysql_admin_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- enable_proxysql | bool
- container_facts['proxysql'] is not defined
- inventory_hostname in groups['loadbalancer']
- name: Checking free port for ProxySQL admin (vip interface)
wait_for:
host: "{{ kolla_internal_vip_address }}"
port: "{{ proxysql_admin_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- enable_proxysql | bool
- proxysql_vip_prechecks
- inventory_hostname in groups['loadbalancer']
- api_interface_address != kolla_internal_vip_address
# FIXME(yoctozepto): this req seems arbitrary, they need not be, just routable is fine
- name: Checking if kolla_internal_vip_address is in the same network as api_interface on all nodes
become: true
@ -470,7 +505,7 @@
- haproxy_stat.find('manila_api') == -1
- haproxy_vip_prechecks
-- name: Checking free port for MariaDB HAProxy
+- name: Checking free port for MariaDB HAProxy/ProxySQL
wait_for:
host: "{{ kolla_internal_vip_address }}"
port: "{{ database_port }}"
@ -481,7 +516,7 @@
- enable_mariadb | bool
- inventory_hostname in groups['loadbalancer']
- haproxy_stat.find('mariadb') == -1
-- haproxy_vip_prechecks
+- haproxy_vip_prechecks or proxysql_vip_prechecks
- name: Checking free port for Masakari API HAProxy
wait_for:


@ -0,0 +1,6 @@
#!/bin/bash
# This will return 0 when it successfully talks to the haproxy daemon via the socket
# Failures return 1
echo "show info" | socat unix-connect:/var/lib/kolla/haproxy/haproxy.sock stdio > /dev/null


@ -0,0 +1,6 @@
#!/bin/bash
# This will return 0 when it successfully talks to the ProxySQL daemon via the admin socket
# Failures return 1
echo "show info" | socat unix-connect:/var/lib/kolla/proxysql/admin.sock stdio > /dev/null


@ -6,6 +6,12 @@
"dest": "/etc/keepalived/keepalived.conf", "dest": "/etc/keepalived/keepalived.conf",
"owner": "root", "owner": "root",
"perm": "0600" "perm": "0600"
},
{
"source": "{{ container_config_directory }}/checks/",
"dest": "/checks",
"owner": "root",
"perm": "0770"
}
]
}


@ -0,0 +1,29 @@
{
"command": "/etc/proxysql_run.sh",
"config_files": [
{
"source": "{{ container_config_directory }}/proxysql_run.sh",
"dest": "/etc/proxysql_run.sh",
"owner": "proxysql",
"perm": "0700"
},
{
"source": "{{ container_config_directory }}/proxysql.yaml",
"dest": "/etc/proxysql/proxysql.yaml",
"owner": "proxysql",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/users/",
"dest": "/etc/proxysql/users",
"owner": "proxysql",
"perm": "0700"
},
{
"source": "{{ container_config_directory }}/rules/",
"dest": "/etc/proxysql/rules",
"owner": "proxysql",
"perm": "0700"
}
]
}


@ -0,0 +1,55 @@
# This configuration file is used to configure proxysql.
#
# Admin_variables: https://proxysql.com/documentation/global-variables/admin-variables
# Mysql_variables: https://proxysql.com/documentation/global-variables/mysql-variables
# Mysql_servers: https://proxysql.com/documentation/main-runtime/#mysql_servers
# Mysql_galera_hostgroups: https://proxysql.com/documentation/main-runtime/#mysql_galera_hostgroups
datadir: "/var/lib/proxysql"
errorlog: "/var/log/kolla/proxysql/proxysql.log"
admin_variables:
admin_credentials: "{{ proxysql_admin_user }}:{{ proxysql_admin_password }}"
mysql_ifaces: "{{ api_interface_address }}:{{ proxysql_admin_port }};{{ kolla_internal_vip_address }}:{{ proxysql_admin_port }};/var/lib/kolla/proxysql/admin.sock"
stats_credentials: "{{ proxysql_stats_user }}:{{ proxysql_stats_password }}"
mysql_variables:
threads: {{ proxysql_workers }}
max_connections: {{ proxysql_max_connections }}
interfaces: "{{ kolla_internal_vip_address }}:{{ database_port }}"
monitor_username: "{{ mariadb_monitor_user }}"
monitor_password: "{{ mariadb_monitor_password }}"
mysql_servers:
{% for shard_id, shard in mariadb_shards_info.shards.items() %}
{% set WRITER_GROUP = shard_id | int * 10 %}
{% for host in shard.hosts %}
{% if loop.first %}
{% set WEIGHT = 100 %}
{% else %}
{% set WEIGHT = 10 %}
{% endif %}
- address: "{{ 'api' | kolla_address(host) }}"
port : {{ database_port }}
hostgroup : {{ WRITER_GROUP }}
max_connections: {{ proxysql_backend_max_connections }}
weight : {{ WEIGHT }}
comment : "Writer {{ host }}"
{% endfor %}
{% endfor %}
mysql_galera_hostgroups:
{% for shard_id, shard in mariadb_shards_info.shards.items() %}
{% set WRITER_GROUP = shard_id | int * 10 %}
{% set BACKUP_WRITER_GROUP = WRITER_GROUP | int + 1 %}
{% set READER_GROUP = BACKUP_WRITER_GROUP | int + 1 %}
{% set OFFLINE_GROUP = READER_GROUP | int + 1 %}
- writer_hostgroup: {{ WRITER_GROUP }}
backup_writer_hostgroup: {{ BACKUP_WRITER_GROUP }}
reader_hostgroup: {{ READER_GROUP }}
offline_hostgroup: {{ OFFLINE_GROUP }}
max_connections: {{ proxysql_backend_max_connections }}
max_writers: 1
writer_is_also_reader: 0
comment: "Galera cluster for shard {{ shard_id }}"
{% endfor %}
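To make the hostgroup numbering above concrete, here is a sketch of what this template might render for a hypothetical shard 1 with two hosts, controller3 and controller4 (addresses are placeholders): writer hostgroup 10, backup writer 11, reader 12, offline 13, with the first host weighted 100 and the rest 10:

    mysql_servers:
      - address: "192.0.2.13"
        port : 3306
        hostgroup : 10
        max_connections: 10000
        weight : 100
        comment : "Writer controller3"
      - address: "192.0.2.14"
        port : 3306
        hostgroup : 10
        max_connections: 10000
        weight : 10
        comment : "Writer controller4"
    mysql_galera_hostgroups:
      - writer_hostgroup: 10
        backup_writer_hostgroup: 11
        reader_hostgroup: 12
        offline_hostgroup: 13
        max_connections: 10000
        max_writers: 1
        writer_is_also_reader: 0
        comment: "Galera cluster for shard 1"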


@ -0,0 +1,7 @@
#!/bin/bash
PROXYSQL_LOG_FILE="/var/log/kolla/proxysql/proxysql.log"
proxysql \
--idle-threads \
--no-version-check -f -c /etc/proxysql.cnf >> ${PROXYSQL_LOG_FILE} 2>&1


@ -42,7 +42,7 @@ mariadb_services:
dimensions: "{{ mariadb_clustercheck_dimensions }}"
environment:
MYSQL_USERNAME: "{{ mariadb_monitor_user }}"
-MYSQL_PASSWORD: ""
+MYSQL_PASSWORD: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}"
MYSQL_HOST: "{{ api_interface_address }}"
AVAILABLE_WHEN_DONOR: "1"
@ -107,7 +107,7 @@ mariabackup_image_full: "{{ mariabackup_image }}:{{ mariabackup_tag }}"
mariadb_backup_host: "{{ groups[mariadb_shard_group][0] }}"
mariadb_backup_database_schema: "PERCONA_SCHEMA"
-mariadb_backup_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}backup{% else %}backup_{{ mariadb_shard_name }}{% endif %}"
+mariadb_backup_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}backup{% else %}{{ mariadb_shard_backup_user_prefix }}{{ mariadb_shard_id | string }}{% endif %}"
mariadb_backup_type: "full"
mariadb_backup_possible: "{{ mariadb_loadbalancer != 'haproxy' or inventory_hostname in mariadb_default_database_shard_hosts }}" mariadb_backup_possible: "{{ mariadb_loadbalancer != 'haproxy' or inventory_hostname in mariadb_default_database_shard_hosts }}"
@ -119,4 +119,4 @@ enable_mariadb_clustercheck: "{{ enable_haproxy }}"
####################
# Sharding
####################
-mariadb_shard_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}{{ database_user }}{% else %}root_{{ mariadb_shard_name }}{% endif %}"
+mariadb_shard_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}{{ database_user }}{% else %}{{ mariadb_shard_root_user_prefix }}{{ mariadb_shard_id | string }}{% endif %}"


@ -1,7 +1,56 @@
---
# NOTE(kevko): We have to ignore errors here,
# as new deployments have no Galera cluster
# running yet. In that case, the monitor user
# will be created in the mariadb role.
#
# It does not matter that the monitor user is
# also created in the mariadb role.
#
# If a user is switching from haproxy to proxysql,
# the monitor user has to be created before proxysql
# starts, otherwise proxysql will evaluate the mariadb
# backends as down, because there is no monitor user
# yet (only the old haproxy user without a password).
#
# Creating the monitor user only in the mariadb role
# would be too late.
- name: Ensure mysql monitor user exists
vars:
shard_id: "{{ item.key }}"
host: "{{ mariadb_shards_info.shards[shard_id].hosts[0] }}"
become: true
kolla_toolbox:
module_name: mysql_user
module_args:
login_host: "{{ host }}"
login_port: "{{ mariadb_port }}"
login_user: "{{ database_user }}"
login_password: "{{ database_password }}"
name: "{{ mariadb_monitor_user }}"
password: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}"
host: "%"
priv: "*.*:USAGE"
tags: always
with_dict: "{{ mariadb_shards_info.shards }}"
loop_control:
label: "{{ host }}"
failed_when: False
run_once: True
- name: "Configure haproxy for {{ project_name }}" - name: "Configure haproxy for {{ project_name }}"
import_role: import_role:
name: haproxy-config name: haproxy-config
vars: vars:
project_services: "{{ mariadb_services }}" project_services: "{{ mariadb_services }}"
tags: always tags: always
when: not enable_proxysql | bool
- name: "Configure proxysql for {{ project_name }}"
import_role:
name: proxysql-config
vars:
project: "mariadb"
project_database_shard: "{{ mariadb_shards_info }}"
tags: always
when: enable_proxysql | bool


@ -25,7 +25,7 @@
login_user: "{{ database_user }}"
login_password: "{{ database_password }}"
name: "{{ mariadb_monitor_user }}"
-password: ""
+password: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}"
host: "%"
priv: "*.*:USAGE"
when:


@ -0,0 +1,6 @@
---
proxysql_project_database_shard: "{{ lookup('vars', (kolla_role_name | default(project_name)) + '_database_shard') }}"
# NOTE(kevko): kolla_role_name and the replace filter are used only because of nova-cell
proxysql_project: "{{ kolla_role_name | default(project_name) | replace('_','-') }}"
proxysql_config_users: "{% if proxysql_project_database_shard is defined and proxysql_project_database_shard['users'] is defined %}True{% else %}False{% endif %}"
proxysql_config_rules: "{% if proxysql_project_database_shard is defined and proxysql_project_database_shard['rules'] is defined %}True{% else %}False{% endif %}"
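The lookup above expects the consuming role to expose a '<project>_database_shard' variable with optional 'users' and 'rules' keys. A hypothetical example for an imaginary role named 'example' (user name, password and schema are placeholders) might look like:

    example_database_shard:
      users:
        - user: "root_shard_1"
          password: "SECRET"
          shard_id: "1"
      rules:
        - schema: "example"
          shard_id: "1"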


@ -0,0 +1,24 @@
---
- name: "Copying over {{ proxysql_project }} users config"
template:
src: "users.yaml.j2"
dest: "{{ node_config_directory }}/proxysql/users/{{ proxysql_project }}.yaml"
mode: "0660"
become: true
when:
- enable_proxysql | bool
- proxysql_config_users | bool
notify:
- Restart proxysql container
- name: "Copying over {{ proxysql_project }} rules config"
template:
src: "rules.yaml.j2"
dest: "{{ node_config_directory }}/proxysql/rules/{{ proxysql_project }}.yaml"
mode: "0660"
become: true
when:
- enable_proxysql | bool
- proxysql_config_rules | bool
notify:
- Restart proxysql container


@ -0,0 +1,18 @@
# This configuration file is used to configure proxysql rules,
# in our case we define the schemaname and the MySQL Galera cluster
# hostgroup to which the query is routed.
#
# Query rules are a very powerful vehicle to control traffic passing
# through ProxySQL and are configured in the mysql_query_rules table:
#
# ProxySQL Admin> SHOW CREATE TABLE mysql_query_rules\G
#
# https://proxysql.com/documentation/main-runtime/#mysql_query_rules
mysql_query_rules:
{% for rule in proxysql_project_database_shard['rules'] %}
{% set WRITER_GROUP = rule['shard_id'] | int * 10 %}
- schemaname: "{{ rule['schema'] }}"
destination_hostgroup: {{ WRITER_GROUP }}
apply: 1
active: 1
{% endfor %}


@ -0,0 +1,28 @@
# This configuration file is used to configure proxysql users,
# in our case we just define default_hostgroup and the MySQL Galera
# cluster hostgroup where the user is routed to.
#
# This is used especially when services create databases and users
# and connect via the user 'root_shard_SHARD_ID', so ProxySQL knows
# where to route these queries.
#
# Table mysql_users defines MySQL users that clients can use to connect to
# ProxySQL, and then used to connect to backends.
#
# ProxySQL Admin> SHOW CREATE TABLE mysql_users\G
#
# https://proxysql.com/documentation/main-runtime/#mysql_users
mysql_users:
{% for user in proxysql_project_database_shard['users'] %}
{% if user['shard_id'] is defined %}
{% set WRITER_GROUP = user['shard_id'] | int * 10 %}
{% endif %}
- username: "{{ user['user'] }}"
password: "{{ user['password'] }}"
{% if user['shard_id'] is defined %}
default_hostgroup: {{ WRITER_GROUP }}
{% endif %}
transaction_persistent: 1
active: 1
{% endfor %}
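As a rough sketch, a user entry with shard_id 1 (and therefore writer hostgroup 10) would render to something like the following; the user name and password are placeholders:

    mysql_users:
      - username: "root_shard_1"
        password: "SECRET"
        default_hostgroup: 10
        transaction_persistent: 1
        active: 1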


@ -390,6 +390,7 @@ workaround_ansible_issue_8743: yes
#enable_osprofiler: "no"
#enable_placement: "{{ enable_nova | bool or enable_zun | bool }}"
#enable_prometheus: "no"
#enable_proxysql: "no"
#enable_redis: "no"
#enable_sahara: "no"
#enable_senlin: "no"


@ -15,6 +15,8 @@ cinder_rbd_secret_uuid:
database_password:
# Password for the dedicated backup user account
mariadb_backup_database_password:
# Password for the monitor user
mariadb_monitor_password:
####################
# Docker options
@ -260,3 +262,9 @@ ceph_rgw_keystone_password:
# libvirt options
##################
libvirt_sasl_password:
############
# ProxySQL
############
proxysql_admin_password:
proxysql_stats_password:


@ -0,0 +1,130 @@
# -*- coding: utf-8 -*-
#
# Copyright 2022 Michal Arbet (kevko)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jinja2.filters import pass_context
from jinja2.runtime import Undefined
from kolla_ansible.exception import FilterError
from kolla_ansible.helpers import _call_bool_filter
@pass_context
def database_shards_info(context, hostnames):
"""returns dict with database shards info
Returned dict looks like the example below:
"database_shards_info": {
"shards": {
"0": {
"hosts": [
"controller0",
"controller1",
"controller2"
]
},
"1": {
"hosts": [
"controller3",
"controller4",
"controller5"
]
}
},
"users": [
{
"password": "secret",
"shard_id": "0",
"user": "root_shard_0"
},
{
"password": "secret",
"shard_id": "0",
"user": "backup_shard_0"
},
{
"password": "secret",
"shard_id": "1",
"user": "root_shard_1"
},
{
"password": "secret",
"shard_id": "1",
"user": "backup_shard_1"
}
]
}
:param context: Jinja2 Context
:param hostnames: List of database hosts
:returns: Dict with database shards info
"""
hostvars = context.get('hostvars')
if isinstance(hostvars, Undefined):
raise FilterError("'hostvars' variable is unavailable")
shards_info = {'shards': {}, 'users': []}
for hostname in hostnames:
host = hostvars.get(hostname)
if isinstance(host, Undefined):
raise FilterError(f"'{hostname}' not in 'hostvars'")
host_shard_id = host.get('mariadb_shard_id')
if host_shard_id is None:
raise FilterError(f"'mariadb_shard_id' is undefined "
"for host '{hostname}'")
else:
host_shard_id = str(host_shard_id)
if host_shard_id not in shards_info['shards']:
shards_info['shards'][host_shard_id] = {'hosts': [hostname]}
backup_enabled = host.get('enable_mariabackup')
if backup_enabled is None:
raise FilterError("'enable_mariabackup' variable is "
"unavailable")
backup_enabled = _call_bool_filter(context, backup_enabled)
db_password = host.get('database_password')
if db_password is None:
raise FilterError("'database_password' variable is "
"unavailable")
db_root_prefix = host.get('mariadb_shard_root_user_prefix')
if db_root_prefix is None:
raise FilterError("'mariadb_shard_root_user_prefix' variable "
"is unavailable")
db_user = f"{db_root_prefix}{host_shard_id}"
user_dict = {'password': db_password, 'user': db_user,
'shard_id': host_shard_id}
shards_info['users'].append(user_dict)
if backup_enabled:
db_backup_prefix = host.get('mariadb_shard_backup_user_prefix')
if db_backup_prefix is None:
raise FilterError("'mariadb_shard_backup_user_prefix' "
"variable is unavailable")
db_user = f"{db_backup_prefix}{host_shard_id}"
user_dict = {'password': db_password, 'user': db_user,
'shard_id': host_shard_id}
shards_info['users'].append(user_dict)
else:
shards_info['shards'][host_shard_id]['hosts'].append(hostname)
return shards_info


@ -0,0 +1,251 @@
# -*- coding: utf-8 -*-
#
# Copyright 2022 Michal Arbet (kevko)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import jinja2
from kolla_ansible.database_shards import database_shards_info
from kolla_ansible.exception import FilterError
from kolla_ansible.tests.unit.helpers import _to_bool
class TestKollaDatabaseShardsInfoFilter(unittest.TestCase):
def setUp(self):
# Bandit complains about Jinja2 autoescaping without nosec.
self.env = jinja2.Environment() # nosec
self.env.filters['bool'] = _to_bool
def _make_context(self, parent):
return self.env.context_class(
self.env, parent=parent, name='dummy', blocks={})
def test_missing_shard_id(self):
hostnames = ["primary"]
context = self._make_context({
'inventory_hostname': 'primary',
'hostvars': {
'primary': {
}
}
})
self.assertRaises(FilterError, database_shards_info,
context, hostnames)
def test_valid_shards_info_with_backup_user(self):
hostnames = ['primary', 'secondary1', 'secondary2']
enable_mariabackup = 'yes'
root_prefix = 'root_shard_'
backup_prefix = 'backup_shard_'
db_cred = 'SECRET'
db_shards = ['0', '1']
context = self._make_context({
'inventory_hostname': 'primary',
'hostvars': {
'primary': {
'mariadb_shard_id': db_shards[0],
'enable_mariabackup': enable_mariabackup,
'database_password': db_cred,
'mariadb_shard_root_user_prefix': root_prefix,
'mariadb_shard_backup_user_prefix': backup_prefix,
},
'secondary1': {
'mariadb_shard_id': db_shards[0],
'enable_mariabackup': enable_mariabackup,
'database_password': db_cred,
'mariadb_shard_root_user_prefix': root_prefix,
'mariadb_shard_backup_user_prefix': backup_prefix,
},
'secondary2': {
'mariadb_shard_id': db_shards[1],
'enable_mariabackup': enable_mariabackup,
'database_password': db_cred,
'mariadb_shard_root_user_prefix': root_prefix,
'mariadb_shard_backup_user_prefix': backup_prefix,
},
},
})
result = {
"shards": {
db_shards[0]: {
"hosts": [
"primary",
"secondary1"
]
},
db_shards[1]: {
"hosts": [
"secondary2"
]
}
},
"users": [
{
"password": db_cred,
"shard_id": db_shards[0],
"user": f"{root_prefix}0"
},
{
"password": db_cred,
"shard_id": db_shards[0],
"user": f"{backup_prefix}0"
},
{
"password": db_cred,
"shard_id": db_shards[1],
"user": f"{root_prefix}1"
},
{
"password": db_cred,
"shard_id": db_shards[1],
"user": f"{backup_prefix}1"
}
]
}
self.assertEqual(result, database_shards_info(context, hostnames))
def test_valid_shards_info_without_backup_user(self):
hostnames = ['primary', 'secondary1', 'secondary2']
enable_mariabackup = 'no'
root_prefix = 'root_shard_'
db_cred = 'SECRET'
db_shards = ['0', '1']
context = self._make_context({
'inventory_hostname': 'primary',
'hostvars': {
'primary': {
'mariadb_shard_id': db_shards[0],
'enable_mariabackup': enable_mariabackup,
'database_password': db_cred,
'mariadb_shard_root_user_prefix': root_prefix,
},
'secondary1': {
'mariadb_shard_id': db_shards[0],
'enable_mariabackup': enable_mariabackup,
'database_password': db_cred,
'mariadb_shard_root_user_prefix': root_prefix,
},
'secondary2': {
'mariadb_shard_id': db_shards[1],
'enable_mariabackup': enable_mariabackup,
'database_password': db_cred,
'mariadb_shard_root_user_prefix': root_prefix,
},
},
})
result = {
"shards": {
db_shards[0]: {
"hosts": [
"primary",
"secondary1"
]
},
db_shards[1]: {
"hosts": [
"secondary2"
]
}
},
"users": [
{
"password": db_cred,
"shard_id": db_shards[0],
"user": f"{root_prefix}0"
},
{
"password": db_cred,
"shard_id": db_shards[1],
"user": f"{root_prefix}1"
}
]
}
self.assertEqual(result, database_shards_info(context, hostnames))
def test_valid_shards_info_with_different_users_and_pass(self):
hostnames = ['primary', 'secondary1', 'secondary2']
enable_mariabackup = 'yes'
root_prefix = 'superman_shard_'
root_prefix_2 = 'batman_shard_'
backup_prefix = 'backupman_shard_'
db_cred = 'kRypTonyte'
db_shards = ['0', '1']
context = self._make_context({
'inventory_hostname': 'primary',
'hostvars': {
'primary': {
'mariadb_shard_id': db_shards[0],
'enable_mariabackup': enable_mariabackup,
'database_password': db_cred,
'mariadb_shard_root_user_prefix': root_prefix,
'mariadb_shard_backup_user_prefix': backup_prefix,
},
'secondary1': {
'mariadb_shard_id': db_shards[0],
'enable_mariabackup': enable_mariabackup,
'database_password': db_cred,
'mariadb_shard_root_user_prefix': root_prefix,
'mariadb_shard_backup_user_prefix': backup_prefix,
},
'secondary2': {
'mariadb_shard_id': db_shards[1],
'enable_mariabackup': 'no',
'database_password': db_cred,
'mariadb_shard_root_user_prefix': root_prefix_2,
},
},
})
result = {
"shards": {
db_shards[0]: {
"hosts": [
"primary",
"secondary1"
]
},
db_shards[1]: {
"hosts": [
"secondary2"
]
}
},
"users": [
{
"password": db_cred,
"shard_id": db_shards[0],
"user": f"{root_prefix}0"
},
{
"password": db_cred,
"shard_id": db_shards[0],
"user": f"{backup_prefix}0"
},
{
"password": db_cred,
"shard_id": db_shards[1],
"user": f"{root_prefix_2}1"
},
]
}
self.assertEqual(result, database_shards_info(context, hostnames))