
For a couple of cycles we have been including our systemd-related roles in the openstack_hosts role, so that operators can configure their nodes before any actual service or infra role kicks in. This should also make it possible to configure networking for LXC and the setup overall.

By moving the systemd role includes during AIO setup to variables that leverage the openstack_hosts behavior, we achieve multiple goals at once:

- Reduce the amount of intersecting logic in the AIO bootstrap
- Add testing for the openstack_hosts section and ensure it is functional
- Improve transparency for deployers, who can now see and change the network configuration, as it is templated as a variables file instead of being applied directly.

As a consequence, we need to access the host by its already established IP address rather than one defined by us. However, we already require `bootstrap_host_public_address` and rely on it heavily, so this approach should not introduce many regressions.

Depends-On: https://review.opendev.org/c/openstack/openstack-ansible-haproxy_server/+/953670
Depends-On: https://review.opendev.org/c/openstack/openstack-ansible/+/954316
Change-Id: Ib688a5a758ec5ebbd0ebafdcaf2b27aeecdd9aba
Signed-off-by: Dmitriy Rabotyagov <dmitriy.rabotyagov@cleura.com>
377 lines
11 KiB
Django/Jinja
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## General options
debug: True

## Installation method for OpenStack services
install_method: "{{ bootstrap_host_install_method }}"
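# When the distro install method is selected, keep the infrastructure
# components below on distro packaging as well, so all package sources
# stay consistent.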
{% if bootstrap_host_install_method == 'distro' %}
galera_install_method: distro
rabbitmq_install_method: distro
ceph_pkg_source: distro
{% endif %}

## Tempest settings
tempest_public_subnet_cidr: "{{ vlan_network }}"
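# nthhost indexes addresses within the CIDR (negative values count from the
# end), so the pool spans the 110th address through the 10th-from-last.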
tempest_public_subnet_allocation_pools: "{{ vlan_network | ansible.utils.nthhost('110') }}-{{ vlan_network | ansible.utils.nthhost('-10') }}"

{% if _neutron_plugin_driver == 'ml2.ovn' %}
tempest_private_net_provider_type: geneve
{% else %}
tempest_private_net_provider_type: vxlan
{% endif %}
tempest_public_net_physical_name: 'physnet1'
# systemd-journald limitations
openstack_hosts_journald_config:
  RateLimitIntervalSec: 60
  RateLimitBurst: 600
  SystemMaxUse: 4G
  RuntimeMaxUse: 2G
  SystemMaxFileSize: 100M
  RuntimeMaxFileSize: 100M

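# Collect every variable whose name matches the given prefix into a single
# list, so each scenario file can contribute its own fragment. A deployer can
# extend the result simply by defining another prefixed variable, e.g.
# (hypothetical variable name, illustrative keys):
#   aio_systemd_networkd_networks_extra:
#     - interface: "dummy-extra"
#       config_overrides:
#         Network:
#           Address: "203.0.113.10/24"
# The raw/endraw wrapper below writes the lookups verbatim into the rendered
# user_variables file, so they are evaluated by Ansible at runtime rather
# than while this template is rendered.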
{% raw %}
openstack_hosts_systemd_services: "{{ query('vars', *query('varnames', '^aio_systemd_services_')) | flatten(levels=1) }}"
openstack_hosts_systemd_networkd_networks: "{{ query('vars', *query('varnames', '^aio_systemd_networkd_networks_')) | flatten(levels=1) }}"
openstack_hosts_systemd_networkd_devices: "{{ query('vars', *query('varnames', '^aio_systemd_networkd_devices_')) | flatten(levels=1) }}"
openstack_hosts_systemd_networkd_prefix: "osa_testing"
{% endraw %}

## Galera settings
galera_monitoring_allowed_source: "0.0.0.0/0"
# TODO(noonedeadpunk): This should be enabled once we re-work the SSL part
#galera_use_ssl: "{{ ('infra' in bootstrap_host_scenarios_expanded) }}"
galera_innodb_log_buffer_size: 4M
galera_wsrep_provider_options:
  - { option: "gcache.size", value: "4M" }

galera_my_cnf_overrides:
  mysqld:
    read_buffer_size: '64K'
    innodb_buffer_pool_size: '16M'
    thread_stack: '192K'
    thread_cache_size: '8'
    tmp_table_size: '8M'
    sort_buffer_size: '8M'
    max_allowed_packet: '8M'

### Set workers for all services to optimise memory usage

# Apache MPM settings
openstack_apache_start_servers: 3
openstack_apache_min_spare_threads: 5
openstack_apache_max_spare_threads: 15
openstack_apache_thread_limit: 30
openstack_apache_thread_child: 30

## Keystone
keystone_wsgi_threads: 2
keystone_wsgi_processes: 2

## Barbican
barbican_wsgi_processes: 1
barbican_wsgi_threads: 1

## Blazar
blazar_wsgi_processes: 1
blazar_wsgi_threads: 1

## Cinder
cinder_wsgi_processes: 1
cinder_wsgi_threads: 1
cinder_wsgi_buffer_size: 16384
cinder_osapi_volume_workers_max: 1

## CloudKitty
cloudkitty_wsgi_processes: 1
cloudkitty_wsgi_threads: 1

## Glance
glance_api_threads: 2
glance_api_workers: 1
glance_wsgi_threads: 2
glance_wsgi_processes: 1

## Placement
placement_wsgi_threads: 1
placement_wsgi_processes: 1
placement_wsgi_buffer_size: 16384

## Manila
manila_wsgi_processes: 1
manila_wsgi_threads: 1
manila_osapi_share_workers: 2
manila_wsgi_buffer_size: 65535

## Mistral
mistral_wsgi_processes: 1
mistral_wsgi_threads: 1

## Nova
nova_reserved_host_memory_mb: 256
nova_wsgi_threads: 1
nova_wsgi_processes: 1
nova_wsgi_buffer_size: 16384
nova_api_threads: 1
nova_osapi_compute_workers: 1
nova_conductor_workers: 1
nova_metadata_workers: 1
nova_scheduler_workers: 1

## Neutron
neutron_rpc_workers: 1
neutron_metadata_workers: 1
neutron_api_workers: 1
neutron_api_threads: 2
neutron_num_sync_threads: 1
neutron_wsgi_processes: 1

neutron_plugin_type: "{{ _neutron_plugin_driver }}"

{% if _neutron_plugin_driver != 'ml2.ovn' %}
neutron_ml2_drivers_type: "flat,vlan,vxlan"

neutron_plugin_base:
  - router
  - metering
{% endif %}

{% if 'neutron' in bootstrap_host_scenarios %}
tempest_test_includelist_neutron:
  - "neutron_tempest_plugin.api.test_networks*"
  - "tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops"

tempest_tempest_conf_overrides_neutron:
  network-feature-enabled:
    api_extensions: agent,allowed-address-pairs,binding,dhcp_agent_scheduler,ext-gw-mode,external-net,extra_dhcp_opt,extra_dhcp_optagent,extraroute,l3_agent_scheduler,metering,provider,quotas,router,security-group,service-type,subnet_allocation
{% endif %}

## Octavia
octavia_wsgi_threads: 1
octavia_wsgi_processes: 1
octavia_wsgi_buffer_size: 16384

## Heat
heat_api_workers: 1
heat_api_threads: 1
heat_wsgi_threads: 1
heat_wsgi_processes: 1
heat_wsgi_buffer_size: 16384

## Horizon
horizon_wsgi_processes: 1
horizon_wsgi_threads: 1

## Skyline
skyline_api_workers: 2
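# The nested raw block below keeps the skyline_git_track_branch lookup
# unevaluated at template time, so it is resolved by Ansible at deploy time.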
{% if 'skyline' in bootstrap_host_scenarios_expanded and 'yarn' in bootstrap_host_scenarios_expanded %}
{% raw %}
skyline_console_git_install_branch: "{{ skyline_git_track_branch }}"
{% endraw %}
{% endif %}

## Ceilometer
ceilometer_notification_workers: 1

## AODH
aodh_wsgi_threads: 1
aodh_wsgi_processes: 1

## Gnocchi
gnocchi_wsgi_threads: 1
gnocchi_wsgi_processes: 1
gnocchi_metricd_workers: 1

## Swift
swift_account_server_replicator_workers: 1
swift_server_replicator_workers: 1
swift_object_replicator_workers: 1
swift_account_server_workers: 1
swift_container_server_workers: 1
swift_object_server_workers: 1
swift_proxy_server_workers_not_capped: 1
swift_proxy_server_workers_capped: 1
swift_proxy_server_workers: 1

## Ironic
ironic_wsgi_threads: 1
ironic_wsgi_processes: 1

## Ironic Inspector
ironic_inspector_wsgi_threads: 1
ironic_inspector_wsgi_processes: 1

## Trove
trove_service_net_setup: true
trove_api_workers: 1
trove_conductor_workers: 1
trove_wsgi_threads: 1
trove_wsgi_processes: 1

## Magnum
magnum_wsgi_processes: 1
magnum_conductor_workers: 1

{% if 'metal' in bootstrap_host_scenarios %}
venv_wheel_build_enable: false
{% endif %}

## Sahara
sahara_api_workers: 1
sahara_wsgi_threads: 1
sahara_wsgi_processes: 1
sahara_wsgi_buffer_size: 16384

## Zun
zun_api_threads: 1
zun_wsgi_threads: 1
zun_wsgi_processes: 1

## Senlin
senlin_api_threads: 1
senlin_wsgi_threads: 1
senlin_wsgi_processes: 1

# NOTE: hpcloud-b4's eth0 uses 10.0.3.0/24, which overlaps with the
#       lxc_net_address default.
# TODO: We'll need to implement a mechanism to determine a valid
#       lxc_net_address value which will not overlap with an IP already
#       assigned to the host.
lxc_net_address: 10.255.255.1
lxc_net_netmask: 255.255.255.0
lxc_net_dhcp_range: 10.255.255.2,10.255.255.253

{% if cache_timeout is defined %}
## Package cache timeout
cache_timeout: {{ cache_timeout }}
{% endif %}

lxc_container_backing_store: {{ _lxc_container_backing_store }}
{% if _lxc_container_backing_store == 'zfs' %}
lxc_container_zfs_root_name: "osa-test-pool/lxc"
{% endif %}

# Bind mount the zuul repos into the containers
lxc_container_bind_mounts:
  - host_directory: "/home/zuul/src"
    container_directory: "/openstack/src"
  - host_directory: "/opt/cache/files"
    container_directory: "/opt/cache/files"

## Always set up tempest and the resources for it, then execute the tests
tempest_install: yes
tempest_run: yes
rally_install: yes

# Do a gateway ping test once the tempest role creates the gateway
tempest_network_ping_gateway: yes

{% if nodepool_dir.stat.exists %}
# Copy /etc/pip.conf into containers to get mirrors for wheels.
# Due to extra-index-url bugs in Ubuntu, we work around them by
# ignoring the config file while upgrading pip.
venv_pip_upgrade_noconf: true
lxc_container_cache_files_from_host:
  - /etc/pip.conf
# Disable chronyd in OpenStack CI
security_rhel7_enable_chrony: no
# The location where images are downloaded in openstack-infra
tempest_image_dir: "/opt/cache/files"
{% endif %}

# Set all the distros to the same value: a "quiet" print
# of kernel log messages.
openstack_user_kernel_options:
  - key: 'kernel.printk'
    value: '4 1 7 4'

openstack_hosts_package_state: latest

## Octavia
{% if 'octavia' in bootstrap_host_scenarios_expanded %}
# Enable Octavia V2 API/standalone
octavia_v2: True
# Disable Octavia V1 API
octavia_v1: False

octavia_management_net_subnet_cidr: {{ lbaas_network }}
octavia_management_net_subnet_allocation_pools: {{ lbaas_network | ansible.utils.nthhost('110') }}-{{ lbaas_network | ansible.utils.nthhost('-10') }}

{% if 'metal' in bootstrap_host_scenarios %}
# TODO(mnaser): The Octavia role relies on gathering IPs of hosts in the
#               LBaaS network and using those in the health manager pool
#               IPs. We do not store those IPs when running metal, so we
#               have to override the value manually. We should remove this
#               and fix the role (or the inventory tool) eventually.
octavia_hm_hosts: {{ lbaas_network | ansible.utils.nthhost('100') }} # br-lbaas IP
{% endif %}
{% endif %}

{% if 'proxy' in bootstrap_host_scenarios_expanded %}
# For testing with the 'proxy' scenario, configure the deployment environment
# to point at the local squid; the playbooks will set a runtime proxy to the
# AIO host squid.
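# ipaddr('address') drops the prefix length from squid_network (for example,
# 192.0.2.1/24 becomes 192.0.2.1), leaving a bare IP for the proxy URLs.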
deployment_environment_variables:
  http_proxy: http://{{ squid_network | ansible.utils.ipaddr('address') }}:3128/
  https_proxy: http://{{ squid_network | ansible.utils.ipaddr('address') }}:3128/
  no_proxy: "localhost,127.0.0.1,{{ bootstrap_host_management_address }},{{ bootstrap_host_internal_address }},{{ bootstrap_host_public_address }}"

# Remove eth0 from all containers so there is no default route and everything
# must go via the http proxy
lxc_container_networks: {}
{% endif %}

{% if 'ceph' not in bootstrap_host_scenarios_expanded and 'nfs' not in bootstrap_host_scenarios_expanded %}
cinder_backends:
  lvm:
    volume_group: cinder-volumes
    volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_backend_name: LVM_iSCSI
    iscsi_ip_address: "{{ bootstrap_host_management_address }}"
    lvm_type: "thin"
    extra_volume_types:
      - low-iops
      - high-iops
      - ultra-high-iops

{% if 'cinder' in bootstrap_host_scenarios %}
tempest_test_includelist_cinder:
  - tempest.scenario.test_minimum_basic.TestMinimumBasicScenario.test_minimum_basic_scenario
{% endif %}
{% endif %}

{% if 'glance' in bootstrap_host_scenarios and 'cinderstore' in bootstrap_host_scenarios_expanded %}
glance_default_store:
  name: cinder
  type: cinder
{% endif %}

# NOTE(jrosser) nested virt is not usable in general in opendev.org CI jobs,
# but if the keyword 'kvm' or 'qemu' is present in the expanded scenario then
# force the use of that virtualisation type. If neither is specified,
# default to qemu in zuul jobs, but allow the os_nova role to autodetect the
# virtualisation type for local testing by leaving nova_virt_type undefined.
{% if 'kvm' in bootstrap_host_scenarios_expanded %}
nova_virt_type: kvm
{% elif (('qemu' in bootstrap_host_scenarios_expanded) or nodepool_dir.stat.exists) %}
nova_virt_type: qemu
{% endif %}

{% if nodepool_dir.stat.exists %}
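# "*" is git's wildcard for marking every path as a safe.directory, which
# lets git operate on the CI-prepared checkouts regardless of ownership.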
openstack_hosts_git_safe_directories:
  - "*"
{% endif %}