0f5e065855
After the integration with placement [1], we need to configure how
zun-compute works with nova-compute.

* If zun-compute and nova-compute run on the same compute node, set
  'host_shared_with_nova' to true so that Zun uses the resource provider
  (compute node) created by nova. In this mode, containers and VMs can
  claim allocations against the same resource provider.
* If zun-compute runs on a node without nova-compute, no extra
  configuration is needed. By default, each zun-compute creates a resource
  provider in placement to represent the compute node it manages.

[1] https://blueprints.launchpad.net/zun/+spec/use-placement-resource-management

Change-Id: I2d85911c4504e541d2994ce3d48e2fbb1090b813
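For illustration only, a minimal sketch of the zun-compute side described
above. The section name is an assumption about zun.conf layout; check the
Zun configuration reference for your release. On a node that also runs
nova-compute:

    [compute]
    # assumed zun.conf override: reuse the resource provider created by nova
    host_shared_with_nova = true

On a container-only node no override is needed; zun-compute registers its
own resource provider in placement.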
---
kolla_base_distro: "{{ base_distro }}"
kolla_install_type: "{{ install_type }}"
network_interface: "{{ api_interface_name }}"
docker_restart_policy: "no"

# Use a random router id, otherwise it may result in the same router id
# in the CI gate.
keepalived_virtual_router_id: "{{ 250 | random(1) }}"

{% if enable_core_openstack | bool %}
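# With more than two hosts in the inventory, point the internal API at the
# primary node's address and disable HAProxy; otherwise use a link-local VIP
# with HAProxy enabled.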
kolla_internal_vip_address: "{{ api_interface_address if hostvars | length > 2 else '169.254.169.10' }}"
enable_haproxy: "{{ 'no' if hostvars | length > 2 else 'yes' }}"
neutron_external_interface: "fake_interface"
openstack_logging_debug: "True"
openstack_service_workers: "1"
{% endif %}

{% if need_build_image and not is_previous_release %}
# NOTE(Jeffrey4l): use a different docker namespace name in case it pulls images from hub.docker.io when deploying
docker_namespace: "lokolla"
docker_registry: "{{ api_interface_address }}:4000"
openstack_release: "{{ build_image_tag }}"
{% else %}
# use docker hub images
docker_namespace: "kolla"
{% if need_build_image and is_previous_release %}
# NOTE(mgoddard): Ensure that the insecure local registry is trusted, since it
# will be the source of images during the upgrade.
# NOTE(yoctozepto): this is required here for CI because we run templating
# of the docker systemd command only once, using the previous release when upgrading
docker_custom_option: "--insecure-registry {{ api_interface_address }}:4000"
{% endif %}
{% if not is_previous_release %}
openstack_release: "{{ zuul.branch | basename }}"
{% else %}
openstack_release: "{{ previous_release }}"
{% endif %}
{% endif %}

{% if is_ceph %}
enable_ceph: "yes"
enable_ceph_mds: "yes"
enable_ceph_rgw: "yes"
enable_ceph_rgw_keystone: "yes"
enable_ceph_nfs: "yes"
enable_cinder: "yes"
ceph_pool_pg_num: 8
ceph_pool_pgp_num: 8
# This is an experimental feature; disable it if the gate fails.
# In multinode jobs without ceph, rolling upgrade fails.
glance_enable_rolling_upgrade: "yes"
{% else %}
# NOTE(yoctozepto): in case ceph is *not* used, we have to use the ansible node (primary)
# to avoid random errors due to inventory randomness in Zuul (YAML format),
# because we use primary as the API endpoint, there is currently no HAProxy in CI,
# and the default behavior is to pick the "first" node (this issue affects only multinode)
glance_api_hosts: ["{{ inventory_hostname }}"]
{% endif %}

{% if scenario == "cinder-lvm" %}
enable_cinder: "yes"
enable_cinder_backend_lvm: "yes"
{% endif %}

{% if scenario == "zun" %}
enable_zun: "yes"
enable_kuryr: "yes"
enable_etcd: "yes"
enable_placement: "yes"
docker_configure_for_zun: "yes"
{% endif %}

{% if scenario == "scenario_nfv" %}
enable_tacker: "yes"
enable_neutron_sfc: "yes"
enable_mistral: "yes"
enable_redis: "yes"
enable_barbican: "yes"
{% endif %}

{% if scenario == "ironic" %}
enable_ironic: "yes"
ironic_dnsmasq_dhcp_range: "10.42.0.2,10.42.0.254"
{% endif %}

{% if scenario == "masakari" %}
enable_masakari: "yes"
{% endif %}