Merge "Support multiple nova cells"
commit f2042a878e
@@ -516,9 +516,6 @@ openstack_service_rpc_workers: "{{ [ansible_processor_vcpus, 3]|min }}"
# Optionally allow Kolla to set sysctl values
set_sysctl: "yes"

# Valid options are [ none, novnc, spice, rdp ]
nova_console: "novnc"

# Endpoint type used to connect with OpenStack services with ansible modules.
# Valid options are [ public, internal, admin ]
openstack_interface: "admin"
@@ -558,6 +555,7 @@ enable_blazar: "no"
enable_cadf_notifications: "no"
enable_ceilometer: "no"
enable_ceilometer_ipmi: "no"
enable_cells: "no"
enable_central_logging: "no"
enable_ceph: "no"
enable_ceph_mds: "no"
@@ -920,6 +918,9 @@ nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}"
# Valid options are [ kvm, qemu, vmware, xenapi ]
nova_compute_virt_type: "kvm"
nova_instance_datadir_volume: "nova_compute"
nova_safety_upgrade: "no"
# Valid options are [ none, novnc, spice, rdp ]
nova_console: "novnc"

#######################
# Murano options

@@ -270,6 +270,9 @@ nova
[nova-conductor:children]
nova

[nova-super-conductor:children]
nova

[nova-novncproxy:children]
nova

@@ -289,6 +289,9 @@ nova
[nova-conductor:children]
nova

[nova-super-conductor:children]
nova

[nova-novncproxy:children]
nova

266
ansible/nova.yml
Normal file
@@ -0,0 +1,266 @@
---
# This playbook is for nova services. Due to support for deployment of cells,
# nova is separated into two roles - nova and nova-cell. This makes it more
# complicated than other services, as we may execute each role several times
# for a given operation.
#
# The nova role now deploys the global services:
#
# * nova-api
# * nova-scheduler
# * nova-super-conductor (if enable_cells is true)
#
# The nova-cell role handles services specific to a cell:
#
# * nova-compute
# * nova-compute-ironic
# * nova-conductor
# * nova-libvirt
# * nova-novncproxy
# * nova-serialproxy
# * nova-spicehtml5proxy
# * nova-ssh
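#
# As an illustration, a second cell might be described entirely through group
# variables (a sketch; the cell name "cell1" and the group names below are
# hypothetical, not part of this change):
#
#   nova_cell_name: cell1
#   nova_cell_compute_group: cell1-compute
#   nova_cell_conductor_group: cell1-conductor
#   nova_cell_novncproxy_group: cell1-novncproxy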

# We need to perform database bootstrapping before deploying or upgrading any
# containers, to ensure all database schema migrations have been performed,
# both in the API and cell databases. Note that this should not be disruptive
# to the Nova services, which will continue to run against the new schema.
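#
# Under the hood, these bootstrap tasks are expected to wrap the standard
# nova-manage commands (a sketch of the sequence, not taken verbatim from this
# change):
#
#   nova-manage api_db sync        # API DB schema migrations
#   nova-manage cell_v2 map_cell0  # register cell0
#   nova-manage db sync            # cell0 and cell DB schema migrations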

- name: Bootstrap nova API databases
  gather_facts: false
  hosts:
    - nova-api
    - '&enable_nova_True'
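    # The '&enable_nova_True' pattern intersects the hosts above with a group
    # generated by kolla-ansible from the enable_nova variable (via group_by),
    # limiting the play to hosts where nova is enabled.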
  tags:
    - nova
    - nova-bootstrap
    - nova-api
    - nova-api-bootstrap
  serial: '{{ kolla_serial|default("0") }}'
  tasks:
    # * Create nova API & cell0 DBs & users
    # * API DB schema migrations
    # * Map cell0
    # * Cell0 DB schema migrations
    - name: Bootstrap deploy
      include_role:
        name: nova
        tasks_from: bootstrap
      when:
        - enable_nova | bool
        - kolla_action in ['deploy', 'reconfigure']

    # * API DB schema migrations
    # * Cell0 DB schema migrations
    - name: Bootstrap upgrade
      include_role:
        name: nova
        tasks_from: bootstrap_upgrade
      when:
        - enable_nova | bool
        - kolla_action == 'upgrade'

- name: Bootstrap nova cell databases
  gather_facts: false
  hosts:
    - nova-conductor
    - '&enable_nova_True'
  tags:
    - nova
    - nova-bootstrap
    - nova-cell
    - nova-cell-bootstrap
  serial: '{{ kolla_serial|default("0") }}'
  tasks:
    # * Create nova cell DBs & users
    # * Create RabbitMQ vhost & user
    # * Cell DB schema migrations
    # * Create cell mappings
    - name: Bootstrap deploy
      include_role:
        name: nova-cell
        tasks_from: bootstrap
      when:
        - enable_nova | bool
        - kolla_action in ['deploy', 'reconfigure']

    # * Cell DB schema migrations
    - name: Bootstrap upgrade
      include_role:
        name: nova-cell
        tasks_from: bootstrap_upgrade
      when:
        - enable_nova | bool
        - kolla_action == 'upgrade'

# Standard {{ kolla_action }}.yml for nova role.
- name: Apply role nova
  gather_facts: false
  hosts:
    - nova-api
    - nova-scheduler
    - nova-super-conductor
    - '&enable_nova_True'
  tags:
    - nova
    - nova-api
    - nova-api-deploy
    - nova-api-upgrade
  serial: '{{ kolla_serial|default("0") }}'
  roles:
    - role: nova
      when: enable_nova | bool

# Standard {{ kolla_action }}.yml for nova-cell role.
- name: Apply role nova-cell
  gather_facts: false
  hosts:
    - compute
    - nova-conductor
    - nova-novncproxy
    - nova-serialproxy
    - nova-spicehtml5proxy
    - '&enable_nova_True'
  tags:
    - nova
    - nova-cell
    - nova-cell-deploy
    - nova-cell-upgrade
  serial: '{{ kolla_serial|default("0") }}'
  roles:
    - role: nova-cell
      when: enable_nova | bool

# Reload nova scheduler to pick up new cells.
# TODO(mgoddard): Ideally we'd only do this when one or more cells have been
# created or updated.
- name: Refresh nova scheduler cell cache
  gather_facts: false
  hosts:
    - nova-scheduler
    - '&enable_nova_True'
  tags:
    - nova
    - nova-api
    - nova-refresh-scheduler-cell-cache
  serial: '{{ kolla_serial|default("0") }}'
  tasks:
    - import_role:
        name: nova
        tasks_from: refresh_scheduler_cell_cache
      when:
        - enable_nova | bool
        - kolla_action in ['deploy', 'reconfigure']
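
# The refresh_scheduler_cell_cache tasks file is not shown in this diff. A
# minimal sketch of such a task, assuming it simply restarts the scheduler
# container (container name assumed):
#
#   - name: Refresh cell cache by restarting nova-scheduler
#     become: true
#     kolla_docker:
#       action: restart_container
#       common_options: "{{ docker_common_options }}"
#       name: nova_scheduler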

# Following an upgrade, Nova services must be restarted once all compute
# services have registered themselves, to remove the RPC version pin.
# Also, when nova_safety_upgrade is true, this starts services which were
# stopped during the upgrade. Nova upgrade documentation recommends starting
# conductors first and API last.

- name: Reload global Nova super conductor services
  gather_facts: false
  hosts:
    - nova-super-conductor
    - '&enable_nova_True'
  tags:
    - nova
    - nova-reload
    - nova-api
    - nova-api-reload
  serial: '{{ kolla_serial|default("0") }}'
  tasks:
    - import_role:
        name: nova
        tasks_from: reload_super_conductor
      when:
        - enable_nova | bool
        - kolla_action == 'upgrade'

- name: Reload Nova cell services
  gather_facts: false
  hosts:
    - compute
    - nova-conductor
    - nova-novncproxy
    - nova-serialproxy
    - nova-spicehtml5proxy
    - '&enable_nova_True'
  tags:
    - nova
    - nova-reload
    - nova-cell
    - nova-cell-reload
  serial: '{{ kolla_serial|default("0") }}'
  tasks:
    - import_role:
        name: nova-cell
        tasks_from: reload
      when:
        - enable_nova | bool
        - kolla_action == 'upgrade'

- name: Reload global Nova API services
  gather_facts: false
  hosts:
    - nova-api
    - nova-scheduler
    - '&enable_nova_True'
  tags:
    - nova
    - nova-reload
    - nova-api
    - nova-api-reload
  serial: '{{ kolla_serial|default("0") }}'
  tasks:
    - import_role:
        name: nova
        tasks_from: reload_api
      when:
        - enable_nova | bool
        - kolla_action == 'upgrade'

# Following an upgrade, data migrations should be performed for the API
# database. This should be done once all cells have been upgraded.

- name: Run Nova API online data migrations
  gather_facts: false
  hosts:
    - nova-api
    - '&enable_nova_True'
  tags:
    - nova
    - nova-api
    - nova-online-data-migrations
    - nova-api-online-data-migrations
  serial: '{{ kolla_serial|default("0") }}'
  tasks:
    - import_role:
        name: nova
        tasks_from: online_data_migrations
      when:
        - enable_nova | bool
        - kolla_action == 'upgrade'

# Following an upgrade, data migrations should be performed for each cell
# database. This should be done once all hosts in the cell have been upgraded,
# and ideally once all hosts in the cloud have been upgraded.

- name: Run Nova cell online data migrations
  gather_facts: false
  hosts:
    - nova-conductor
    - '&enable_nova_True'
  tags:
    - nova
    - nova-cell
    - nova-online-data-migrations
    - nova-cell-online-data-migrations
  serial: '{{ kolla_serial|default("0") }}'
  tasks:
    - import_role:
        name: nova-cell
        tasks_from: online_data_migrations
      when:
        - enable_nova | bool
        - kolla_action == 'upgrade'
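
# The online_data_migrations task files are not shown here; they are expected
# to wrap nova-manage's standard online migration command (a sketch), run once
# at the API level and once per cell:
#
#   nova-manage db online_data_migrations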

@@ -1,10 +1,10 @@
---
project_name: "nova"
project_name: "nova-cell"

nova_services:
nova_cell_services:
  nova-libvirt:
    container_name: nova_libvirt
    group: compute
    group: "{{ nova_cell_compute_group }}"
    enabled: "{{ nova_compute_virt_type in ['kvm', 'qemu'] }}"
    image: "{{ nova_libvirt_image_full }}"
    pid_mode: "host"
@@ -13,133 +13,42 @@ nova_services:
    dimensions: "{{ nova_libvirt_dimensions }}"
  nova-ssh:
    container_name: "nova_ssh"
    group: "compute"
    group: "{{ nova_cell_compute_group }}"
    image: "{{ nova_ssh_image_full }}"
    enabled: "{{ enable_nova_ssh | bool }}"
    volumes: "{{ nova_ssh_default_volumes + nova_ssh_extra_volumes }}"
    dimensions: "{{ nova_ssh_dimensions }}"
  nova-api:
    container_name: "nova_api"
    group: "nova-api"
    image: "{{ nova_api_image_full }}"
    enabled: True
    privileged: True
    volumes: "{{ nova_api_default_volumes + nova_api_extra_volumes }}"
    dimensions: "{{ nova_api_dimensions }}"
    haproxy:
      nova_api:
        enabled: "{{ enable_nova }}"
        mode: "http"
        external: false
        port: "{{ nova_api_port }}"
        listen_port: "{{ nova_api_listen_port }}"
      nova_api_external:
        enabled: "{{ enable_nova }}"
        mode: "http"
        external: true
        port: "{{ nova_api_port }}"
        listen_port: "{{ nova_api_listen_port }}"
      nova_metadata:
        enabled: "{{ enable_nova }}"
        mode: "http"
        external: false
        port: "{{ nova_metadata_port }}"
        listen_port: "{{ nova_metadata_listen_port }}"
      nova_metadata_external:
        enabled: "{{ enable_nova }}"
        mode: "http"
        external: true
        port: "{{ nova_metadata_port }}"
        listen_port: "{{ nova_metadata_listen_port }}"
      nova_rdp:
        enabled: "{{ enable_nova|bool and nova_console == 'rdp' }}"
        mode: "http"
        external: false
        port: "{{ rdp_port }}"
        host_group: "hyperv"
  nova-novncproxy:
    container_name: "nova_novncproxy"
    group: "nova-novncproxy"
    group: "{{ nova_cell_novncproxy_group }}"
    image: "{{ nova_novncproxy_image_full }}"
    enabled: "{{ nova_console == 'novnc' }}"
    volumes: "{{ nova_novncproxy_default_volumes + nova_novncproxy_extra_volumes }}"
    dimensions: "{{ nova_novncproxy_dimensions }}"
    haproxy:
      nova_novncproxy:
        enabled: "{{ enable_nova|bool and nova_console == 'novnc' }}"
        mode: "http"
        external: false
        port: "{{ nova_novncproxy_port }}"
        listen_port: "{{ nova_novncproxy_listen_port }}"
        backend_http_extra:
          - "timeout tunnel 1h"
      nova_novncproxy_external:
        enabled: "{{ enable_nova|bool and nova_console == 'novnc' }}"
        mode: "http"
        external: true
        port: "{{ nova_novncproxy_port }}"
        listen_port: "{{ nova_novncproxy_listen_port }}"
  nova-scheduler:
    container_name: "nova_scheduler"
    group: "nova-scheduler"
    image: "{{ nova_scheduler_image_full }}"
    enabled: True
    volumes: "{{ nova_scheduler_default_volumes + nova_scheduler_extra_volumes }}"
    dimensions: "{{ nova_scheduler_dimensions }}"
  nova-spicehtml5proxy:
    container_name: "nova_spicehtml5proxy"
    group: "nova-spicehtml5proxy"
    group: "{{ nova_cell_spicehtml5proxy_group }}"
    image: "{{ nova_spicehtml5proxy_image_full }}"
    enabled: "{{ nova_console == 'spice' }}"
    volumes: "{{ nova_spicehtml5proxy_default_volumes + nova_spicehtml5proxy_extra_volumes }}"
    dimensions: "{{ nova_spicehtml5proxy_dimensions }}"
    haproxy:
      nova_spicehtml5proxy:
        enabled: "{{ enable_nova|bool and nova_console == 'spice' }}"
        mode: "http"
        external: false
        port: "{{ nova_spicehtml5proxy_port }}"
        listen_port: "{{ nova_spicehtml5proxy_listen_port }}"
      nova_spicehtml5proxy_external:
        enabled: "{{ enable_nova|bool and nova_console == 'spice' }}"
        mode: "http"
        external: true
        port: "{{ nova_spicehtml5proxy_port }}"
        listen_port: "{{ nova_spicehtml5proxy_listen_port }}"
  nova-serialproxy:
    container_name: "nova_serialproxy"
    group: "nova-serialproxy"
    group: "{{ nova_cell_serialproxy_group }}"
    image: "{{ nova_serialproxy_image_full }}"
    enabled: "{{ enable_nova_serialconsole_proxy | bool }}"
    volumes: "{{ nova_serialproxy_default_volumes + nova_serialproxy_extra_volumes }}"
    dimensions: "{{ nova_serialproxy_dimensions }}"
    haproxy:
      nova_serialconsole_proxy:
        enabled: "{{ enable_nova|bool and enable_nova_serialconsole_proxy|bool }}"
        mode: "http"
        external: false
        port: "{{ nova_serialproxy_port }}"
        listen_port: "{{ nova_serialproxy_listen_port }}"
        backend_http_extra:
          - "timeout tunnel {{ haproxy_nova_serialconsole_proxy_tunnel_timeout }}"
      nova_serialconsole_proxy_external:
        enabled: "{{ enable_nova|bool and enable_nova_serialconsole_proxy|bool }}"
        mode: "http"
        external: true
        port: "{{ nova_serialproxy_port }}"
        listen_port: "{{ nova_serialproxy_listen_port }}"
        backend_http_extra:
          - "timeout tunnel {{ haproxy_nova_serialconsole_proxy_tunnel_timeout }}"
  nova-conductor:
    container_name: "nova_conductor"
    group: "nova-conductor"
    group: "{{ nova_cell_conductor_group }}"
    enabled: True
    image: "{{ nova_conductor_image_full }}"
    volumes: "{{ nova_conductor_default_volumes + nova_conductor_extra_volumes }}"
    dimensions: "{{ nova_conductor_dimensions }}"
  nova-compute:
    container_name: "nova_compute"
    group: "compute"
    group: "{{ nova_cell_compute_group }}"
    image: "{{ nova_compute_image_full }}"
    environment:
      LIBGUESTFS_BACKEND: "direct"
@@ -150,9 +59,9 @@ nova_services:
    dimensions: "{{ nova_compute_dimensions }}"
  nova-compute-ironic:
    container_name: "nova_compute_ironic"
    group: "nova-compute-ironic"
    group: "{{ nova_cell_compute_ironic_group }}"
    image: "{{ nova_compute_ironic_image_full }}"
    enabled: "{{ enable_ironic | bool }}"
    enabled: "{{ enable_ironic | bool and nova_cell_name == nova_cell_ironic_cell_name }}"
    volumes: "{{ nova_compute_ironic_default_volumes + nova_compute_ironic_extra_volumes }}"
    dimensions: "{{ nova_compute_ironic_dimensions }}"

@@ -175,27 +84,125 @@ nova_pool_pgp_num: "{{ ceph_pool_pgp_num }}"
nova_hw_disk_discard: "unmap"

ceph_client_nova_keyring_caps:
  mon: 'allow r, allow command "osd blacklist"'
  mon: 'profile rbd'
  osd: >-
    allow class-read object_prefix rbd_children,
    allow rwx pool={{ ceph_cinder_pool_name }},
    allow rwx pool={{ ceph_cinder_pool_name }}-cache,
    allow rwx pool={{ ceph_nova_pool_name }},
    allow rwx pool={{ ceph_nova_pool_name }}-cache,
    allow rwx pool={{ ceph_glance_pool_name }},
    allow rwx pool={{ ceph_glance_pool_name }}-cache
    profile rbd pool={{ ceph_cinder_pool_name }},
    profile rbd pool={{ ceph_cinder_pool_name }}-cache,
    profile rbd pool={{ ceph_nova_pool_name }},
    profile rbd pool={{ ceph_nova_pool_name }}-cache,
    profile rbd pool={{ ceph_glance_pool_name }},
    profile rbd pool={{ ceph_glance_pool_name }}-cache
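
# For reference, applying the new caps to an existing deployment by hand would
# look roughly like the following (pool names here assume the kolla defaults
# and are illustrative):
#
#   ceph auth caps client.nova \
#     mon 'profile rbd' \
#     osd 'profile rbd pool=vms, profile rbd pool=volumes, profile rbd pool=images'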

####################
# Cells Options
####################

# Name of the cell. For backwards compatibility this defaults to an empty name,
# since the cell created by kolla-ansible prior to the Train release had no
# name.
nova_cell_name: ''

# Name of the cell in which nova-compute-ironic will be deployed. For backwards
# compatibility this defaults to an empty name, since the cell created by
# kolla-ansible prior to the Train release had no name.
nova_cell_ironic_cell_name: ''

# Name of the Ansible group containing compute hosts. For backwards
# compatibility this is 'compute'. For a multi-cell deployment, this should be
# set to the name of a group containing only the compute hosts in this cell.
# Note that all compute hosts should also be in the 'compute' group.
nova_cell_compute_group: 'compute'

# Name of the Ansible group containing nova-compute-ironic hosts. For backwards
# compatibility this is 'nova-compute-ironic'. For a multi-cell deployment,
# this should be set to the name of a group containing only the compute hosts
# in this cell. Note that all nova-compute-ironic hosts should also be in the
# 'nova-compute-ironic' group.
nova_cell_compute_ironic_group: 'nova-compute-ironic'

# Name of the Ansible group containing nova-conductor hosts. For backwards
# compatibility this is 'nova-conductor'. For a multi-cell deployment, this
# should be set to the name of a group containing only the nova-conductor hosts
# in this cell. Note that all nova-conductor hosts should also be in the
# 'nova-conductor' group.
nova_cell_conductor_group: 'nova-conductor'

# Name of the Ansible group containing nova-novncproxy hosts. For backwards
# compatibility this is 'nova-novncproxy'. For a multi-cell deployment, this
# should be set to the name of a group containing only the nova-novncproxy
# hosts in this cell. Note that all nova-novncproxy hosts should also be in
# the 'nova-novncproxy' group.
nova_cell_novncproxy_group: 'nova-novncproxy'

# Name of the Ansible group containing nova-spicehtml5proxy hosts. For
# backwards compatibility this is 'nova-spicehtml5proxy'. For a multi-cell
# deployment, this should be set to the name of a group containing only the
# nova-spicehtml5proxy hosts in this cell. Note that all nova-spicehtml5proxy
# hosts should also be in the 'nova-spicehtml5proxy' group.
nova_cell_spicehtml5proxy_group: 'nova-spicehtml5proxy'

# Name of the Ansible group containing nova-serialproxy hosts. For backwards
# compatibility this is 'nova-serialproxy'. For a multi-cell deployment, this
# should be set to the name of a group containing only the nova-serialproxy
# hosts in this cell. Note that all nova-serialproxy hosts should also be in
# the 'nova-serialproxy' group.
nova_cell_serialproxy_group: 'nova-serialproxy'

####################
# Database
####################
nova_database_name: "nova"
nova_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}nova{% endif %}"
nova_database_address: "{{ database_address }}:{{ database_port }}"
nova_cell_database_admin_user: "{{ database_user }}"
nova_cell_database_admin_password: "{{ database_password }}"

nova_cell_database_name: "{{ 'nova_' ~ nova_cell_name if nova_cell_name else 'nova' }}"
nova_cell_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}nova{% endif %}"
nova_cell_database_password: '{{ nova_database_password }}'
nova_cell_database_address: "{% if nova_cell_database_group is defined %}{{ 'api' | kolla_address(groups[nova_cell_database_group][0]) }}{% else %}{{ database_address }}{% endif %}"
nova_cell_database_port: '{{ database_port }}'

# Ideally, the cell conductors would not have access to the API database.
# However, certain features require it (see
# https://docs.openstack.org/nova/latest/user/cellsv2-layout.html#operations-requiring-upcalls).
# Also, it is necessary for executing nova-manage cell_v2 create_cell.
nova_api_database_name: "nova_api"
nova_api_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}nova_api{% endif %}"
nova_api_database_address: "{{ database_address }}:{{ database_port }}"
nova_api_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"

# Optional group for cell database. If this is not defined, then the top level database is used.
# nova_cell_database_group:
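# For example, a cell with a dedicated database cluster might point this at a
# separate inventory group (the group name is illustrative):
# nova_cell_database_group: cell1-database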

####################
# RabbitMQ
####################

# Internal rabbit users should set these
nova_cell_rpc_user: "{{ om_rpc_user }}"
nova_cell_rpc_password: "{{ om_rpc_password }}"
nova_cell_rpc_port: "{{ om_rpc_port }}"
nova_cell_rpc_group_name: "{{ om_rpc_group }}"
nova_cell_rpc_transport: "{{ om_rpc_transport }}"
nova_cell_rpc_vhost: "{{ 'nova_' ~ nova_cell_name if nova_cell_name else om_rpc_vhost }}"

nova_cell_notify_user: "{{ nova_cell_rpc_user }}"
nova_cell_notify_password: "{{ nova_cell_rpc_password }}"
nova_cell_notify_port: "{{ nova_cell_rpc_port }}"
nova_cell_notify_group_name: "{{ nova_cell_rpc_group_name }}"
nova_cell_notify_transport: "{{ nova_cell_rpc_transport }}"
nova_cell_notify_vhost: "{{ nova_cell_rpc_vhost }}"

# External Rabbit users should override these
nova_cell_rpc_transport_url: "{{ nova_cell_rpc_transport }}://{% for host in groups[nova_cell_rpc_group_name] %}{{ nova_cell_rpc_user }}:{{ nova_cell_rpc_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ nova_cell_rpc_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ nova_cell_rpc_vhost }}"
nova_cell_notify_transport_url: "{{ nova_cell_notify_transport }}://{% for host in groups[nova_cell_notify_group_name] %}{{ nova_cell_notify_user }}:{{ nova_cell_notify_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ nova_cell_notify_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ nova_cell_notify_vhost }}"
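
# For a cell named "cell1" with two controllers at 10.0.0.1 and 10.0.0.2
# (illustrative values), the RPC transport URL above renders roughly as:
#
#   rabbit://openstack:password@10.0.0.1:5672,openstack:password@10.0.0.2:5672/nova_cell1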

# These vhosts and users will be created.
nova_cell_rpc_rabbitmq_users:
  - user: "{{ nova_cell_rpc_user }}"
    password: "{{ nova_cell_rpc_password }}"
    vhost: "{{ nova_cell_rpc_vhost }}"
nova_cell_notify_rabbitmq_users:
  - user: "{{ nova_cell_notify_user }}"
    password: "{{ nova_cell_notify_password }}"
    vhost: "{{ nova_cell_notify_vhost }}"

####################
# Docker
@@ -212,10 +219,6 @@ nova_ssh_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker
nova_ssh_tag: "{{ nova_tag }}"
nova_ssh_image_full: "{{ nova_ssh_image }}:{{ nova_ssh_tag }}"

nova_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-conductor"
nova_conductor_tag: "{{ nova_tag }}"
nova_conductor_image_full: "{{ nova_conductor_image }}:{{ nova_conductor_tag }}"

nova_novncproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-novncproxy"
nova_novncproxy_tag: "{{ nova_tag }}"
nova_novncproxy_image_full: "{{ nova_novncproxy_image }}:{{ nova_novncproxy_tag }}"
@@ -224,31 +227,25 @@ nova_spicehtml5proxy_image: "{{ docker_registry ~ '/' if docker_registry else ''
nova_spicehtml5proxy_tag: "{{ nova_tag }}"
nova_spicehtml5proxy_image_full: "{{ nova_spicehtml5proxy_image }}:{{ nova_spicehtml5proxy_tag }}"

nova_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-scheduler"
nova_scheduler_tag: "{{ nova_tag }}"
nova_scheduler_image_full: "{{ nova_scheduler_image }}:{{ nova_scheduler_tag }}"
nova_serialproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-serialproxy"
nova_serialproxy_tag: "{{ nova_tag }}"
nova_serialproxy_image_full: "{{ nova_serialproxy_image }}:{{ nova_serialproxy_tag }}"

nova_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-conductor"
nova_conductor_tag: "{{ nova_tag }}"
nova_conductor_image_full: "{{ nova_conductor_image }}:{{ nova_conductor_tag }}"

nova_compute_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-compute"
nova_compute_tag: "{{ nova_tag }}"
nova_compute_image_full: "{{ nova_compute_image }}:{{ nova_compute_tag }}"

nova_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-api"
nova_api_tag: "{{ nova_tag }}"
nova_api_image_full: "{{ nova_api_image }}:{{ nova_api_tag }}"

nova_compute_ironic_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-compute-ironic"
nova_compute_ironic_tag: "{{ nova_tag }}"
nova_compute_ironic_image_full: "{{ nova_compute_ironic_image }}:{{ nova_compute_ironic_tag }}"

nova_serialproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-serialproxy"
nova_serialproxy_tag: "{{ nova_tag }}"
nova_serialproxy_image_full: "{{ nova_serialproxy_image }}:{{ nova_serialproxy_tag }}"

nova_libvirt_dimensions: "{{ default_container_dimensions }}"
nova_ssh_dimensions: "{{ default_container_dimensions }}"
nova_api_dimensions: "{{ default_container_dimensions }}"
nova_novncproxy_dimensions: "{{ default_container_dimensions }}"
nova_scheduler_dimensions: "{{ default_container_dimensions }}"
nova_spicehtml5proxy_dimensions: "{{ default_container_dimensions }}"
nova_serialproxy_dimensions: "{{ default_container_dimensions }}"
nova_conductor_dimensions: "{{ default_container_dimensions }}"
@@ -275,22 +272,11 @@ nova_ssh_default_volumes:
  - "{{ nova_instance_datadir_volume }}:/var/lib/nova"
  - "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_api_default_volumes:
  - "{{ node_config_directory }}/nova-api/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "/lib/modules:/lib/modules:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_novncproxy_default_volumes:
  - "{{ node_config_directory }}/nova-novncproxy/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_scheduler_default_volumes:
  - "{{ node_config_directory }}/nova-scheduler/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_spicehtml5proxy_default_volumes:
  - "{{ node_config_directory }}/nova-spicehtml5proxy/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
@@ -323,19 +309,24 @@ nova_compute_ironic_default_volumes:
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
# Used by bootstrapping containers.
nova_cell_bootstrap_default_volumes:
  - "{{ node_config_directory }}/nova-cell-bootstrap/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"

nova_extra_volumes: "{{ default_extra_volumes }}"
nova_libvirt_extra_volumes: "{{ nova_extra_volumes }}"
nova_ssh_extra_volumes: "{{ nova_extra_volumes }}"
nova_api_extra_volumes: "{{ nova_extra_volumes }}"
nova_novncproxy_extra_volumes: "{{ nova_extra_volumes }}"
nova_scheduler_extra_volumes: "{{ nova_extra_volumes }}"
nova_spicehtml5proxy_extra_volumes: "{{ nova_extra_volumes }}"
nova_serialproxy_extra_volumes: "{{ nova_extra_volumes }}"
nova_conductor_extra_volumes: "{{ nova_extra_volumes }}"
nova_compute_extra_volumes: "{{ nova_extra_volumes }}"
nova_compute_ironic_extra_volumes: "{{ nova_extra_volumes }}"

# Used by bootstrapping containers.
nova_cell_bootstrap_extra_volumes: "{{ nova_extra_volumes }}"

####################
# HAProxy
@@ -346,14 +337,6 @@ haproxy_nova_serialconsole_proxy_tunnel_timeout: "10m"
# OpenStack
####################

nova_legacy_admin_endpoint: "{{ admin_protocol }}://{{ nova_internal_fqdn }}:{{ nova_api_port }}/v2/%(tenant_id)s"
nova_legacy_internal_endpoint: "{{ internal_protocol }}://{{ nova_internal_fqdn }}:{{ nova_api_port }}/v2/%(tenant_id)s"
nova_legacy_public_endpoint: "{{ public_protocol }}://{{ nova_external_fqdn }}:{{ nova_api_port }}/v2/%(tenant_id)s"

nova_admin_endpoint: "{{ admin_protocol }}://{{ nova_internal_fqdn }}:{{ nova_api_port }}/v2.1"
nova_internal_endpoint: "{{ internal_protocol }}://{{ nova_internal_fqdn }}:{{ nova_api_port }}/v2.1"
nova_public_endpoint: "{{ public_protocol }}://{{ nova_external_fqdn }}:{{ nova_api_port }}/v2.1"

nova_logging_debug: "{{ openstack_logging_debug }}"

openstack_nova_auth: "{{ openstack_auth }}"
@@ -364,16 +347,24 @@ nova_safety_upgrade: "no"
nova_libvirt_port: "{{'16514' if libvirt_tls | bool else '16509'}}"
nova_ssh_port: "8022"

nova_services_require_nova_conf:
  - nova-api
# NOTE(mgoddard): The order of this list defines the order in which services
# are restarted during an upgrade in reload.yml. Restarting the conductor
# first is recommended.
nova_cell_services_require_nova_conf:
  - nova-conductor
  - nova-compute
  - nova-compute-ironic
  - nova-conductor
  - nova-novncproxy
  - nova-serialproxy
  - nova-scheduler
  - nova-spicehtml5proxy

# Ideally these services would not require access to policy files, but there
# is a place in compute where they are referenced:
# https://opendev.org/openstack/nova/src/commit/627c461a62ce722a4c95a44b181f40b8db198c2b/nova/network/neutronv2/api.py#L532
nova_cell_services_require_policy_json:
  - nova-compute
  - nova-compute-ironic

# After upgrading nova-compute, services will have an RPC version cap in place.
# We need to restart all services that communicate with nova-compute in order
# to allow them to use the latest RPC version. Ideally, there would be a way to
@@ -383,30 +374,13 @@ nova_services_require_nova_conf:
# around 10 seconds, but the default is 30 to allow room for slowness.
nova_compute_startup_delay: 30

####################
# Keystone
####################
nova_ks_services:
  - name: "nova_legacy"
    type: "compute_legacy"
    description: "OpenStack Compute Service (Legacy 2.0)"
    endpoints:
      - {'interface': 'admin', 'url': '{{ nova_legacy_admin_endpoint }}'}
      - {'interface': 'internal', 'url': '{{ nova_legacy_internal_endpoint }}'}
      - {'interface': 'public', 'url': '{{ nova_legacy_public_endpoint }}'}
  - name: "nova"
    type: "compute"
    description: "OpenStack Compute Service"
    endpoints:
      - {'interface': 'admin', 'url': '{{ nova_admin_endpoint }}'}
      - {'interface': 'internal', 'url': '{{ nova_internal_endpoint }}'}
      - {'interface': 'public', 'url': '{{ nova_public_endpoint }}'}

nova_ks_users:
  - project: "service"
    user: "{{ nova_keystone_user }}"
    password: "{{ nova_keystone_password }}"
    role: "admin"
# By default, the cell conductor is configured with access to the API database.
# This is necessary for some features which require an 'upcall'. These are
# listed here:
# https://docs.openstack.org/nova/latest/user/cellsv2-layout.html#operations-requiring-upcalls.
# To disable access to the API database from cell conductors, set
# nova_cell_conductor_has_api_database to no.
nova_cell_conductor_has_api_database: "yes"

####################
# Notification

93
ansible/roles/nova-cell/filter_plugins/filters.py
Normal file
@@ -0,0 +1,93 @@
#!/usr/bin/python

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from jinja2.exceptions import TemplateRuntimeError
import re


class FilterModule(object):
    def filters(self):
        return {
            'extract_cell': self._extract_cell,
            'namespace_haproxy_for_cell': self._namespace_haproxy_for_cell,
        }

    def _extract_cell(self, list_cells_cli_output, cell_name):
        """Extract cell settings from the nova-manage CLI.

        This filter tries to extract the cell settings for the specified cell
        from the output of the command:
        nova-manage cell_v2 list_cells --verbose
        If the cell is not registered, nothing is returned.

        An example line from this command for a cell with no name looks like this:

        | | 68a3f49e-27ec-422f-9e2e-2a4e5dc8291b | rabbit://openstack:password@1.2.3.4:5672 | mysql+pymysql://nova:password@1.2.3.4:3306/nova | False |  # noqa

        And for a cell with a name:

        | cell1 | 68a3f49e-27ec-422f-9e2e-2a4e5dc8291b | rabbit://openstack:password@1.2.3.4:5672 | mysql+pymysql://nova:password@1.2.3.4:3306/nova | False |  # noqa

        """
        # NOTE(priteau): regexp doesn't support passwords containing spaces
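        # The negative lookahead in the UUID field below excludes cell0, which
        # always has the all-zero UUID and should not match a normal cell.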
        p = re.compile(
            r'\| +(?P<cell_name>[^ ]+)? +'
            r'\| +(?!00000000-0000-0000-0000-000000000000)'
            r'(?P<cell_uuid>[0-9a-f\-]+) +'
            r'\| +(?P<cell_message_queue>[^ ]+) +'
            r'\| +(?P<cell_database>[^ ]+) +'
            r'\| +(?P<cell_disabled>[^ ]+) +'
            r'\|$')
        cells = []
        for line in list_cells_cli_output['stdout_lines']:
            match = p.match(line)
            if match:
                # If there is no cell name, we get None in the cell_name match
                # group. Use an empty string to match the default cell.
                match_cell_name = match.group('cell_name') or ""
                if match_cell_name == cell_name:
                    cells.append(match.groupdict())
        if len(cells) > 1:
            raise TemplateRuntimeError(
                "Cell: {} has duplicates. "
                "Manual cleanup required.".format(cell_name))
        return cells[0] if cells else None

    def _namespace_haproxy_for_cell(self, services, cell_name):
        """Add namespacing to HAProxy configuration for a cell.

        :param services: dict defining service configuration.
        :param cell_name: name of the cell, or empty if cell has no name.
        :returns: the services dict, with haproxy configuration modified to
            provide namespacing between cells.
        """

        def _namespace(name):
            # Backwards compatibility - no cell name suffix for cells without
            # a name.
            return "{}_{}".format(name, cell_name) if cell_name else name

        # Service name must be namespaced as haproxy-config uses this as the
        # config file name.
        services = {
            _namespace(service_name): service
            for service_name, service in services.items()
        }
        for service in services.values():
            if service.get('haproxy'):
                service['haproxy'] = {
                    _namespace(name): service['haproxy'][name]
                    for name in service['haproxy']
                }
        return services
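
# A rough sketch of how these filters are consumed from Ansible; the variable
# names in the first task are illustrative, while the second mirrors the usage
# in cell_proxy_loadbalancer.yml later in this commit:
#
#   - set_fact:
#       cell_settings: "{{ existing_cells_output | extract_cell(nova_cell_name) }}"
#
#   - import_role:
#       name: haproxy-config
#     vars:
#       project_services: "{{ cell_proxy_project_services | namespace_haproxy_for_cell(nova_cell_name) }}"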

@@ -2,7 +2,7 @@
- name: Restart nova-conductor container
  vars:
    service_name: "nova-conductor"
    service: "{{ nova_services[service_name] }}"
    service: "{{ nova_cell_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
@@ -14,11 +14,63 @@
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"
    - kolla_action != "upgrade" or not nova_safety_upgrade | bool

- name: Restart nova-novncproxy container
  vars:
    service_name: "nova-novncproxy"
    service: "{{ nova_cell_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"
    - kolla_action != "upgrade" or not nova_safety_upgrade | bool

- name: Restart nova-spicehtml5proxy container
  vars:
    service_name: "nova-spicehtml5proxy"
    service: "{{ nova_cell_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"
    - kolla_action != "upgrade" or not nova_safety_upgrade | bool

- name: Restart nova-serialproxy container
  vars:
    service_name: "nova-serialproxy"
    service: "{{ nova_cell_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"
    - kolla_action != "upgrade" or not nova_safety_upgrade | bool

- name: Restart nova-ssh container
  vars:
    service_name: "nova-ssh"
    service: "{{ nova_services[service_name] }}"
    service: "{{ nova_cell_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
@@ -35,7 +87,7 @@
- name: Restart nova-libvirt container
  vars:
    service_name: "nova-libvirt"
    service: "{{ nova_services[service_name] }}"
    service: "{{ nova_cell_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
@@ -54,90 +106,10 @@
  when:
    - kolla_action != "config"

- name: Restart nova-scheduler container
  vars:
    service_name: "nova-scheduler"
    service: "{{ nova_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart nova-novncproxy container
  vars:
    service_name: "nova-novncproxy"
    service: "{{ nova_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart nova-spicehtml5proxy container
  vars:
    service_name: "nova-spicehtml5proxy"
    service: "{{ nova_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart nova-serialproxy container
  vars:
    service_name: "nova-serialproxy"
    service: "{{ nova_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart nova-api container
  vars:
    service_name: "nova-api"
    service: "{{ nova_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart nova-compute container
  vars:
    service_name: "nova-compute"
    service: "{{ nova_services[service_name] }}"
    service: "{{ nova_cell_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
@@ -155,7 +127,7 @@
- name: Restart nova-compute-ironic container
  vars:
    service_name: "nova-compute-ironic"
    service: "{{ nova_services[service_name] }}"
    service: "{{ nova_cell_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
@@ -207,34 +179,3 @@
    - Restart nova-compute container
    - Restart nova-compute-ironic container
    - Restart nova-compute-fake containers

# NOTE(mgoddard): Currently (just prior to Stein release), sending SIGHUP to
# nova compute services leaves them in a broken state in which they cannot
# start new instances. The following error is seen in the logs:
# "In shutdown, no new events can be scheduled"
# To work around this we restart the nova-compute services.
# Speaking to the nova team, this seems to be an issue in oslo.service,
# with a fix proposed here: https://review.openstack.org/#/c/641907.
# This issue also seems to affect the proxy services, which exit non-zero in
# response to a SIGHUP, so restart those too.
# The issue actually affects all nova services, since they remain with RPC
# version pinned to the previous release:
# https://bugs.launchpad.net/kolla-ansible/+bug/1833069.
# TODO(mgoddard): Use SIGHUP when this bug has been fixed.

- name: Restart nova services to remove RPC version cap
  become: true
  kolla_docker:
    action: restart_container
    common_options: "{{ docker_common_options }}"
    name: "{{ item.value.container_name }}"
  when:
    - kolla_action == 'upgrade'
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
    - item.key in nova_services_require_nova_conf
  with_dict: "{{ nova_services }}"
  listen:
    - Restart nova-compute container
    - Restart nova-compute-ironic container
    - Restart nova-compute-fake containers

@@ -1,53 +1,41 @@
---
- name: Creating Nova databases
- name: Creating Nova cell database
  become: true
  kolla_toolbox:
    module_name: mysql_db
    module_args:
      login_host: "{{ database_address }}"
      login_port: "{{ database_port }}"
      login_user: "{{ database_user }}"
      login_password: "{{ database_password }}"
      name: "{{ item }}"
  run_once: True
  delegate_to: "{{ groups['nova-api'][0] }}"
  with_items:
    - "{{ nova_database_name }}"
    - "{{ nova_database_name }}_cell0"
    - "{{ nova_api_database_name }}"
      login_host: "{{ nova_cell_database_address }}"
      login_port: "{{ nova_cell_database_port }}"
      login_user: "{{ nova_cell_database_admin_user }}"
      login_password: "{{ nova_cell_database_admin_password }}"
      name: "{{ nova_cell_database_name }}"
  when:
    - not use_preconfigured_databases | bool
    - inventory_hostname == groups[nova_cell_conductor_group][0]

- name: Creating Nova databases user and setting permissions
- name: Creating Nova cell database user and setting permissions
  become: true
  kolla_toolbox:
    module_name: mysql_user
    module_args:
      login_host: "{{ database_address }}"
      login_port: "{{ database_port }}"
      login_user: "{{ database_user }}"
      login_password: "{{ database_password }}"
      name: "{{ item.database_username }}"
      password: "{{ item.database_password }}"
      login_host: "{{ nova_cell_database_address }}"
      login_port: "{{ nova_cell_database_port }}"
      login_user: "{{ nova_cell_database_admin_user }}"
      login_password: "{{ nova_cell_database_admin_password }}"
      name: "{{ nova_cell_database_user }}"
      password: "{{ nova_cell_database_password }}"
      host: "%"
      priv: "{{ item.database_name }}.*:ALL"
      priv: "{{ nova_cell_database_name }}.*:ALL"
      append_privs: "yes"
  with_items:
    - database_name: "{{ nova_database_name }}"
      database_username: "{{ nova_database_user }}"
      database_password: "{{ nova_database_password }}"
    - database_name: "{{ nova_database_name }}_cell0"
      database_username: "{{ nova_database_user }}"
      database_password: "{{ nova_database_password }}"
    - database_name: "{{ nova_api_database_name }}"
      database_username: "{{ nova_api_database_user }}"
      database_password: "{{ nova_api_database_password }}"
  loop_control:
    label: "{{ item.database_name }}"
  run_once: True
  delegate_to: "{{ groups['nova-api'][0] }}"
  when:
    - not use_preconfigured_databases | bool
    - inventory_hostname == groups[nova_cell_conductor_group][0]
  no_log: true

- include_tasks: bootstrap_service.yml
- import_tasks: rabbitmq.yml

- import_tasks: config_bootstrap.yml

- import_tasks: bootstrap_service.yml

- import_tasks: create_cells.yml
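
# create_cells.yml is not included in this excerpt; it is expected to wrap
# "nova-manage cell_v2 create_cell", roughly (a sketch, not verbatim):
#
#   nova-manage cell_v2 create_cell --name {{ nova_cell_name }} \
#     --transport-url {{ nova_cell_rpc_transport_url }} \
#     --database_connection mysql+pymysql://...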

@@ -1,8 +1,8 @@
---
- name: Running Nova bootstrap container
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
- name: Running Nova cell bootstrap container
  become: true
  vars:
    nova_conductor: "{{ nova_cell_services['nova-conductor'] }}"
  kolla_docker:
    action: "start_container"
    common_options: "{{ docker_common_options }}"
@@ -10,11 +10,12 @@
    environment:
      KOLLA_UPGRADE:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
    image: "{{ nova_api.image }}"
    image: "{{ nova_conductor.image }}"
    labels:
      BOOTSTRAP:
    name: "bootstrap_nova"
    name: "nova_cell_bootstrap"
    restart_policy: no
    volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"
    volumes: "{{ nova_cell_bootstrap_default_volumes + nova_cell_bootstrap_extra_volumes }}"
  register: bootstrap_result
  changed_when: bootstrap_result.stdout | default("") | length > 0
  when: inventory_hostname == groups[nova_cell_conductor_group][0]

5
ansible/roles/nova-cell/tasks/bootstrap_upgrade.yml
Normal file
@@ -0,0 +1,5 @@
---
# For upgrade, we need to apply DB schema migrations to the cell databases.

- import_tasks: config_bootstrap.yml
- import_tasks: bootstrap_service.yml

12
ansible/roles/nova-cell/tasks/cell_proxy_loadbalancer.yml
Normal file
@@ -0,0 +1,12 @@
---
# Configure HAProxy for one cell for a particular console proxy type.
- import_role:
    name: haproxy-config
  vars:
    project_services: "{{ cell_proxy_project_services | namespace_haproxy_for_cell(cell_name) }}"
    # Default is necessary because this play may not be targeting the hosts in
    # the cell_proxy_group group, and therefore they would not have role
    # defaults defined. If we put this variable in group_vars, then it cannot
    # be overridden by the inventory.
    cell_name: "{{ hostvars[groups[cell_proxy_group][0]]['nova_cell_name'] | default('') }}"
  tags: always
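
# A hypothetical caller (not shown in this diff) might include this file once
# per console proxy type, along the lines of:
#
#   - include_tasks: cell_proxy_loadbalancer.yml
#     vars:
#       cell_proxy_group: "{{ nova_cell_novncproxy_group }}"
#       cell_proxy_project_services: "{{ novncproxy_services }}"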

@@ -7,7 +7,7 @@
  become: true
  with_items:
    - "nova-libvirt/secrets"
  when: inventory_hostname in groups['compute']
  when: inventory_hostname in groups[nova_cell_compute_group]

- name: Copying over ceph.conf(s)
  vars:
@@ -23,7 +23,7 @@
  with_items:
    - "nova-compute"
    - "nova-libvirt"
  when: inventory_hostname in groups['compute']
  when: inventory_hostname in groups[nova_cell_compute_group]
  notify:
    - Restart {{ item }} container

@@ -64,7 +64,7 @@
    dest: "{{ node_config_directory }}/nova-compute/ceph.client.nova.keyring"
    mode: "0600"
  become: true
  when: inventory_hostname in groups['compute']
  when: inventory_hostname in groups[nova_cell_compute_group]
  notify:
    - Restart nova-compute container

@@ -75,7 +75,7 @@
    mode: "0600"
  become: true
  when:
    - inventory_hostname in groups['compute']
    - inventory_hostname in groups[nova_cell_compute_group]
    - item.enabled | bool
  with_items:
    - uuid: "{{ rbd_secret_uuid }}"
@@ -94,7 +94,7 @@
    mode: "0600"
  become: true
  when:
    - inventory_hostname in groups['compute']
    - inventory_hostname in groups[nova_cell_compute_group]
    - item.enabled | bool
  with_items:
    - uuid: "{{ rbd_secret_uuid }}"
@@ -116,4 +116,4 @@
  with_items:
    - "nova-compute"
    - "nova-libvirt/secrets"
  when: inventory_hostname in groups['compute']
  when: inventory_hostname in groups[nova_cell_compute_group]
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
- name: Check nova containers
|
||||
- name: Check nova-cell containers
|
||||
become: true
|
||||
kolla_docker:
|
||||
action: "compare_container"
|
||||
@ -15,6 +15,6 @@
|
||||
when:
|
||||
- inventory_hostname in groups[item.value.group]
|
||||
- item.value.enabled | bool
|
||||
with_dict: "{{ nova_services }}"
|
||||
with_dict: "{{ nova_cell_services }}"
|
||||
notify:
|
||||
- "Restart {{ item.key }} container"
|
||||
|
@ -62,7 +62,7 @@
|
||||
with_sequence: start=1 end={{ num_nova_fake_per_node }}
|
||||
when:
|
||||
- kolla_action != "config"
|
||||
- inventory_hostname in groups['compute']
|
||||
- inventory_hostname in groups[nova_cell_compute_group]
|
||||
- enable_nova_fake | bool
|
||||
notify:
|
||||
- Restart nova-compute-fake containers
|
||||
|
@ -9,7 +9,7 @@
    - { name: "net.ipv4.conf.default.rp_filter", value: "{{ nova_compute_host_rp_filter_mode }}"}
  when:
    - set_sysctl | bool
    - inventory_hostname in groups['compute']
    - inventory_hostname in groups[nova_cell_compute_group]

- name: Ensuring config directories exist
  become: true

@ -22,22 +22,18 @@
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ nova_services }}"
  with_dict: "{{ nova_cell_services }}"

- include_tasks: ceph.yml
  when:
    - enable_ceph | bool and nova_backend == "rbd"
    - inventory_hostname in groups['ceph-mon'] or
      inventory_hostname in groups['compute'] or
      inventory_hostname in groups['nova-api'] or
      inventory_hostname in groups['nova-conductor'] or
      inventory_hostname in groups['nova-novncproxy'] or
      inventory_hostname in groups['nova-scheduler']
    - inventory_hostname in groups[nova_cell_conductor_group] or
      inventory_hostname in groups[nova_cell_compute_group]

- include_tasks: external_ceph.yml
  when:
    - not enable_ceph | bool and (nova_backend == "rbd" or cinder_backend_ceph | bool)
    - inventory_hostname in groups['compute']
    - inventory_hostname in groups[nova_cell_compute_group]

- name: Check if policies shall be overwritten
  local_action: stat path="{{ item }}"

@ -65,7 +61,7 @@
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ nova_services }}"
  with_dict: "{{ nova_cell_services }}"
  notify:
    - "Restart {{ item.key }} container"

@ -74,7 +70,7 @@
    xenapi_facts: "{{ lookup('file', xenapi_facts_root + '/' + inventory_hostname + '/' + xenapi_facts_file) | from_json }}"
  when:
    - nova_compute_virt_type == 'xenapi'
    - inventory_hostname in groups['compute']
    - inventory_hostname in groups[nova_cell_compute_group]

- name: Copying over nova.conf
  become: true

@ -92,15 +88,15 @@
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
    - item.key in nova_services_require_nova_conf
  with_dict: "{{ nova_services }}"
    - item.key in nova_cell_services_require_nova_conf
  with_dict: "{{ nova_cell_services }}"
  notify:
    - "Restart {{ item.key }} container"

- name: Copying over libvirt configuration
  become: true
  vars:
    service: "{{ nova_services['nova-libvirt'] }}"
    service: "{{ nova_cell_services['nova-libvirt'] }}"
  template:
    src: "{{ item.src }}"
    dest: "{{ node_config_directory }}/nova-libvirt/{{ item.dest }}"

@ -117,7 +113,7 @@
- name: Copying over libvirt TLS keys (nova-libvirt)
  include_tasks: "config-libvirt-tls.yml"
  vars:
    service: "{{ nova_services['nova-libvirt'] }}"
    service: "{{ nova_cell_services['nova-libvirt'] }}"
    service_name: nova-libvirt
    file: "{{ item }}"
  when:

@ -135,7 +131,7 @@
- name: Copying over libvirt TLS keys (nova-compute)
  include_tasks: "config-libvirt-tls.yml"
  vars:
    service: "{{ nova_services['nova-compute'] }}"
    service: "{{ nova_cell_services['nova-compute'] }}"
    service_name: nova-compute
    file: "{{ item }}"
  when:

@ -151,7 +147,7 @@
- name: Copying files for nova-ssh
  become: true
  vars:
    service: "{{ nova_services['nova-ssh'] }}"
    service: "{{ nova_cell_services['nova-ssh'] }}"
  template:
    src: "{{ item.src }}"
    dest: "{{ node_config_directory }}/nova-ssh/{{ item.dest }}"

@ -169,7 +165,7 @@

- name: Copying VMware vCenter CA file
  vars:
    service: "{{ nova_services['nova-compute'] }}"
    service: "{{ nova_cell_services['nova-compute'] }}"
  copy:
    src: "{{ node_custom_config }}/vmware_ca"
    dest: "{{ node_config_directory }}/nova-compute/vmware_ca"

@ -184,7 +180,7 @@

- name: Copying 'release' file for nova_compute
  vars:
    service: "{{ nova_services['nova-compute'] }}"
    service: "{{ nova_cell_services['nova-compute'] }}"
  copy:
    src: "{{ item }}"
    dest: "{{ node_config_directory }}/nova-compute/release"

@ -203,16 +199,6 @@

- name: Copying over existing policy file
  become: true
  vars:
    services_require_policy_json:
      - nova-api
      - nova-compute
      - nova-compute-ironic
      - nova-conductor
      - nova-novncproxy
      - nova-serialproxy
      - nova-scheduler
      - nova-spicehtml5proxy
  template:
    src: "{{ nova_policy_file_path }}"
    dest: "{{ node_config_directory }}/{{ item.key }}/{{ nova_policy_file }}"

@ -221,8 +207,8 @@
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
    - nova_policy_file is defined
    - item.key in services_require_policy_json
  with_dict: "{{ nova_services }}"
    - item.key in nova_cell_services_require_policy_json
  with_dict: "{{ nova_cell_services }}"
  notify:
    - "Restart {{ item.key }} container"
35
ansible/roles/nova-cell/tasks/config_bootstrap.yml
Normal file
@ -0,0 +1,35 @@
---
# Generate configuration for bootstrapping containers.

- name: Ensuring config directories exist
  become: true
  file:
    path: "{{ node_config_directory }}/nova-cell-bootstrap"
    state: "directory"
    owner: "{{ config_owner_user }}"
    group: "{{ config_owner_group }}"
    mode: "0770"
  when: inventory_hostname == groups[nova_cell_conductor_group][0]

- name: Copying over config.json files for nova-cell-bootstrap
  become: true
  template:
    src: "nova-cell-bootstrap.json.j2"
    dest: "{{ node_config_directory }}/nova-cell-bootstrap/config.json"
    mode: "0660"
  when: inventory_hostname == groups[nova_cell_conductor_group][0]

- name: Copying over nova.conf for nova-cell-bootstrap
  become: true
  vars:
    service_name: "nova-cell-bootstrap"
  merge_configs:
    sources:
      - "{{ role_path }}/templates/nova.conf.j2"
      - "{{ node_custom_config }}/global.conf"
      - "{{ node_custom_config }}/nova.conf"
      - "{{ node_custom_config }}/nova/nova-conductor.conf"
      - "{{ node_custom_config }}/nova/{{ inventory_hostname }}/nova.conf"
    dest: "{{ node_config_directory }}/nova-cell-bootstrap/nova.conf"
    mode: "0660"
  when: inventory_hostname == groups[nova_cell_conductor_group][0]
@ -1,128 +1,53 @@
---
- name: Create cell0 mappings
- import_tasks: get_cell_settings.yml

- name: Create cell
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
    nova_conductor: "{{ nova_cell_services['nova-conductor'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 map_cell0'
    command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 create_cell{% if nova_cell_name %} --name {{ nova_cell_name }}{% endif %}'
    common_options: "{{ docker_common_options }}"
    detach: False
    image: "{{ nova_api.image }}"
    labels:
      BOOTSTRAP:
    name: "create_cell0_nova"
    restart_policy: no
    volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
  register: map_cell0
  changed_when:
    - map_cell0 is success
    - '"Cell0 is already setup" not in map_cell0.stdout'
  failed_when:
    - map_cell0.rc != 0
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"

- include_tasks: bootstrap_service.yml
  when: map_cell0.changed

- name: Get list of existing cells
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 list_cells --verbose'
    common_options: "{{ docker_common_options }}"
    detach: False
    image: "{{ nova_api.image }}"
    labels:
      BOOTSTRAP:
    name: "list_cells_nova"
    restart_policy: no
    volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
  register: existing_cells_list
  changed_when: false
  failed_when:
    - existing_cells_list.rc != 0
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"

- name: Check if a base cell already exists
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
    # We match lines containing a UUID in a column (except the one used by
    # cell0), followed by two other columns, the first containing the transport
    # URL and the second the database connection. For example:
    #
    # | None | 68a3f49e-27ec-422f-9e2e-2a4e5dc8291b | rabbit://openstack:password@1.2.3.4:5672 | mysql+pymysql://nova:password@1.2.3.4:3306/nova | False |
    #
    # NOTE(priteau): regexp doesn't support passwords containing spaces
    regexp: '\| +(?!00000000-0000-0000-0000-000000000000)([0-9a-f\-]+) +\| +([^ ]+) +\| +([^ ]+) +\| +([^ ]+) +\|$'
  set_fact:
    existing_cells: "{{ existing_cells_list.stdout | regex_findall(regexp, multiline=True) }}"
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"

- name: Create base cell for legacy instances
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 create_cell'
    common_options: "{{ docker_common_options }}"
    detach: False
    image: "{{ nova_api.image }}"
    image: "{{ nova_conductor.image }}"
    labels:
      BOOTSTRAP:
    name: "create_cell_nova"
    restart_policy: no
    volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
  register: base_cell
    volumes: "{{ nova_cell_bootstrap_default_volumes + nova_cell_bootstrap_extra_volumes }}"
  register: nova_cell_create
  changed_when:
    - base_cell is success
    - nova_cell_create is success
  failed_when:
    - base_cell.rc != 0
    - '"already exists" not in base_cell.stdout'
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"
  when: existing_cells | length == 0
    - nova_cell_create.rc != 0
    - '"already exists" not in nova_cell_create.stdout'
  when:
    - inventory_hostname == groups[nova_conductor.group][0] | default(None)
    - nova_cell_settings | length == 0

- name: Update base cell for legacy instances
- name: Update cell
  vars:
    connection_url: "mysql+pymysql://{{ nova_database_user }}:{{ nova_database_password }}@{{ nova_database_address }}/{{ nova_database_name }}"
    nova_api: "{{ nova_services['nova-api'] }}"
    nova_cell_database_url: "mysql+pymysql://{{ nova_cell_database_user }}:{{ nova_cell_database_password }}@{{ nova_cell_database_address | put_address_in_context('url') }}:{{ nova_cell_database_port }}/{{ nova_cell_database_name }}"
    nova_conductor: "{{ nova_cell_services['nova-conductor'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    command: "bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 update_cell --cell_uuid {{ existing_cells[0][0] }}'"
    command: "bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 update_cell --cell_uuid {{ nova_cell_settings.cell_uuid }}'"
    common_options: "{{ docker_common_options }}"
    detach: False
    image: "{{ nova_api.image }}"
    image: "{{ nova_conductor.image }}"
    labels:
      BOOTSTRAP:
    name: "create_cell_nova"
    restart_policy: no
    volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
  register: base_cell
    volumes: "{{ nova_cell_bootstrap_default_volumes + nova_cell_bootstrap_extra_volumes }}"
  register: nova_cell_updated
  changed_when:
    - base_cell is success
    - nova_cell_updated is success
  failed_when:
    - base_cell.rc != 0
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"
    - nova_cell_updated.rc != 0
  when:
    - existing_cells | length == 1
    - existing_cells[0][1] != rpc_transport_url or existing_cells[0][2] != connection_url

- name: Print warning if a duplicate cell is detected
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
  fail:
    msg: Multiple base cells detected, manual cleanup with `nova-manage cell_v2` may be required.
  ignore_errors: yes
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"
  when:
    - existing_cells | length > 1
    - inventory_hostname == groups[nova_conductor.group][0] | default(None)
    - nova_cell_settings | length > 0
    - nova_cell_settings.cell_message_queue != nova_cell_notify_transport_url or nova_cell_settings.cell_database != nova_cell_database_url
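A note on the Create cell task above: nova-manage cell_v2 create_cell reads the cell's transport URL and database connection from the nova.conf rendered for nova-cell-bootstrap, so only the optional --name flag is passed on the command line. With nova_cell_name set to, say, cell1 (an illustrative value, not part of this change), the container would effectively run:

    nova-manage cell_v2 create_cell --name cell1

When nova_cell_name is empty, the flag is omitted and the default unnamed cell is created, which preserves the single-cell behaviour.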
@ -1,10 +1,7 @@
---
- include_tasks: register.yml
  when: inventory_hostname in groups['nova-api']

- include_tasks: bootstrap_xenapi.yml
  when:
    - inventory_hostname in groups['compute']
    - inventory_hostname in groups[nova_cell_compute_group]
    - nova_compute_virt_type == "xenapi"

- include_tasks: clone.yml

@ -15,17 +12,10 @@
- include_tasks: config-nova-fake.yml
  when:
    - enable_nova_fake | bool
    - inventory_hostname in groups['compute']

- include_tasks: bootstrap.yml
  when: inventory_hostname in groups['nova-api'] or
        inventory_hostname in groups['compute']

- include_tasks: create_cells.yml
  when: inventory_hostname in groups['nova-api']
    - inventory_hostname in groups[nova_cell_compute_group]

- name: Flush handlers
  meta: flush_handlers

- include_tasks: discover_computes.yml
  when: inventory_hostname in groups['nova-api']
  when: inventory_hostname in groups[nova_cell_conductor_group]
@ -9,23 +9,22 @@
    # is similar to what nova uses internally as its default for the
    # [DEFAULT] host config option.
    virt_compute_service_hosts: >-
      {{ groups['compute'] |
      {{ groups[nova_cell_compute_group] |
         intersect(ansible_play_batch) |
         map('extract', hostvars, 'ansible_nodename') |
         list }}
    # For ironic, use {{ansible_hostname}}-ironic since this is what we
    # configure for [DEFAULT] host in nova.conf.
    ironic_compute_service_hosts: >-
      {{ (groups['nova-compute-ironic'] |
      {{ (groups[nova_cell_compute_ironic_group] |
          intersect(ansible_play_batch) |
          map('extract', hostvars, 'ansible_hostname') |
          map('regex_replace', '^(.*)$', '\1-ironic') |
          list)
         if enable_ironic | bool else [] }}
         if nova_cell_services['nova-compute-ironic'].enabled | bool else [] }}
  set_fact:
    expected_compute_service_hosts: "{{ virt_compute_service_hosts + ironic_compute_service_hosts }}"
  run_once: True
  delegate_to: "{{ groups['nova-api'][0] }}"
  when: inventory_hostname == groups[nova_cell_conductor_group][0] | default(None)

- name: Waiting for nova-compute services to register themselves
  become: true

@ -44,8 +43,6 @@
      compute service list --format json --column Host --service nova-compute
  register: nova_compute_services
  changed_when: false
  run_once: True
  delegate_to: "{{ groups['nova-api'][0] }}"
  retries: 20
  delay: 10
  until:

@ -63,20 +60,22 @@
        map(attribute='Host') |
        list)
       is superset(expected_compute_service_hosts)
  when: inventory_hostname == groups[nova_cell_conductor_group][0] | default(None)

- import_tasks: get_cell_settings.yml

- name: Fail if cell settings not found
  fail:
    msg: >-
      Unable to find settings for {{ nova_cell_name or 'the default cell' }}.
  when:
    - inventory_hostname == groups[nova_cell_conductor_group][0] | default(None)
    - not nova_cell_settings

# TODO(yoctozepto): no need to do --by-service if ironic not used
- name: Discover nova hosts
  become: true
  command: >
    docker exec nova_api nova-manage cell_v2 discover_hosts --by-service
    docker exec nova_conductor nova-manage cell_v2 discover_hosts --by-service --cell_uuid {{ nova_cell_settings.cell_uuid }}
  changed_when: False
  run_once: True
  delegate_to: "{{ groups['nova-api'][0] }}"

# NOTE(yoctozepto): SIGHUP is probably unnecessary
- name: Refresh cell cache in nova scheduler
  become: true
  command: docker kill --signal HUP nova_scheduler
  changed_when: False
  when:
    - inventory_hostname in groups['nova-scheduler']
  when: inventory_hostname == groups[nova_cell_conductor_group][0] | default(None)
@ -7,7 +7,7 @@
  become: true
  with_items:
    - "nova-libvirt/secrets"
  when: inventory_hostname in groups['compute']
  when: inventory_hostname in groups[nova_cell_compute_group]

- name: Check nova keyring file
  local_action: stat path="{{ node_custom_config }}/nova/ceph.client.nova.keyring"

@ -38,7 +38,7 @@
    - nova-compute
    - nova-libvirt
  when:
    - inventory_hostname in groups['compute']
    - inventory_hostname in groups[nova_cell_compute_group]
    - nova_backend == "rbd"
    - external_ceph_cephx_enabled | bool
  notify:

@ -54,7 +54,7 @@
    - nova-compute
    - nova-libvirt
  when:
    - inventory_hostname in groups['compute']
    - inventory_hostname in groups[nova_cell_compute_group]
    - nova_backend == "rbd"
  notify:
    - Restart {{ item }} container

@ -66,7 +66,7 @@
    mode: "0600"
  become: true
  when:
    - inventory_hostname in groups['compute']
    - inventory_hostname in groups[nova_cell_compute_group]
    - item.enabled | bool
  with_items:
    - uuid: "{{ rbd_secret_uuid }}"

@ -103,7 +103,7 @@
    mode: "0600"
  become: true
  when:
    - inventory_hostname in groups['compute']
    - inventory_hostname in groups[nova_cell_compute_group]
    - item.enabled | bool
    - external_ceph_cephx_enabled | bool
  with_items:

@ -126,4 +126,4 @@
  with_items:
    - "nova-compute"
    - "nova-libvirt/secrets"
  when: inventory_hostname in groups['compute']
  when: inventory_hostname in groups[nova_cell_compute_group]
28
ansible/roles/nova-cell/tasks/get_cell_settings.yml
Normal file
@ -0,0 +1,28 @@
---
- name: Get a list of existing cells
  vars:
    nova_conductor: "{{ nova_cell_services['nova-conductor'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 list_cells --verbose'
    common_options: "{{ docker_common_options }}"
    detach: False
    image: "{{ nova_conductor.image }}"
    labels:
      BOOTSTRAP:
    name: "nova_list_cells"
    restart_policy: no
    volumes: "{{ nova_cell_bootstrap_default_volumes + nova_cell_bootstrap_extra_volumes }}"
  register: existing_cells_list
  changed_when: false
  failed_when:
    - existing_cells_list.rc != 0
  when: inventory_hostname == groups[nova_conductor.group][0] | default(None)

- name: Extract current cell settings from list
  vars:
    nova_conductor: "{{ nova_cell_services['nova-conductor'] }}"
  set_fact:
    nova_cell_settings: "{{ existing_cells_list | extract_cell(nova_cell_name) }}"
  when: inventory_hostname == groups[nova_conductor.group][0] | default(None)
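The extract_cell filter used above is one of kolla-ansible's filter plugins and is not shown in this diff. Judging from how create_cells.yml consumes the result, nova_cell_settings is assumed to look roughly like this for the example cell shown earlier (values illustrative only):

    nova_cell_settings:
      cell_uuid: 68a3f49e-27ec-422f-9e2e-2a4e5dc8291b
      cell_message_queue: rabbit://openstack:password@1.2.3.4:5672
      cell_database: mysql+pymysql://nova:password@1.2.3.4:3306/nova

When no matching cell exists the result is empty, which is what the "nova_cell_settings | length == 0" and "not nova_cell_settings" conditions rely on.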
@ -1,7 +1,123 @@
---
- name: "Configure haproxy for {{ project_name }}"
  import_role:
    role: haproxy-config
# NOTE(mgoddard): Load balancer configuration for this role works a little
# differently than usual. We need to configure an HAProxy frontend for each
# enabled console proxy service (novnc, spicehtml5, serial), in each cell. We
# do this by configuring a unique port for each service in each cell, and
# proxying traffic on that port to the set of console proxies in the cell.
#
# We currently don't have a global list of all cells, so we are using the
# group membership as a guide. We'll take novncproxy as an example. We find the
# set of unique values of the 'nova_cell_novncproxy_group' variable for hosts
# in the global 'nova-novncproxy' group - there should be one for each cell.
# Then for each of those groups, we run the haproxy-config role, using the
# proxy configuration for a host in that group. This allows us to have
# different ports for each cell, and potentially a different console type
# (nova_console) also.
#
# Here we depend on the lazy nature of Jinja, referencing the variable
# 'cell_proxy_group' in 'cell_proxy_project_services' that will be the loop_var
# in proxy_loadbalancer.yml.

- import_tasks: proxy_loadbalancer.yml
  vars:
    project_services: "{{ nova_services }}"
    # Default is necessary because this play may not be targeting the hosts in
    # the nova-novncproxy group, and therefore they would not have role
    # defaults defined. If we put these variables in group_vars, then they
    # cannot be overridden by the inventory.
    cell_proxy_groups: >-
      {{ groups['nova-novncproxy'] |
         map('extract', hostvars, 'nova_cell_novncproxy_group') |
         map('default', 'nova-novncproxy') |
         unique |
         list }}
    cell_proxy_service_name: nova-novncproxy
    cell_proxy_project_services:
      nova-novncproxy:
        group: "{{ cell_proxy_group }}"
        enabled: "{{ hostvars[groups[cell_proxy_group][0]]['nova_console'] == 'novnc' }}"
        haproxy:
          nova_novncproxy:
            enabled: "{{ hostvars[groups[cell_proxy_group][0]]['nova_console'] == 'novnc' }}"
            mode: "http"
            external: false
            port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_novncproxy_port'] }}"
            listen_port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_novncproxy_listen_port'] }}"
            backend_http_extra:
              - "timeout tunnel 1h"
          nova_novncproxy_external:
            enabled: "{{ hostvars[groups[cell_proxy_group][0]]['nova_console'] == 'novnc' }}"
            mode: "http"
            external: true
            port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_novncproxy_port'] }}"
            listen_port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_novncproxy_listen_port'] }}"
            backend_http_extra:
              - "timeout tunnel 1h"
  tags: always

- import_tasks: proxy_loadbalancer.yml
  vars:
    # Default is necessary because this play may not be targeting the hosts in
    # the nova-spicehtml5proxy group, and therefore they would not have role
    # defaults defined. If we put these variables in group_vars, then they
    # cannot be overridden by the inventory.
    cell_proxy_groups: >-
      {{ groups['nova-spicehtml5proxy'] |
         map('extract', hostvars, 'nova_cell_spicehtml5proxy_group') |
         map('default', 'nova-spicehtml5proxy') |
         unique |
         list }}
    cell_proxy_service_name: nova-spicehtml5proxy
    cell_proxy_project_services:
      nova-spicehtml5proxy:
        group: "{{ nova_cell_spicehtml5proxy_group }}"
        enabled: "{{ hostvars[groups[cell_proxy_group][0]]['nova_console'] == 'spice' }}"
        haproxy:
          nova_spicehtml5proxy:
            enabled: "{{ hostvars[groups[cell_proxy_group][0]]['nova_console'] == 'spice' }}"
            mode: "http"
            external: false
            port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_spicehtml5proxy_port'] }}"
            listen_port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_spicehtml5proxy_listen_port'] }}"
          nova_spicehtml5proxy_external:
            enabled: "{{ hostvars[groups[cell_proxy_group][0]]['nova_console'] == 'spice' }}"
            mode: "http"
            external: true
            port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_spicehtml5proxy_port'] }}"
            listen_port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_spicehtml5proxy_listen_port'] }}"
  tags: always

- import_tasks: proxy_loadbalancer.yml
  vars:
    # Default is necessary because this play may not be targeting the hosts in
    # the nova-serialproxy group, and therefore they would not have role
    # defaults defined. If we put these variables in group_vars, then they
    # cannot be overridden by the inventory.
    cell_proxy_groups: >-
      {{ groups['nova-serialproxy'] |
         map('extract', hostvars, 'nova_cell_serialproxy_group') |
         map('default', 'nova-serialproxy') |
         unique |
         list }}
    cell_proxy_service_name: nova-serialproxy
    cell_proxy_project_services:
      nova-serialproxy:
        group: "{{ nova_cell_serialproxy_group }}"
        enabled: "{{ hostvars[groups[cell_proxy_group][0]]['enable_nova_serialconsole_proxy'] | bool }}"
        haproxy:
          nova_serialconsole_proxy:
            enabled: "{{ hostvars[groups[cell_proxy_group][0]]['enable_nova_serialconsole_proxy'] | bool }}"
            mode: "http"
            external: false
            port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_serialproxy_port'] }}"
            listen_port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_serialproxy_listen_port'] }}"
            backend_http_extra:
              - "timeout tunnel {{ haproxy_nova_serialconsole_proxy_tunnel_timeout }}"
          nova_serialconsole_proxy_external:
            enabled: "{{ hostvars[groups[cell_proxy_group][0]]['enable_nova_serialconsole_proxy'] | bool }}"
            mode: "http"
            external: true
            port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_serialproxy_port'] }}"
            listen_port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_serialproxy_listen_port'] }}"
            backend_http_extra:
              - "timeout tunnel {{ haproxy_nova_serialconsole_proxy_tunnel_timeout }}"
  tags: always
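To make the group-extraction pattern concrete, consider a hypothetical two-cell inventory (group names invented for illustration) in which hosts in cell1-novncproxy and cell2-novncproxy each set nova_cell_novncproxy_group to their own group, and both groups are children of the global nova-novncproxy group. The novncproxy expression above would then evaluate to:

    cell_proxy_groups: ['cell1-novncproxy', 'cell2-novncproxy']

and proxy_loadbalancer.yml would render one HAProxy frontend per cell, each on that cell's nova_novncproxy_port. Hosts that do not set the variable fall back to the global group via map('default', ...), preserving single-cell behaviour.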
19
ansible/roles/nova-cell/tasks/online_data_migrations.yml
Normal file
@ -0,0 +1,19 @@
---
- name: Run Nova cell online database migrations
  vars:
    nova_conductor: "{{ nova_cell_services['nova-conductor'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    common_options: "{{ docker_common_options }}"
    detach: False
    environment:
      KOLLA_OSM:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
    image: "{{ nova_conductor.image }}"
    labels:
      BOOTSTRAP:
    name: "nova_cell_online_data_migrations"
    restart_policy: "no"
    volumes: "{{ nova_cell_bootstrap_default_volumes + nova_cell_bootstrap_extra_volumes }}"
  when: inventory_hostname == groups[nova_cell_conductor_group][0]
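The KOLLA_OSM environment variable is interpreted by the kolla image's start scripts rather than by anything in this diff; the effect should be equivalent to running nova's online data migrations by hand against the cell database, along the lines of:

    docker exec nova_conductor nova-manage db online_data_migrations

The exact entrypoint behaviour lives in the kolla images, so treat this equivalence as an assumption rather than a guarantee.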
@ -3,55 +3,26 @@
  become: true
  kolla_container_facts:
    name:
      - nova_api
      - nova_libvirt
      - nova_novncproxy
      - nova_serialproxy
      - nova_spicehtml5proxy
      - nova_ssh
      - nova_libvirt
  register: container_facts

- name: Checking available compute nodes in inventory
  vars:
    nova_compute_ironic: "{{ nova_services['nova-compute-ironic'] }}"
    nova_compute_ironic: "{{ nova_cell_services['nova-compute-ironic'] }}"
  fail:
    msg: >
      At least 1 compute node required in inventory when ironic is disabled.
  when:
    - groups['compute'] | length < 1
    - groups[nova_cell_compute_group] | length < 1
    - not nova_compute_ironic.enabled | bool

- name: Checking free port for Nova API
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
  wait_for:
    host: "{{ api_interface_address }}"
    port: "{{ nova_api_listen_port }}"
    connect_timeout: 1
    timeout: 1
    state: stopped
  when:
    - container_facts['nova_api'] is not defined
    - inventory_hostname in groups[nova_api.group]
    - nova_api.enabled | bool

- name: Checking free port for Nova Metadata
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
  wait_for:
    host: "{{ api_interface_address }}"
    port: "{{ nova_metadata_listen_port }}"
    connect_timeout: 1
    timeout: 1
    state: stopped
  when:
    - container_facts['nova_api'] is not defined
    - inventory_hostname in groups[nova_api.group]
    - nova_api.enabled | bool

- name: Checking free port for Nova NoVNC Proxy
  vars:
    nova_novncproxy: "{{ nova_services['nova-novncproxy'] }}"
    nova_novncproxy: "{{ nova_cell_services['nova-novncproxy'] }}"
  wait_for:
    host: "{{ api_interface_address }}"
    port: "{{ nova_novncproxy_listen_port }}"

@ -65,7 +36,7 @@

- name: Checking free port for Nova Serial Proxy
  vars:
    nova_serialproxy: "{{ nova_services['nova-serialproxy'] }}"
    nova_serialproxy: "{{ nova_cell_services['nova-serialproxy'] }}"
  wait_for:
    host: "{{ api_interface_address }}"
    port: "{{ nova_serialproxy_listen_port }}"

@ -79,7 +50,7 @@

- name: Checking free port for Nova Spice HTML5 Proxy
  vars:
    nova_spicehtml5proxy: "{{ nova_services['nova-spicehtml5proxy'] }}"
    nova_spicehtml5proxy: "{{ nova_cell_services['nova-spicehtml5proxy'] }}"
  wait_for:
    host: "{{ api_interface_address }}"
    port: "{{ nova_spicehtml5proxy_listen_port }}"

@ -93,7 +64,7 @@

- name: Checking free port for Nova SSH
  vars:
    nova_ssh: "{{ nova_services['nova-ssh'] }}"
    nova_ssh: "{{ nova_cell_services['nova-ssh'] }}"
  wait_for:
    host: "{{ migration_interface_address }}"
    port: "{{ nova_ssh_port }}"

@ -107,7 +78,7 @@

- name: Checking free port for Nova Libvirt
  vars:
    nova_libvirt: "{{ nova_services['nova-libvirt'] }}"
    nova_libvirt: "{{ nova_cell_services['nova-libvirt'] }}"
  wait_for:
    host: "{{ api_interface_address }}"
    port: "{{ nova_libvirt_port }}"

@ -121,7 +92,7 @@

- name: Checking that libvirt is not running
  vars:
    nova_libvirt: "{{ nova_services['nova-libvirt'] }}"
    nova_libvirt: "{{ nova_cell_services['nova-libvirt'] }}"
  stat: path=/var/run/libvirt/libvirt-sock
  register: result
  failed_when: result.stat.exists

@ -129,11 +100,3 @@
    - nova_compute_virt_type in ['kvm', 'qemu']
    - container_facts['nova_libvirt'] is not defined
    - inventory_hostname in groups[nova_libvirt.group]

# TODO(mgoddard): Remove this task in the Ussuri cycle.
- name: Check that legacy upgrade is not enabled
  fail:
    msg: >
      Legacy upgrade support has been removed. 'nova_enable_rolling_upgrade'
      should no longer be set.
  when: not nova_enable_rolling_upgrade | default(true) | bool
19
ansible/roles/nova-cell/tasks/proxy_loadbalancer.yml
Normal file
@ -0,0 +1,19 @@
---
# Configure HAProxy for all cells for a particular proxy type.
# Iterate over each cell group, creating HAProxy config for that cell.

- name: "Configure HAProxy for {{ cell_proxy_service_name }}"
  include_tasks: cell_proxy_loadbalancer.yml
  vars:
    # NOTE(mgoddard): Defining this here rather than in
    # cell_proxy_loadbalancer.yml due to a weird issue seen on Ansible 2.8. If
    # project_name is specified as a role variable for the import, it seems to
    # get stuck and override the variable for subsequent imports of the
    # haproxy-config role for other services. By that point
    # cell_proxy_service_name is no longer defined, so it fails.
    project_name: "nova-cell:{{ cell_proxy_service_name }}"
  with_items: "{{ cell_proxy_groups }}"
  when: groups[cell_proxy_group] | length > 0
  loop_control:
    loop_var: cell_proxy_group
  tags: always
@ -8,4 +8,4 @@
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ nova_services }}"
  with_dict: "{{ nova_cell_services }}"
28
ansible/roles/nova-cell/tasks/rabbitmq.yml
Normal file
@ -0,0 +1,28 @@
---
# Create RabbitMQ users and vhosts.
- block:
    - import_role:
        name: service-rabbitmq
      vars:
        service_rabbitmq_users: "{{ nova_cell_rpc_rabbitmq_users }}"
        # Create users for cells in parallel.
        service_rabbitmq_run_once: false
        service_rabbitmq_when: "{{ inventory_hostname == groups[nova_cell_conductor_group][0] | default }}"
        # Delegate to a host in the RPC group.
        service_rabbitmq_delegate_host: "{{ groups[nova_cell_rpc_group_name][0] | default }}"

    - import_role:
        name: service-rabbitmq
      vars:
        service_rabbitmq_users: "{{ nova_cell_notify_rabbitmq_users }}"
        # Create users for cells in parallel.
        service_rabbitmq_run_once: false
        service_rabbitmq_when: "{{ inventory_hostname == groups[nova_cell_conductor_group][0] | default }}"
        # Delegate to a host in the notify group.
        service_rabbitmq_delegate_host: "{{ groups[nova_cell_notify_group_name][0] | default }}"
      when:
        - nova_cell_rpc_group_name != nova_cell_notify_group_name or
          nova_cell_rpc_rabbitmq_users != nova_cell_notify_rabbitmq_users
  when: nova_cell_rpc_transport == 'rabbit'
  tags: always
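The nova_cell_rpc_rabbitmq_users and nova_cell_notify_rabbitmq_users defaults live elsewhere in the role and are not part of this hunk. Based on the service-rabbitmq role's interface, each entry is assumed to carry a user, a password, and a vhost, roughly:

    # Assumed shape (illustrative, not from this diff):
    nova_cell_rpc_rabbitmq_users:
      - user: "{{ nova_cell_rpc_user }}"
        password: "{{ nova_cell_rpc_password }}"
        vhost: "{{ nova_cell_rpc_vhost }}"

The second import is skipped when the notify transport and users are identical to the RPC ones, so shared settings are only created once.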
@ -1,8 +0,0 @@
---
- import_role:
    name: service-ks-register
  vars:
    service_ks_register_auth: "{{ openstack_nova_auth }}"
    service_ks_register_services: "{{ nova_ks_services }}"
    service_ks_register_users: "{{ nova_ks_users }}"
  tags: always
40
ansible/roles/nova-cell/tasks/reload.yml
Normal file
@ -0,0 +1,40 @@
---
# NOTE(mgoddard): Currently (just prior to Stein release), sending SIGHUP to
# nova compute services leaves them in a broken state in which they cannot
# start new instances. The following error is seen in the logs:
# "In shutdown, no new events can be scheduled"
# To work around this we restart the nova-compute services.
# Speaking to the nova team, this seems to be an issue in oslo.service,
# with a fix proposed here: https://review.openstack.org/#/c/641907.
# This issue also seems to affect the proxy services, which exit non-zero in
# response to a SIGHUP, so restart those too.
# The issue actually affects all nova services, since they remain with RPC
# version pinned to the previous release:
# https://bugs.launchpad.net/kolla-ansible/+bug/1833069.
# TODO(mgoddard): Use SIGHUP when this bug has been fixed.

# NOTE(mgoddard): We use recreate_or_restart_container to cover the case where
# nova_safety_upgrade is "yes", and we need to recreate all containers.

# FIXME(mgoddard): Need to always do this since nova-compute handlers will not
# generally fire on controllers.
- name: Reload nova cell services to remove RPC version cap
  vars:
    service: "{{ nova_cell_services[item] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    environment: "{{ service.environment|default(omit) }}"
    pid_mode: "{{ service.pid_mode|default('') }}"
    ipc_mode: "{{ service.ipc_mode|default(omit) }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action == 'upgrade'
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
  with_items: "{{ nova_cell_services_require_nova_conf }}"
@ -2,21 +2,6 @@
# Create new set of configs on nodes
- include_tasks: config.yml

- include_tasks: bootstrap_service.yml

- name: Stopping all nova services except nova-compute
  become: true
  kolla_docker:
    action: "stop_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ item.value.container_name }}"
  with_dict: "{{ nova_services }}"
  when:
    - "'nova-compute' not in item.key"
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
    - nova_safety_upgrade | bool

# TODO(donghm): Flush_handlers to restart nova services
# should be run in serial nodes to decrease downtime if
# the previous task did not run. Update when the

@ -24,23 +9,3 @@

- name: Flush handlers
  meta: flush_handlers

- name: Migrate Nova database
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    common_options: "{{ docker_common_options }}"
    detach: False
    environment:
      KOLLA_OSM:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
    image: "{{ nova_api.image }}"
    labels:
      BOOTSTRAP:
    name: "bootstrap_nova"
    restart_policy: no
    volumes: "{{ nova_api.volumes }}"
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"
@ -2,5 +2,5 @@
- import_role:
    role: service-stop
  vars:
    project_services: "{{ nova_services }}"
    project_services: "{{ nova_cell_services }}"
    service_name: "{{ project_name }}"
@ -1,25 +1,16 @@
---
- name: Check nova upgrade status
  become: true
  command: docker exec -t nova_api nova-status upgrade check
  register: nova_upgrade_check_stdout
  when: inventory_hostname == groups['nova-api'][0]
  failed_when: false

- name: Upgrade status check result
  fail:
    msg:
      - "There was an upgrade status check failure!"
      - "See the detail at https://docs.openstack.org/nova/latest/cli/nova-status.html#nova-status-checks"
  vars:
    first_nova_api_host: "{{ groups['nova-api'][0] }}"
  when: hostvars[first_nova_api_host]['nova_upgrade_check_stdout']['rc'] not in [0, 1]

- include_tasks: rolling_upgrade.yml

# NOTE(jeffrey4l): Remove this task in U cycle.
- name: Remove nova-consoleauth container
- name: Stopping nova cell services
  become: true
  kolla_docker:
    action: "remove_container"
    name: "nova_consoleauth"
    action: "stop_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ item.value.container_name }}"
  with_dict: "{{ nova_cell_services }}"
  when:
    - "'nova-compute' not in item.key"
    - item.key in nova_cell_services_require_nova_conf
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
    - nova_safety_upgrade | bool

- include_tasks: rolling_upgrade.yml
@ -1,24 +0,0 @@
{
    "command": "nova-api",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/nova.conf",
            "dest": "/etc/nova/nova.conf",
            "owner": "nova",
            "perm": "0600"
        }{% if nova_policy_file is defined %},
        {
            "source": "{{ container_config_directory }}/{{ nova_policy_file }}",
            "dest": "/etc/nova/{{ nova_policy_file }}",
            "owner": "nova",
            "perm": "0600"
        }{% endif %}
    ],
    "permissions": [
        {
            "path": "/var/log/kolla/nova",
            "owner": "nova:nova",
            "recurse": true
        }
    ]
}
@ -0,0 +1,17 @@
{
    "command": "false",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/nova.conf",
            "dest": "/etc/nova/nova.conf",
            "owner": "nova",
            "perm": "0600"
        }
    ],
    "permissions": [
        {
            "path": "/var/log/kolla/nova",
            "owner": "nova:nova"
        }
    ]
}
@ -6,13 +6,7 @@
            "dest": "/etc/nova/nova.conf",
            "owner": "nova",
            "perm": "0600"
        }{% if nova_policy_file is defined %},
        {
            "source": "{{ container_config_directory }}/{{ nova_policy_file }}",
            "dest": "/etc/nova/{{ nova_policy_file }}",
            "owner": "nova",
            "perm": "0600"
        }{% endif %}
        }
    ],
    "permissions": [
        {
@ -1,24 +0,0 @@
{
    "command": "nova-scheduler",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/nova.conf",
            "dest": "/etc/nova/nova.conf",
            "owner": "nova",
            "perm": "0600"
        }{% if nova_policy_file is defined %},
        {
            "source": "{{ container_config_directory }}/{{ nova_policy_file }}",
            "dest": "/etc/nova/{{ nova_policy_file }}",
            "owner": "nova",
            "perm": "0600"
        }{% endif %}
    ],
    "permissions": [
        {
            "path": "/var/log/kolla/nova",
            "owner": "nova:nova",
            "recurse": true
        }
    ]
}
@ -6,13 +6,7 @@
            "dest": "/etc/nova/nova.conf",
            "owner": "nova",
            "perm": "0600"
        }{% if nova_policy_file is defined %},
        {
            "source": "{{ container_config_directory }}/{{ nova_policy_file }}",
            "dest": "/etc/nova/{{ nova_policy_file }}",
            "owner": "nova",
            "perm": "0600"
        }{% endif %}
        }
    ],
    "permissions": [
        {
@ -3,7 +3,7 @@
connection_uri = "qemu+tls://{{ migration_hostname }}/system"
live_migration_uri = "qemu+tls://%s/system"
{% else %}
connection_uri = "qemu+tcp://{{ migration_interface_address }}/system"
connection_uri = "qemu+tcp://{{ migration_interface_address | put_address_in_context('url') }}/system"
{% endif %}
{% if enable_ceph | bool and nova_backend == "rbd" %}
images_type = rbd
@ -6,14 +6,6 @@ log_dir = /var/log/kolla/nova

state_path = /var/lib/nova

osapi_compute_listen = {{ api_interface_address }}
osapi_compute_listen_port = {{ nova_api_listen_port }}
osapi_compute_workers = {{ openstack_service_workers }}
metadata_workers = {{ openstack_service_workers }}

metadata_listen = {{ api_interface_address }}
metadata_listen_port = {{ nova_metadata_listen_port }}

allow_resize_to_same_host = true

{% if service_name == "nova-compute-ironic" %}

@ -47,17 +39,7 @@ compute_monitors=nova.compute.monitors.cpu.virt_driver
{% endif %}
{% endif %}

transport_url = {{ rpc_transport_url }}

{% if enable_blazar | bool %}
[filter_scheduler]
available_filters = nova.scheduler.filters.all_filters
available_filters = blazarnova.scheduler.filters.blazar_filter.BlazarFilter
enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,BlazarFilter
{% endif %}

[api]
use_forwarded_for = true
transport_url = {{ nova_cell_rpc_transport_url }}

[conductor]
workers = {{ openstack_service_workers }}

@ -71,8 +53,8 @@ novncproxy_host = {{ api_interface_address }}
novncproxy_port = {{ nova_novncproxy_listen_port }}
server_listen = {{ api_interface_address }}
server_proxyclient_address = {{ api_interface_address }}
{% if inventory_hostname in groups['compute'] %}
novncproxy_base_url = {{ public_protocol }}://{{ nova_novncproxy_fqdn }}:{{ nova_novncproxy_port }}/vnc_auto.html
{% if inventory_hostname in groups[nova_cell_compute_group] %}
novncproxy_base_url = {{ public_protocol }}://{{ nova_novncproxy_fqdn | put_address_in_context('url') }}:{{ nova_novncproxy_port }}/vnc_auto.html
{% endif %}
{% endif %}
{% elif nova_console == 'spice' %}

@ -83,8 +65,8 @@ enabled = false
enabled = true
server_listen = {{ api_interface_address }}
server_proxyclient_address = {{ api_interface_address }}
{% if inventory_hostname in groups['compute'] %}
html5proxy_base_url = {{ public_protocol }}://{{ nova_spicehtml5proxy_fqdn }}:{{ nova_spicehtml5proxy_port }}/spice_auto.html
{% if inventory_hostname in groups[nova_cell_compute_group] %}
html5proxy_base_url = {{ public_protocol }}://{{ nova_spicehtml5proxy_fqdn | put_address_in_context('url') }}:{{ nova_spicehtml5proxy_port }}/spice_auto.html
{% endif %}
html5proxy_host = {{ api_interface_address }}
html5proxy_port = {{ nova_spicehtml5proxy_listen_port }}

@ -97,7 +79,7 @@ enabled = false
{% if enable_nova_serialconsole_proxy | bool %}
[serial_console]
enabled = true
base_url = {{ nova_serialproxy_protocol }}://{{ nova_serialproxy_fqdn }}:{{ nova_serialproxy_port }}/
base_url = {{ nova_serialproxy_protocol }}://{{ nova_serialproxy_fqdn | put_address_in_context('url') }}:{{ nova_serialproxy_port }}/
serialproxy_host = {{ api_interface_address }}
serialproxy_port = {{ nova_serialproxy_listen_port }}
proxyclient_address = {{ api_interface_address }}

@ -112,19 +94,16 @@ auth_type = password
project_name = service
user_domain_name = {{ default_user_domain_name }}
project_domain_name = {{ default_project_domain_name }}
endpoint_override = {{ internal_protocol }}://{{ ironic_internal_fqdn }}:{{ ironic_api_port }}/v1
endpoint_override = {{ internal_protocol }}://{{ ironic_internal_fqdn | put_address_in_context('url') }}:{{ ironic_api_port }}/v1
{% endif %}

[oslo_middleware]
enable_proxy_headers_parsing = True

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[glance]
api_servers = {{ internal_protocol }}://{{ glance_internal_fqdn }}:{{ glance_api_port }}

num_retries = {{ groups['glance-api'] | length }}
api_servers = {{ internal_protocol }}://{{ glance_internal_fqdn | put_address_in_context('url') }}:{{ glance_api_port }}
num_retries = 3

{% if enable_cinder | bool %}
[cinder]

@ -150,35 +129,17 @@ valid_interfaces = internal

{% if not service_name.startswith('nova-compute') %}
[database]
connection = mysql+pymysql://{{ nova_database_user }}:{{ nova_database_password }}@{{ nova_database_address }}/{{ nova_database_name }}
connection = mysql+pymysql://{{ nova_cell_database_user }}:{{ nova_cell_database_password }}@{{ nova_cell_database_address | put_address_in_context('url') }}:{{ nova_cell_database_port }}/{{ nova_cell_database_name }}
max_pool_size = 50
max_overflow = 1000
max_retries = -1

{% if service_name == 'nova-cell-bootstrap' or (service_name == 'nova-conductor' and nova_cell_conductor_has_api_database | bool) %}
[api_database]
connection = mysql+pymysql://{{ nova_api_database_user }}:{{ nova_api_database_password }}@{{ nova_api_database_address }}/{{ nova_api_database_name }}
max_retries = -1
{% endif %}

[cache]
backend = oslo_cache.memcache_pool
enabled = True
memcache_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}


[keystone_authtoken]
www_authenticate_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_id = {{ default_project_domain_id }}
user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ nova_keystone_user }}
password = {{ nova_keystone_password }}

memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
{% endif %}

{% if service_name == 'nova-compute' %}
{% if nova_compute_virt_type in ['kvm', 'qemu'] %}

@ -205,7 +166,7 @@ ca_file = /etc/nova/vmware_ca
compute = auto

[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
transport_url = {{ nova_cell_notify_transport_url }}
{% if nova_enabled_notification_topics %}
driver = messagingv2
topics = {{ nova_enabled_notification_topics | map(attribute='name') | join(',') }}

@ -213,7 +174,7 @@ topics = {{ nova_enabled_notification_topics | map(attribute='name') | join(',')
driver = noop
{% endif %}

{% if nova_policy_file is defined %}
{% if service_name in nova_cell_services_require_policy_json and nova_policy_file is defined %}
[oslo_policy]
policy_file = {{ nova_policy_file }}
{% endif %}

@ -227,23 +188,6 @@ debug = {{ nova_logging_debug }}
[guestfs]
debug = {{ nova_logging_debug }}

[wsgi]
api_paste_config = /etc/nova/api-paste.ini
{% if kolla_enable_tls_external | bool or kolla_enable_tls_internal | bool %}
secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO
{% endif %}

[scheduler]
max_attempts = 10
# NOTE(yoctozepto): kolla-ansible handles cell mapping by itself on each deploy
# periodic run must be disabled to avoid random failures (where both try to map)
# -1 is default and means periodic discovery is disabled
discover_hosts_in_cells_interval = -1

{% if enable_nova_fake | bool %}
default_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
{% endif %}

[placement]
auth_type = password
auth_url = {{ keystone_admin_url }}

@ -286,3 +230,15 @@ connection_password = {{ xenserver_password }}
connection_username = {{ xenserver_username }}
connection_url = {{ xenserver_connect_protocol }}://{{ xenserver_himn_ip }}
{% endif %}

# Cell specific settings from DevStack:
# https://opendev.org/openstack/devstack/src/branch/master/lib/nova#L874
{% if service_name.startswith("nova-compute") and enable_cells | bool %}
[workarounds]
disable_group_policy_check_upcall = true

[filter_scheduler]
# When in superconductor mode, nova-compute can't send instance
# info updates to the scheduler, so just disable it.
track_instance_changes = false
{% endif %}
@ -2,22 +2,6 @@
|
||||
project_name: "nova"
|
||||
|
||||
nova_services:
|
||||
nova-libvirt:
|
||||
container_name: nova_libvirt
|
||||
group: compute
|
||||
enabled: "{{ nova_compute_virt_type in ['kvm', 'qemu'] }}"
|
||||
image: "{{ nova_libvirt_image_full }}"
|
||||
pid_mode: "host"
|
||||
privileged: True
|
||||
volumes: "{{ nova_libvirt_default_volumes + nova_libvirt_extra_volumes }}"
|
||||
dimensions: "{{ nova_libvirt_dimensions }}"
|
||||
nova-ssh:
|
||||
container_name: "nova_ssh"
|
||||
group: "compute"
|
||||
image: "{{ nova_ssh_image_full }}"
|
||||
enabled: "{{ enable_nova_ssh | bool }}"
|
||||
volumes: "{{ nova_ssh_default_volumes + nova_ssh_extra_volumes }}"
|
||||
dimensions: "{{ nova_ssh_dimensions }}"
|
||||
nova-api:
|
||||
container_name: "nova_api"
|
||||
group: "nova-api"
|
||||
@ -57,28 +41,6 @@ nova_services:
|
||||
external: false
|
||||
port: "{{ rdp_port }}"
|
||||
host_group: "hyperv"
|
||||
nova-novncproxy:
|
||||
container_name: "nova_novncproxy"
|
||||
group: "nova-novncproxy"
|
||||
image: "{{ nova_novncproxy_image_full }}"
|
||||
enabled: "{{ nova_console == 'novnc' }}"
|
||||
volumes: "{{ nova_novncproxy_default_volumes + nova_novncproxy_extra_volumes }}"
|
||||
dimensions: "{{ nova_novncproxy_dimensions }}"
|
||||
haproxy:
|
||||
nova_novncproxy:
|
||||
enabled: "{{ enable_nova|bool and nova_console == 'novnc' }}"
|
||||
mode: "http"
|
||||
external: false
|
||||
port: "{{ nova_novncproxy_port }}"
|
||||
listen_port: "{{ nova_novncproxy_listen_port }}"
|
||||
backend_http_extra:
|
||||
- "timeout tunnel 1h"
|
||||
nova_novncproxy_external:
|
||||
enabled: "{{ enable_nova|bool and nova_console == 'novnc' }}"
|
||||
mode: "http"
|
||||
external: true
|
||||
port: "{{ nova_novncproxy_port }}"
|
||||
listen_port: "{{ nova_novncproxy_listen_port }}"
|
||||
nova-scheduler:
|
||||
container_name: "nova_scheduler"
|
||||
group: "nova-scheduler"
|
||||
@ -86,112 +48,27 @@ nova_services:
|
||||
enabled: True
|
||||
volumes: "{{ nova_scheduler_default_volumes + nova_scheduler_extra_volumes }}"
|
||||
dimensions: "{{ nova_scheduler_dimensions }}"
|
||||
nova-spicehtml5proxy:
|
||||
container_name: "nova_spicehtml5proxy"
|
||||
group: "nova-spicehtml5proxy"
|
||||
image: "{{ nova_spicehtml5proxy_image_full }}"
|
||||
enabled: "{{ nova_console == 'spice' }}"
|
||||
volumes: "{{ nova_spicehtml5proxy_default_volumes + nova_spicehtml5proxy_extra_volumes }}"
|
||||
dimensions: "{{ nova_spicehtml5proxy_dimensions }}"
|
||||
haproxy:
|
||||
nova_spicehtml5proxy:
|
||||
enabled: "{{ enable_nova|bool and nova_console == 'spice' }}"
|
||||
mode: "http"
|
||||
external: false
|
||||
port: "{{ nova_spicehtml5proxy_port }}"
|
||||
listen_port: "{{ nova_spicehtml5proxy_listen_port }}"
|
||||
nova_spicehtml5proxy_external:
|
||||
enabled: "{{ enable_nova|bool and nova_console == 'spice' }}"
|
||||
mode: "http"
|
||||
external: true
|
||||
port: "{{ nova_spicehtml5proxy_port }}"
|
||||
listen_port: "{{ nova_spicehtml5proxy_listen_port }}"
|
||||
nova-serialproxy:
|
||||
container_name: "nova_serialproxy"
|
||||
group: "nova-serialproxy"
|
||||
image: "{{ nova_serialproxy_image_full }}"
|
||||
enabled: "{{ enable_nova_serialconsole_proxy | bool }}"
|
||||
volumes: "{{ nova_serialproxy_default_volumes + nova_serialproxy_extra_volumes }}"
|
||||
dimensions: "{{ nova_serialproxy_dimensions }}"
|
||||
haproxy:
|
||||
nova_serialconsole_proxy:
|
||||
enabled: "{{ enable_nova|bool and enable_nova_serialconsole_proxy|bool }}"
|
||||
mode: "http"
|
||||
external: false
|
||||
port: "{{ nova_serialproxy_port }}"
|
||||
listen_port: "{{ nova_serialproxy_listen_port }}"
|
||||
backend_http_extra:
|
||||
- "timeout tunnel {{ haproxy_nova_serialconsole_proxy_tunnel_timeout }}"
|
||||
nova_serialconsole_proxy_external:
|
||||
enabled: "{{ enable_nova|bool and enable_nova_serialconsole_proxy|bool }}"
|
||||
mode: "http"
|
||||
external: true
|
||||
port: "{{ nova_serialproxy_port }}"
|
||||
listen_port: "{{ nova_serialproxy_listen_port }}"
|
||||
backend_http_extra:
|
||||
- "timeout tunnel {{ haproxy_nova_serialconsole_proxy_tunnel_timeout }}"
|
||||
nova-conductor:
|
||||
container_name: "nova_conductor"
|
||||
group: "nova-conductor"
|
||||
enabled: True
|
||||
image: "{{ nova_conductor_image_full }}"
|
||||
volumes: "{{ nova_conductor_default_volumes + nova_conductor_extra_volumes }}"
|
||||
dimensions: "{{ nova_conductor_dimensions }}"
|
||||
nova-compute:
|
||||
container_name: "nova_compute"
|
||||
group: "compute"
|
||||
image: "{{ nova_compute_image_full }}"
|
||||
environment:
|
||||
LIBGUESTFS_BACKEND: "direct"
|
||||
privileged: True
|
||||
enabled: "{{ not enable_nova_fake | bool }}"
|
||||
ipc_mode: "host"
|
||||
volumes: "{{ nova_compute_default_volumes + nova_compute_extra_volumes }}"
|
||||
dimensions: "{{ nova_compute_dimensions }}"
|
||||
nova-compute-ironic:
|
||||
container_name: "nova_compute_ironic"
|
||||
group: "nova-compute-ironic"
|
||||
image: "{{ nova_compute_ironic_image_full }}"
|
||||
enabled: "{{ enable_ironic | bool }}"
|
||||
volumes: "{{ nova_compute_ironic_default_volumes + nova_compute_ironic_extra_volumes }}"
|
||||
dimensions: "{{ nova_compute_ironic_dimensions }}"
|
||||
|
||||
####################
|
||||
# Ceph
|
||||
####################
|
||||
ceph_nova_pool_type: "{{ ceph_pool_type }}"
|
||||
ceph_nova_cache_mode: "{{ ceph_cache_mode }}"
|
||||
|
||||
# Due to Ansible issues on include, you cannot override these variables. Please
|
||||
# override the variables they reference instead.
|
||||
nova_pool_name: "{{ ceph_nova_pool_name }}"
|
||||
nova_pool_type: "{{ ceph_nova_pool_type }}"
|
||||
nova_cache_mode: "{{ ceph_nova_cache_mode }}"
|
||||
nova_pool_pg_num: "{{ ceph_pool_pg_num }}"
|
||||
nova_pool_pgp_num: "{{ ceph_pool_pgp_num }}"
|
||||
|
||||
# Discard option for nova managed disks. Requires libvirt 1.0.6 or later and
# qemu 1.6.0 or later. Set to "" to disable.
nova_hw_disk_discard: "unmap"

ceph_client_nova_keyring_caps:
  mon: 'profile rbd'
  osd: >-
    profile rbd pool={{ ceph_cinder_pool_name }},
    profile rbd pool={{ ceph_cinder_pool_name }}-cache,
    profile rbd pool={{ ceph_nova_pool_name }},
    profile rbd pool={{ ceph_nova_pool_name }}-cache,
    profile rbd pool={{ ceph_glance_pool_name }},
    profile rbd pool={{ ceph_glance_pool_name }}-cache
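
For reference, the folded "osd" scalar above renders to a single caps line.
Assuming the pool-name defaults used elsewhere in kolla-ansible (volumes, vms
and images -- an assumption, since those defaults are not part of this diff),
the rendered value would be roughly:

# Hypothetical rendered caps (pool names assumed):
# mon: profile rbd
# osd: profile rbd pool=volumes, profile rbd pool=volumes-cache, profile rbd pool=vms, profile rbd pool=vms-cache, profile rbd pool=images, profile rbd pool=images-cache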

  nova-super-conductor:
    container_name: "nova_super_conductor"
    group: "nova-super-conductor"
    enabled: "{{ enable_cells }}"
    image: "{{ nova_super_conductor_image_full }}"
    volumes: "{{ nova_super_conductor_default_volumes + nova_super_conductor_extra_volumes }}"
    dimensions: "{{ nova_super_conductor_dimensions }}"

####################
# Database
####################
# These are kept for backwards compatibility, as cell0 references them.
nova_database_name: "nova"
nova_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}nova{% endif %}"
nova_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"

nova_cell0_database_name: "{{ nova_database_name }}_cell0"
nova_cell0_database_user: "{{ nova_database_user }}"
nova_cell0_database_address: "{{ nova_database_address }}"
nova_cell0_database_password: "{{ nova_database_password }}"

nova_api_database_name: "nova_api"
nova_api_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}nova_api{% endif %}"
nova_api_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
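
These variables are assembled into SQLAlchemy connection URLs by the bootstrap
tasks later in this change (see map_cell0.yml below). As a sketch, using the
placeholder host and password from the sample row in create_cells.yml, the
cell0 URL would render along these lines:

# Hypothetical rendering (placeholder credentials, not taken from this diff):
# nova_cell0_connection: "mysql+pymysql://nova:password@1.2.3.4:3306/nova_cell0"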
@@ -202,145 +79,53 @@ nova_api_database_address: "{{ database_address | put_address_in_context('url')
nova_install_type: "{{ kolla_install_type }}"
nova_tag: "{{ openstack_release }}"

nova_libvirt_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-libvirt"
nova_libvirt_tag: "{{ nova_tag }}"
nova_libvirt_image_full: "{{ nova_libvirt_image }}:{{ nova_libvirt_tag }}"
nova_libvirt_cpu_mode: "{{ 'host-passthrough' if ansible_architecture == 'aarch64' else '' }}"

nova_ssh_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-ssh"
nova_ssh_tag: "{{ nova_tag }}"
nova_ssh_image_full: "{{ nova_ssh_image }}:{{ nova_ssh_tag }}"

nova_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-conductor"
nova_conductor_tag: "{{ nova_tag }}"
nova_conductor_image_full: "{{ nova_conductor_image }}:{{ nova_conductor_tag }}"

nova_novncproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-novncproxy"
nova_novncproxy_tag: "{{ nova_tag }}"
nova_novncproxy_image_full: "{{ nova_novncproxy_image }}:{{ nova_novncproxy_tag }}"

nova_spicehtml5proxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-spicehtml5proxy"
nova_spicehtml5proxy_tag: "{{ nova_tag }}"
nova_spicehtml5proxy_image_full: "{{ nova_spicehtml5proxy_image }}:{{ nova_spicehtml5proxy_tag }}"
nova_super_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-conductor"
nova_super_conductor_tag: "{{ nova_tag }}"
nova_super_conductor_image_full: "{{ nova_super_conductor_image }}:{{ nova_super_conductor_tag }}"

nova_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-scheduler"
nova_scheduler_tag: "{{ nova_tag }}"
nova_scheduler_image_full: "{{ nova_scheduler_image }}:{{ nova_scheduler_tag }}"

nova_compute_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-compute"
nova_compute_tag: "{{ nova_tag }}"
nova_compute_image_full: "{{ nova_compute_image }}:{{ nova_compute_tag }}"

nova_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-api"
nova_api_tag: "{{ nova_tag }}"
nova_api_image_full: "{{ nova_api_image }}:{{ nova_api_tag }}"

nova_compute_ironic_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-compute-ironic"
nova_compute_ironic_tag: "{{ nova_tag }}"
nova_compute_ironic_image_full: "{{ nova_compute_ironic_image }}:{{ nova_compute_ironic_tag }}"

nova_serialproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-serialproxy"
nova_serialproxy_tag: "{{ nova_tag }}"
nova_serialproxy_image_full: "{{ nova_serialproxy_image }}:{{ nova_serialproxy_tag }}"

nova_libvirt_dimensions: "{{ default_container_dimensions }}"
nova_ssh_dimensions: "{{ default_container_dimensions }}"
nova_api_dimensions: "{{ default_container_dimensions }}"
nova_novncproxy_dimensions: "{{ default_container_dimensions }}"
nova_scheduler_dimensions: "{{ default_container_dimensions }}"
nova_spicehtml5proxy_dimensions: "{{ default_container_dimensions }}"
nova_serialproxy_dimensions: "{{ default_container_dimensions }}"
nova_conductor_dimensions: "{{ default_container_dimensions }}"
nova_compute_dimensions: "{{ default_container_dimensions }}"
nova_compute_ironic_dimensions: "{{ default_container_dimensions }}"
nova_super_conductor_dimensions: "{{ default_container_dimensions }}"

nova_libvirt_default_volumes:
  - "{{ node_config_directory }}/nova-libvirt/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "/lib/modules:/lib/modules:ro"
  - "/run/:/run/:shared"
  - "/dev:/dev"
  - "/sys/fs/cgroup:/sys/fs/cgroup"
  - "kolla_logs:/var/log/kolla/"
  - "libvirtd:/var/lib/libvirt"
  - "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
  - "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}"
  - "nova_libvirt_qemu:/etc/libvirt/qemu"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_ssh_default_volumes:
  - "{{ node_config_directory }}/nova-ssh/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla"
  - "{{ nova_instance_datadir_volume }}:/var/lib/nova"
  - "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_api_default_volumes:
  - "{{ node_config_directory }}/nova-api/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "/lib/modules:/lib/modules:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_novncproxy_default_volumes:
  - "{{ node_config_directory }}/nova-novncproxy/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_scheduler_default_volumes:
  - "{{ node_config_directory }}/nova-scheduler/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_spicehtml5proxy_default_volumes:
  - "{{ node_config_directory }}/nova-spicehtml5proxy/:{{ container_config_directory }}/:ro"
nova_super_conductor_default_volumes:
  - "{{ node_config_directory }}/nova-super-conductor/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_serialproxy_default_volumes:
  - "{{ node_config_directory }}/nova-serialproxy/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_conductor_default_volumes:
  - "{{ node_config_directory }}/nova-conductor/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_compute_default_volumes:
  - "{{ node_config_directory }}/nova-compute/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "/lib/modules:/lib/modules:ro"
  - "/run:/run:shared"
  - "/dev:/dev"
  - "kolla_logs:/var/log/kolla/"
  - "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
  - "libvirtd:/var/lib/libvirt"
  - "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
  - "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_compute_ironic_default_volumes:
  - "{{ node_config_directory }}/nova-compute-ironic/:{{ container_config_directory }}/:ro"
# Used by bootstrapping containers.
nova_api_bootstrap_default_volumes:
  - "{{ node_config_directory }}/nova-api-bootstrap/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"

nova_extra_volumes: "{{ default_extra_volumes }}"
nova_libvirt_extra_volumes: "{{ nova_extra_volumes }}"
nova_ssh_extra_volumes: "{{ nova_extra_volumes }}"
nova_api_extra_volumes: "{{ nova_extra_volumes }}"
nova_novncproxy_extra_volumes: "{{ nova_extra_volumes }}"
nova_scheduler_extra_volumes: "{{ nova_extra_volumes }}"
nova_spicehtml5proxy_extra_volumes: "{{ nova_extra_volumes }}"
nova_serialproxy_extra_volumes: "{{ nova_extra_volumes }}"
nova_conductor_extra_volumes: "{{ nova_extra_volumes }}"
nova_compute_extra_volumes: "{{ nova_extra_volumes }}"
nova_compute_ironic_extra_volumes: "{{ nova_extra_volumes }}"
nova_super_conductor_extra_volumes: "{{ nova_extra_volumes }}"
# Used by bootstrapping containers.
nova_api_bootstrap_extra_volumes: "{{ nova_extra_volumes }}"


####################
# HAProxy
####################
haproxy_nova_serialconsole_proxy_tunnel_timeout: "10m"

####################
# OpenStack
####################
@@ -357,30 +142,10 @@ nova_logging_debug: "{{ openstack_logging_debug }}"

openstack_nova_auth: "{{ openstack_auth }}"

nova_compute_host_rp_filter_mode: 0
nova_safety_upgrade: "no"

nova_libvirt_port: "{{'16514' if libvirt_tls | bool else '16509'}}"
nova_ssh_port: "8022"

nova_services_require_nova_conf:
nova_services_require_policy_json:
  - nova-api
  - nova-compute
  - nova-compute-ironic
  - nova-conductor
  - nova-novncproxy
  - nova-serialproxy
  - nova-scheduler
  - nova-spicehtml5proxy

# After upgrading nova-compute, services will have an RPC version cap in place.
# We need to restart all services that communicate with nova-compute in order
# to allow them to use the latest RPC version. Ideally, there would be a way to
# check whether all nova services are using the latest version, but currently
# there is not. Instead, wait a short time for all nova compute services to
# update the version of their service in the database. This seems to take
# around 10 seconds, but the default is 30 to allow room for slowness.
nova_compute_startup_delay: 30
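
Since the delay is a plain role variable, a deployer whose compute services
are slow to update their service versions can simply raise it. A minimal
globals.yml sketch (the value 60 is only illustrative):

# /etc/kolla/globals.yml (hypothetical override)
nova_compute_startup_delay: 60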

####################
# Keystone
@@ -420,31 +185,6 @@ nova_notification_topics:

nova_enabled_notification_topics: "{{ nova_notification_topics | selectattr('enabled', 'equalto', true) | list }}"

####################
# VMware
####################
vmware_vcenter_datastore_regex: ".*"
ovs_bridge: "nsx-managed"

####################
# Libvirt/qemu
####################
# The number of max files qemu can open
qemu_max_files: 32768
# The number of max processes qemu can open
qemu_max_processes: 131072
# Use TLS for libvirt connections and live migration
libvirt_tls: false
# Should kolla-ansible manage/copy the certs. False assumes the deployer is
# responsible for making the TLS certs show up in the config directories. It
# also means the deployer is responsible for restarting the nova_compute and
# nova_libvirt containers when the key changes, as we can't know when to do that.
libvirt_tls_manage_certs: true
# When using TLS we verify that the hostname we are connected to matches the
# libvirt cert we are presented. As such we can't use IPs here, but keep the
# ability for people to override the hostname to use.
migration_hostname: "{{ ansible_nodename }}"
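
When libvirt_tls_manage_certs is true, kolla-ansible copies the certificates
from the deployer's custom config tree; the exact per-host and shared lookup
paths appear in config-libvirt-tls.yml further down in this diff. A minimal
sketch, assuming the conventional /etc/kolla/config custom-config root:

# /etc/kolla/globals.yml (sketch)
libvirt_tls: true
libvirt_tls_manage_certs: true
# Expected files, per config-libvirt-tls.yml below:
#   /etc/kolla/config/nova/nova-libvirt/cacert.pem
#   /etc/kolla/config/nova/nova-libvirt/servercert.pem
#   /etc/kolla/config/nova/nova-libvirt/serverkey.pem
#   /etc/kolla/config/nova/nova-libvirt/clientcert.pem
#   /etc/kolla/config/nova/nova-libvirt/clientkey.pem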

####################
# Kolla
####################
@@ -452,9 +192,3 @@ nova_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
nova_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
nova_dev_mode: "{{ kolla_dev_mode }}"
nova_source_version: "{{ kolla_source_version }}"

###################################
# Enable Shared Bind Propagation
###################################

enable_shared_var_lib_nova_mnt: "{{ enable_cinder_backend_nfs | bool or enable_cinder_backend_quobyte | bool }}"

@@ -1,7 +1,7 @@
---
- name: Restart nova-conductor container
- name: Restart nova-super-conductor container
  vars:
    service_name: "nova-conductor"
    service_name: "nova-super-conductor"
    service: "{{ nova_services[service_name] }}"
  become: true
  kolla_docker:
@@ -14,45 +14,7 @@
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart nova-ssh container
  vars:
    service_name: "nova-ssh"
    service: "{{ nova_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    pid_mode: "{{ service.pid_mode | default('') }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart nova-libvirt container
  vars:
    service_name: "nova-libvirt"
    service: "{{ nova_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    pid_mode: "{{ service.pid_mode | default('') }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  register: restart_nova_libvirt
  # NOTE(Jeffrey4l): retry up to 5 times to remove the nova_libvirt container,
  # because when guests are running, nova_libvirt raises an error even though
  # it is removed.
  retries: 5
  until: restart_nova_libvirt is success
  when:
    - kolla_action != "config"
    - kolla_action != "upgrade" or not nova_safety_upgrade | bool

- name: Restart nova-scheduler container
  vars:
@@ -69,54 +31,7 @@
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart nova-novncproxy container
  vars:
    service_name: "nova-novncproxy"
    service: "{{ nova_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart nova-spicehtml5proxy container
  vars:
    service_name: "nova-spicehtml5proxy"
    service: "{{ nova_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart nova-serialproxy container
  vars:
    service_name: "nova-serialproxy"
    service: "{{ nova_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"
    - kolla_action != "upgrade" or not nova_safety_upgrade | bool

- name: Restart nova-api container
  vars:
@@ -133,108 +48,4 @@
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart nova-compute container
  vars:
    service_name: "nova-compute"
    service: "{{ nova_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    environment: "{{ service.environment | default(omit) }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    ipc_mode: "{{ service.ipc_mode | default(omit) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart nova-compute-ironic container
  vars:
    service_name: "nova-compute-ironic"
    service: "{{ nova_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

# nova-compute-fake is special: it starts multiple containers, so all of its
# variables are kept here rather than in the defaults/main.yml file.
- name: Restart nova-compute-fake containers
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "nova_compute_fake_{{ item }}"
    image: "{{ nova_compute_image_full }}"
    privileged: True
    volumes:
      - "{{ node_config_directory }}/nova-compute-fake-{{ item }}/:{{ container_config_directory }}/:ro"
      - "/etc/localtime:/etc/localtime:ro"
      - "/lib/modules:/lib/modules:ro"
      - "/run:/run:shared"
      - "kolla_logs:/var/log/kolla/"
  with_sequence: start=1 end={{ num_nova_fake_per_node }}
  when:
    - kolla_action != "config"

# NOTE(mgoddard): After upgrading nova-compute, services will have an RPC
# version cap in place. We need to restart all services that communicate with
# nova-compute in order to allow them to use the latest RPC version. Ideally,
# there would be a way to check whether all nova services are using the latest
# version, but currently there is not. Instead, wait a short time for all nova
# compute services to update the version of their service in the database.
# This seems to take around 10 seconds, but the default is 30 to allow room
# for slowness.

- name: Wait for nova-compute services to update service versions
  pause:
    seconds: "{{ nova_compute_startup_delay }}"
  run_once: true
  when:
    - kolla_action == 'upgrade'
  listen:
    - Restart nova-compute container
    - Restart nova-compute-ironic container
    - Restart nova-compute-fake containers

# NOTE(mgoddard): Currently (just prior to the Stein release), sending SIGHUP
# to nova compute services leaves them in a broken state in which they cannot
# start new instances. The following error is seen in the logs:
# "In shutdown, no new events can be scheduled"
# To work around this we restart the nova-compute services.
# Speaking to the nova team, this seems to be an issue in oslo.service,
# with a fix proposed here: https://review.openstack.org/#/c/641907.
# This issue also seems to affect the proxy services, which exit non-zero in
# response to a SIGHUP, so restart those too.
# The issue actually affects all nova services, since they remain with RPC
# version pinned to the previous release:
# https://bugs.launchpad.net/kolla-ansible/+bug/1833069.
# TODO(mgoddard): Use SIGHUP when this bug has been fixed.

- name: Restart nova services to remove RPC version cap
  become: true
  kolla_docker:
    action: restart_container
    common_options: "{{ docker_common_options }}"
    name: "{{ item.value.container_name }}"
  when:
    - kolla_action == 'upgrade'
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
    - item.key in nova_services_require_nova_conf
  with_dict: "{{ nova_services }}"
  listen:
    - Restart nova-compute container
    - Restart nova-compute-ironic container
    - Restart nova-compute-fake containers
    - kolla_action != "upgrade" or not nova_safety_upgrade | bool

@@ -10,10 +10,8 @@
    login_password: "{{ database_password }}"
    name: "{{ item }}"
  run_once: True
  delegate_to: "{{ groups['nova-api'][0] }}"
  with_items:
    - "{{ nova_database_name }}"
    - "{{ nova_database_name }}_cell0"
    - "{{ nova_cell0_database_name }}"
    - "{{ nova_api_database_name }}"
  when:
    - not use_preconfigured_databases | bool
@@ -33,21 +31,21 @@
    priv: "{{ item.database_name }}.*:ALL"
    append_privs: "yes"
  with_items:
    - database_name: "{{ nova_database_name }}"
      database_username: "{{ nova_database_user }}"
      database_password: "{{ nova_database_password }}"
    - database_name: "{{ nova_database_name }}_cell0"
      database_username: "{{ nova_database_user }}"
      database_password: "{{ nova_database_password }}"
    - database_name: "{{ nova_cell0_database_name }}"
      database_username: "{{ nova_cell0_database_user }}"
      database_password: "{{ nova_cell0_database_password }}"
    - database_name: "{{ nova_api_database_name }}"
      database_username: "{{ nova_api_database_user }}"
      database_password: "{{ nova_api_database_password }}"
  loop_control:
    label: "{{ item.database_name }}"
  run_once: True
  delegate_to: "{{ groups['nova-api'][0] }}"
  when:
    - not use_preconfigured_databases | bool
  no_log: true

- include_tasks: bootstrap_service.yml
- import_tasks: config_bootstrap.yml

- import_tasks: bootstrap_service.yml

- import_tasks: map_cell0.yml

@@ -1,8 +1,10 @@
---
- name: Running Nova bootstrap container
# TODO(mgoddard): We could use nova-manage db sync --local_cell, otherwise we
# sync cell0 twice. This should not be harmful, though.
- name: Running Nova API bootstrap container
  become: true
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    common_options: "{{ docker_common_options }}"
@@ -13,8 +15,9 @@
    image: "{{ nova_api.image }}"
    labels:
      BOOTSTRAP:
    name: "bootstrap_nova"
    name: "nova_api_bootstrap"
    restart_policy: no
    volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"
    volumes: "{{ nova_api_bootstrap_default_volumes + nova_api_bootstrap_extra_volumes }}"
  register: bootstrap_result
  changed_when: bootstrap_result.stdout | default("") | length > 0
  run_once: true

6
ansible/roles/nova/tasks/bootstrap_upgrade.yml
Normal file
@@ -0,0 +1,6 @@
---
# For upgrade, we need to apply DB schema migrations to the API and cell0
# databases.

- import_tasks: config_bootstrap.yml
- import_tasks: bootstrap_service.yml
@@ -1,26 +0,0 @@
---
- name: Install package python-os-xenapi
  package:
    name: python-os-xenapi
    state: present
  become: True

- name: Ensure XenAPI root path
  file:
    path: "{{ xenapi_facts_root }}"
    state: directory
    mode: "0770"
  become: True

- name: Bootstrap XenAPI compute node
  vars:
    xenapi_facts_path: "{{ xenapi_facts_root + '/' + xenapi_facts_file }}"
  command: xenapi_bootstrap -i {{ xenserver_himn_ip }} -u {{ xenserver_username }} -p {{ xenserver_password }} -f {{ xenapi_facts_path }}
  become: True

- name: Fetching XenAPI facts file
  fetch:
    src: "{{ xenapi_facts_root + '/' + xenapi_facts_file }}"
    dest: "{{ xenapi_facts_root + '/' + inventory_hostname + '/' }}"
    flat: yes
  become: True

@@ -1,119 +0,0 @@
---
- name: Ensuring config directory exists
  file:
    path: "{{ node_config_directory }}/{{ item }}"
    state: "directory"
    mode: "0770"
  become: true
  with_items:
    - "nova-libvirt/secrets"
  when: inventory_hostname in groups['compute']

- name: Copying over ceph.conf(s)
  vars:
    service_name: "{{ item }}"
  merge_configs:
    sources:
      - "{{ role_path }}/../ceph/templates/ceph.conf.j2"
      - "{{ node_custom_config }}/ceph.conf"
      - "{{ node_custom_config }}/ceph/{{ inventory_hostname }}/ceph.conf"
    dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
    mode: "0660"
  become: true
  with_items:
    - "nova-compute"
    - "nova-libvirt"
  when: inventory_hostname in groups['compute']
  notify:
    - Restart {{ item }} container

- include_tasks: ../../ceph_pools.yml
  vars:
    pool_name: "{{ nova_pool_name }}"
    pool_type: "{{ nova_pool_type }}"
    cache_mode: "{{ nova_cache_mode }}"
    pool_pg_num: "{{ nova_pool_pg_num }}"
    pool_pgp_num: "{{ nova_pool_pgp_num }}"
    pool_application: "rbd"

- name: Pulling cephx keyring for nova
  become: true
  kolla_ceph_keyring:
    name: client.nova
    caps: "{{ ceph_client_nova_keyring_caps }}"
  register: nova_cephx_key
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  run_once: True

- name: Pulling cinder cephx keyring for libvirt
  become: true
  command: docker exec ceph_mon ceph auth get-key client.cinder
  register: cinder_cephx_raw_key
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  when:
    - enable_cinder | bool
    - cinder_backend_ceph | bool
  changed_when: False
  run_once: True

- name: Pushing cephx keyring for nova
  copy:
    content: |
      [client.nova]
          key = {{ nova_cephx_key.keyring.key }}
    dest: "{{ node_config_directory }}/nova-compute/ceph.client.nova.keyring"
    mode: "0600"
  become: true
  when: inventory_hostname in groups['compute']
  notify:
    - Restart nova-compute container

- name: Pushing secrets xml for libvirt
  template:
    src: "secret.xml.j2"
    dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ item.uuid }}.xml"
    mode: "0600"
  become: true
  when:
    - inventory_hostname in groups['compute']
    - item.enabled | bool
  with_items:
    - uuid: "{{ rbd_secret_uuid }}"
      name: client.nova secret
      enabled: true
    - uuid: "{{ cinder_rbd_secret_uuid }}"
      name: client.cinder secret
      enabled: "{{ enable_cinder | bool and cinder_backend_ceph | bool }}"
  notify:
    - Restart nova-libvirt container

- name: Pushing secrets key for libvirt
  copy:
    content: "{{ item.content }}"
    dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ item.uuid }}.base64"
    mode: "0600"
  become: true
  when:
    - inventory_hostname in groups['compute']
    - item.enabled | bool
  with_items:
    - uuid: "{{ rbd_secret_uuid }}"
      content: "{{ nova_cephx_key.keyring.key }}"
      enabled: true
    - uuid: "{{ cinder_rbd_secret_uuid }}"
      content: "{{ cinder_cephx_raw_key.stdout|default('') }}"
      enabled: "{{ enable_cinder | bool and cinder_backend_ceph | bool }}"
  notify:
    - Restart nova-libvirt container

- name: Ensuring config directory has correct owner and permission
  become: true
  file:
    path: "{{ node_config_directory }}/{{ item }}"
    recurse: yes
    owner: "{{ config_owner_user }}"
    group: "{{ config_owner_group }}"
  with_items:
    - "nova-compute"
    - "nova-libvirt/secrets"
  when: inventory_hostname in groups['compute']

@@ -1,14 +0,0 @@
---
- name: Copying over libvirt TLS keys {{ file }}
  become: true
  copy:
    src: "{{ first_found }}"
    dest: "{{ node_config_directory }}/{{ service_name }}/{{ file }}"
    mode: "0600"
  with_first_found:
    - "{{ node_custom_config }}/nova/nova-libvirt/{{ inventory_hostname }}/{{ file }}"
    - "{{ node_custom_config }}/nova/nova-libvirt/{{ file }}"
  loop_control:
    loop_var: first_found
  notify:
    - Restart {{ service_name }} container

@@ -1,68 +0,0 @@
---
- name: Ensuring config directories exist
  become: true
  file:
    path: "{{ node_config_directory }}/nova-compute-fake-{{ item }}"
    state: "directory"
    mode: "0770"
  with_sequence: start=1 end={{ num_nova_fake_per_node }}
  notify:
    - Restart nova-compute-fake containers

- name: Copying over config.json files for services
  become: true
  template:
    src: "nova-compute.json.j2"
    dest: "{{ node_config_directory }}/nova-compute-fake-{{ item }}/config.json"
    mode: "0660"
  with_sequence: start=1 end={{ num_nova_fake_per_node }}
  notify:
    - Restart nova-compute-fake containers

- name: Copying over nova.conf
  become: true
  vars:
    service_name: "{{ item }}"
  merge_configs:
    sources:
      - "{{ role_path }}/templates/nova.conf.j2"
      - "{{ node_custom_config }}/global.conf"
      - "{{ node_custom_config }}/nova.conf"
      - "{{ node_custom_config }}/nova/{{ item }}.conf"
      - "{{ node_custom_config }}/nova/{{ inventory_hostname }}/nova.conf"
    dest: "{{ node_config_directory }}/nova-compute-fake-{{ item }}/nova.conf"
    mode: "0660"
  with_sequence: start=1 end={{ num_nova_fake_per_node }}

- name: Ensuring config directory has correct owner and permission
  become: true
  file:
    path: "{{ node_config_directory }}/nova-compute-fake-{{ item }}"
    recurse: yes
    owner: "{{ config_owner_user }}"
    group: "{{ config_owner_group }}"
  with_sequence: start=1 end={{ num_nova_fake_per_node }}
  notify:
    - Restart nova-compute-fake containers

- name: Check nova-compute-fake containers
  become: true
  kolla_docker:
    action: "compare_container"
    common_options: "{{ docker_common_options }}"
    name: "nova_compute_fake_{{ item }}"
    image: "{{ nova_compute_image_full }}"
    privileged: True
    volumes:
      - "{{ node_config_directory }}/nova-compute-fake-{{ item }}/:{{ container_config_directory }}/:ro"
      - "/etc/localtime:/etc/localtime:ro"
      - "/lib/modules:/lib/modules:ro"
      - "/run:/run:shared"
      - "kolla_logs:/var/log/kolla/"
  with_sequence: start=1 end={{ num_nova_fake_per_node }}
  when:
    - kolla_action != "config"
    - inventory_hostname in groups['compute']
    - enable_nova_fake | bool
  notify:
    - Restart nova-compute-fake containers

@@ -1,16 +1,4 @@
---
- name: Setting sysctl values
  become: true
  sysctl: name={{ item.name }} value={{ item.value }} sysctl_set=yes
  with_items:
    - { name: "net.bridge.bridge-nf-call-iptables", value: 1}
    - { name: "net.bridge.bridge-nf-call-ip6tables", value: 1}
    - { name: "net.ipv4.conf.all.rp_filter", value: "{{ nova_compute_host_rp_filter_mode }}"}
    - { name: "net.ipv4.conf.default.rp_filter", value: "{{ nova_compute_host_rp_filter_mode }}"}
  when:
    - set_sysctl | bool
    - inventory_hostname in groups['compute']

- name: Ensuring config directories exist
  become: true
  file:
@@ -24,21 +12,6 @@
    - item.value.enabled | bool
  with_dict: "{{ nova_services }}"

- include_tasks: ceph.yml
  when:
    - enable_ceph | bool and nova_backend == "rbd"
    - inventory_hostname in groups['ceph-mon'] or
      inventory_hostname in groups['compute'] or
      inventory_hostname in groups['nova-api'] or
      inventory_hostname in groups['nova-conductor'] or
      inventory_hostname in groups['nova-novncproxy'] or
      inventory_hostname in groups['nova-scheduler']

- include_tasks: external_ceph.yml
  when:
    - not enable_ceph | bool and (nova_backend == "rbd" or cinder_backend_ceph | bool)
    - inventory_hostname in groups['compute']

- name: Check if policies shall be overwritten
  local_action: stat path="{{ item }}"
  run_once: True
@@ -69,13 +42,6 @@
  notify:
    - "Restart {{ item.key }} container"

- name: Set XenAPI facts
  set_fact:
    xenapi_facts: "{{ lookup('file', xenapi_facts_root + '/' + inventory_hostname + '/' + xenapi_facts_file) | from_json }}"
  when:
    - nova_compute_virt_type == 'xenapi'
    - inventory_hostname in groups['compute']

- name: Copying over nova.conf
  become: true
  vars:
@@ -92,127 +58,12 @@
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
    - item.key in nova_services_require_nova_conf
  with_dict: "{{ nova_services }}"
  notify:
    - "Restart {{ item.key }} container"

- name: Copying over libvirt configuration
  become: true
  vars:
    service: "{{ nova_services['nova-libvirt'] }}"
  template:
    src: "{{ item.src }}"
    dest: "{{ node_config_directory }}/nova-libvirt/{{ item.dest }}"
    mode: "0660"
  when:
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
  with_items:
    - { src: "qemu.conf.j2", dest: "qemu.conf" }
    - { src: "libvirtd.conf.j2", dest: "libvirtd.conf" }
  notify:
    - Restart nova-libvirt container

- name: Copying over libvirt TLS keys (nova-libvirt)
  include_tasks: "config-libvirt-tls.yml"
  vars:
    service: "{{ nova_services['nova-libvirt'] }}"
    service_name: nova-libvirt
    file: "{{ item }}"
  when:
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
    - libvirt_tls | bool
    - libvirt_tls_manage_certs | bool
  with_items:
    - cacert.pem
    - servercert.pem
    - serverkey.pem
    - clientcert.pem
    - clientkey.pem

- name: Copying over libvirt TLS keys (nova-compute)
  include_tasks: "config-libvirt-tls.yml"
  vars:
    service: "{{ nova_services['nova-compute'] }}"
    service_name: nova-compute
    file: "{{ item }}"
  when:
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
    - libvirt_tls | bool
    - libvirt_tls_manage_certs | bool
  with_items:
    - cacert.pem
    - clientcert.pem
    - clientkey.pem

- name: Copying files for nova-ssh
  become: true
  vars:
    service: "{{ nova_services['nova-ssh'] }}"
  template:
    src: "{{ item.src }}"
    dest: "{{ node_config_directory }}/nova-ssh/{{ item.dest }}"
    mode: "0660"
  when:
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
  with_items:
    - { src: "sshd_config.j2", dest: "sshd_config" }
    - { src: "id_rsa", dest: "id_rsa" }
    - { src: "id_rsa.pub", dest: "id_rsa.pub" }
    - { src: "ssh_config.j2", dest: "ssh_config" }
  notify:
    - Restart nova-ssh container

- name: Copying VMware vCenter CA file
  vars:
    service: "{{ nova_services['nova-compute'] }}"
  copy:
    src: "{{ node_custom_config }}/vmware_ca"
    dest: "{{ node_config_directory }}/nova-compute/vmware_ca"
    mode: "0660"
  when:
    - nova_compute_virt_type == "vmware"
    - not vmware_vcenter_insecure | bool
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
  notify:
    - Restart nova-compute container

- name: Copying 'release' file for nova_compute
  vars:
    service: "{{ nova_services['nova-compute'] }}"
  copy:
    src: "{{ item }}"
    dest: "{{ node_config_directory }}/nova-compute/release"
    mode: "0660"
  with_first_found:
    - files:
        - "{{ node_custom_config }}/nova_compute/{{ inventory_hostname }}/release"
        - "{{ node_custom_config }}/nova_compute/release"
        - "{{ node_custom_config }}/nova/release"
      skip: true
  when:
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
  notify:
    - Restart nova-compute container

- name: Copying over existing policy file
  become: true
  vars:
    services_require_policy_json:
      - nova-api
      - nova-compute
      - nova-compute-ironic
      - nova-conductor
      - nova-novncproxy
      - nova-serialproxy
      - nova-scheduler
      - nova-spicehtml5proxy
  template:
    src: "{{ nova_policy_file_path }}"
    dest: "{{ node_config_directory }}/{{ item.key }}/{{ nova_policy_file }}"
@@ -221,7 +72,7 @@
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
    - nova_policy_file is defined
    - item.key in services_require_policy_json
    - item.key in nova_services_require_policy_json
  with_dict: "{{ nova_services }}"
  notify:
    - "Restart {{ item.key }} container"

35
ansible/roles/nova/tasks/config_bootstrap.yml
Normal file
@@ -0,0 +1,35 @@
---
# Generate configuration for bootstrapping containers.

- name: Ensuring config directories exist
  become: true
  file:
    path: "{{ node_config_directory }}/nova-api-bootstrap"
    state: "directory"
    owner: "{{ config_owner_user }}"
    group: "{{ config_owner_group }}"
    mode: "0770"
  run_once: true

- name: Copying over config.json files for nova-api-bootstrap
  become: true
  template:
    src: "nova-api-bootstrap.json.j2"
    dest: "{{ node_config_directory }}/nova-api-bootstrap/config.json"
    mode: "0660"
  run_once: true

- name: Copying over nova.conf for nova-api-bootstrap
  become: true
  vars:
    service_name: "nova-api-bootstrap"
  merge_configs:
    sources:
      - "{{ role_path }}/templates/nova.conf.j2"
      - "{{ node_custom_config }}/global.conf"
      - "{{ node_custom_config }}/nova.conf"
      - "{{ node_custom_config }}/nova/nova-api.conf"
      - "{{ node_custom_config }}/nova/{{ inventory_hostname }}/nova.conf"
    dest: "{{ node_config_directory }}/nova-api-bootstrap/nova.conf"
    mode: "0660"
  run_once: true

@@ -1,128 +0,0 @@
---
- name: Create cell0 mappings
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 map_cell0'
    common_options: "{{ docker_common_options }}"
    detach: False
    image: "{{ nova_api.image }}"
    labels:
      BOOTSTRAP:
    name: "create_cell0_nova"
    restart_policy: no
    volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
  register: map_cell0
  changed_when:
    - map_cell0 is success
    - '"Cell0 is already setup" not in map_cell0.stdout'
  failed_when:
    - map_cell0.rc != 0
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"

- include_tasks: bootstrap_service.yml
  when: map_cell0.changed

- name: Get list of existing cells
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 list_cells --verbose'
    common_options: "{{ docker_common_options }}"
    detach: False
    image: "{{ nova_api.image }}"
    labels:
      BOOTSTRAP:
    name: "list_cells_nova"
    restart_policy: no
    volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
  register: existing_cells_list
  changed_when: false
  failed_when:
    - existing_cells_list.rc != 0
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"

- name: Check if a base cell already exists
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
    # We match lines containing a UUID in a column (except the one used by
    # cell0), followed by two other columns, the first containing the transport
    # URL and the second the database connection. For example:
    #
    # | None | 68a3f49e-27ec-422f-9e2e-2a4e5dc8291b | rabbit://openstack:password@1.2.3.4:5672 | mysql+pymysql://nova:password@1.2.3.4:3306/nova | False |
    #
    # NOTE(priteau): regexp doesn't support passwords containing spaces
    regexp: '\| +(?!00000000-0000-0000-0000-000000000000)([0-9a-f\-]+) +\| +([^ ]+) +\| +([^ ]+) +\| +([^ ]+) +\|$'
  set_fact:
    existing_cells: "{{ existing_cells_list.stdout | regex_findall(regexp, multiline=True) }}"
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"
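
For illustration, regex_findall returns one tuple of the four captured groups
per matching row. With the sample row from the comment above, the resulting
fact would look roughly like this (values taken from that sample line only):

# Hypothetical rendering of the fact for the sample row:
existing_cells:
  - - "68a3f49e-27ec-422f-9e2e-2a4e5dc8291b"            # cell UUID
    - "rabbit://openstack:password@1.2.3.4:5672"        # transport URL
    - "mysql+pymysql://nova:password@1.2.3.4:3306/nova" # database connection
    - "False"                                           # disabled column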

- name: Create base cell for legacy instances
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 create_cell'
    common_options: "{{ docker_common_options }}"
    detach: False
    image: "{{ nova_api.image }}"
    labels:
      BOOTSTRAP:
    name: "create_cell_nova"
    restart_policy: no
    volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
  register: base_cell
  changed_when:
    - base_cell is success
  failed_when:
    - base_cell.rc != 0
    - '"already exists" not in base_cell.stdout'
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"
  when: existing_cells | length == 0

- name: Update base cell for legacy instances
  vars:
    connection_url: "mysql+pymysql://{{ nova_database_user }}:{{ nova_database_password }}@{{ nova_database_address }}/{{ nova_database_name }}"
    nova_api: "{{ nova_services['nova-api'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    command: "bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 update_cell --cell_uuid {{ existing_cells[0][0] }}'"
    common_options: "{{ docker_common_options }}"
    detach: False
    image: "{{ nova_api.image }}"
    labels:
      BOOTSTRAP:
    name: "create_cell_nova"
    restart_policy: no
    volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
  register: base_cell
  changed_when:
    - base_cell is success
  failed_when:
    - base_cell.rc != 0
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"
  when:
    - existing_cells | length == 1
    - existing_cells[0][1] != rpc_transport_url or existing_cells[0][2] != connection_url

- name: Print warning if a duplicate cell is detected
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
  fail:
    msg: Multiple base cells detected, manual cleanup with `nova-manage cell_v2` may be required.
  ignore_errors: yes
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"
  when:
    - existing_cells | length > 1

@@ -2,30 +2,10 @@
- include_tasks: register.yml
  when: inventory_hostname in groups['nova-api']

- include_tasks: bootstrap_xenapi.yml
  when:
    - inventory_hostname in groups['compute']
    - nova_compute_virt_type == "xenapi"

- include_tasks: clone.yml
  when: nova_dev_mode | bool

- include_tasks: config.yml

- include_tasks: config-nova-fake.yml
  when:
    - enable_nova_fake | bool
    - inventory_hostname in groups['compute']

- include_tasks: bootstrap.yml
  when: inventory_hostname in groups['nova-api'] or
        inventory_hostname in groups['compute']

- include_tasks: create_cells.yml
  when: inventory_hostname in groups['nova-api']

- name: Flush handlers
  meta: flush_handlers

- include_tasks: discover_computes.yml
  when: inventory_hostname in groups['nova-api']

@@ -1,82 +0,0 @@
---
# We need to wait for all expected compute services to register before running
# cells v2 host discovery. This includes virtualised compute services and
# ironic compute services.
# Work with --limit by including only hosts in ansible_play_batch.
- name: Build a list of expected compute service hosts
  vars:
    # For virt, use ansible_nodename rather than inventory_hostname, since this
    # is similar to what nova uses internally as its default for the
    # [DEFAULT] host config option.
    virt_compute_service_hosts: >-
      {{ groups['compute'] |
         intersect(ansible_play_batch) |
         map('extract', hostvars, 'ansible_nodename') |
         list }}
    # For ironic, use {{ ansible_hostname }}-ironic since this is what we
    # configure for [DEFAULT] host in nova.conf.
    ironic_compute_service_hosts: >-
      {{ (groups['nova-compute-ironic'] |
          intersect(ansible_play_batch) |
          map('extract', hostvars, 'ansible_hostname') |
          map('regex_replace', '^(.*)$', '\1-ironic') |
          list)
         if enable_ironic | bool else [] }}
  set_fact:
    expected_compute_service_hosts: "{{ virt_compute_service_hosts + ironic_compute_service_hosts }}"
  run_once: True
  delegate_to: "{{ groups['nova-api'][0] }}"
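
As a concrete illustration, with two virtualised compute hosts and ironic
enabled, the resulting fact might look like this (hostnames are invented):

# Hypothetical value:
expected_compute_service_hosts:
  - compute01.example.com   # ansible_nodename of a virtualised compute host
  - compute02.example.com
  - controller01-ironic     # ansible_hostname with the "-ironic" suffix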

- name: Waiting for nova-compute services to register themselves
  become: true
  command: >
    docker exec kolla_toolbox openstack
    --os-interface internal
    --os-auth-url {{ keystone_admin_url }}
    --os-identity-api-version 3
    --os-project-domain-name {{ openstack_auth.domain_name }}
    --os-tenant-name {{ openstack_auth.project_name }}
    --os-username {{ openstack_auth.username }}
    --os-password {{ keystone_admin_password }}
    --os-user-domain-name {{ openstack_auth.domain_name }}
    --os-region-name {{ openstack_region_name }}
    {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }}{% endif %}
    compute service list --format json --column Host --service nova-compute
  register: nova_compute_services
  changed_when: false
  run_once: True
  delegate_to: "{{ groups['nova-api'][0] }}"
  retries: 20
  delay: 10
  until:
    - nova_compute_services is success
    # A list containing the 'Host' field of compute services that have
    # registered themselves. Don't exclude compute services that are disabled,
    # since these could have been explicitly disabled by the operator. While we
    # could exclude services that are down, nova-manage cell_v2 discover_hosts
    # does not do this, so let's not block on it here.
    # NOTE(mgoddard): Cannot factor this out into an intermediary variable
    # before ansible 2.8, due to
    # https://bugs.launchpad.net/kolla-ansible/+bug/1835817.
    - (nova_compute_services.stdout |
       from_json |
       map(attribute='Host') |
       list)
      is superset(expected_compute_service_hosts)

# TODO(yoctozepto): no need to do --by-service if ironic not used
- name: Discover nova hosts
  become: true
  command: >
    docker exec nova_api nova-manage cell_v2 discover_hosts --by-service
  changed_when: False
  run_once: True
  delegate_to: "{{ groups['nova-api'][0] }}"

# NOTE(yoctozepto): SIGHUP is probably unnecessary
- name: Refresh cell cache in nova scheduler
  become: true
  command: docker kill --signal HUP nova_scheduler
  changed_when: False
  when:
    - inventory_hostname in groups['nova-scheduler']

@ -1,129 +0,0 @@
|
||||
---
|
||||
- name: Ensuring config directory exists
|
||||
file:
|
||||
path: "{{ node_config_directory }}/{{ item }}"
|
||||
state: "directory"
|
||||
mode: "0770"
|
||||
become: true
|
||||
with_items:
|
||||
- "nova-libvirt/secrets"
|
||||
when: inventory_hostname in groups['compute']

- name: Check nova keyring file
  local_action: stat path="{{ node_custom_config }}/nova/ceph.client.nova.keyring"
  run_once: True
  register: nova_cephx_keyring_file
  failed_when: not nova_cephx_keyring_file.stat.exists
  when:
    - nova_backend == "rbd"
    - external_ceph_cephx_enabled | bool

- name: Check cinder keyring file
  local_action: stat path="{{ node_custom_config }}/nova/ceph.client.cinder.keyring"
  run_once: True
  register: cinder_cephx_keyring_file
  failed_when: not cinder_cephx_keyring_file.stat.exists
  when:
    - cinder_backend_ceph | bool
    - external_ceph_cephx_enabled | bool

# NOTE: nova-compute and nova-libvirt only need ceph.client.nova.keyring.
- name: Copy over ceph nova keyring file
  copy:
    src: "{{ nova_cephx_keyring_file.stat.path }}"
    dest: "{{ node_config_directory }}/{{ item }}/"
    mode: "0660"
  become: true
  with_items:
    - nova-compute
    - nova-libvirt
  when:
    - inventory_hostname in groups['compute']
    - nova_backend == "rbd"
    - external_ceph_cephx_enabled | bool
  notify:
    - Restart {{ item }} container

- name: Copy over ceph.conf
  template:
    src: "{{ node_custom_config }}/nova/ceph.conf"
    dest: "{{ node_config_directory }}/{{ item }}/"
    mode: "0660"
  become: true
  with_items:
    - nova-compute
    - nova-libvirt
  when:
    - inventory_hostname in groups['compute']
    - nova_backend == "rbd"
  notify:
    - Restart {{ item }} container

- name: Pushing nova secret xml for libvirt
  template:
    src: "secret.xml.j2"
    dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ item.uuid }}.xml"
    mode: "0600"
  become: true
  when:
    - inventory_hostname in groups['compute']
    - item.enabled | bool
  with_items:
    - uuid: "{{ rbd_secret_uuid }}"
      name: "client.nova secret"
      enabled: "{{ nova_backend == 'rbd' }}"
    - uuid: "{{ cinder_rbd_secret_uuid }}"
      name: "client.cinder secret"
      enabled: "{{ cinder_backend_ceph }}"
  notify:
    - Restart nova-libvirt container

- name: Extract nova key from file
  local_action: shell cat "{{ nova_cephx_keyring_file.stat.path }}" | grep -E 'key\s*=' | awk '{ print $3 }'
  changed_when: false
  run_once: True
  register: nova_cephx_raw_key
  when:
    - nova_backend == "rbd"
    - external_ceph_cephx_enabled | bool

- name: Extract cinder key from file
  local_action: shell cat "{{ cinder_cephx_keyring_file.stat.path }}" | grep -E 'key\s*=' | awk '{ print $3 }'
  changed_when: false
  run_once: True
  register: cinder_cephx_raw_key
  when:
    - cinder_backend_ceph | bool
    - external_ceph_cephx_enabled | bool

- name: Pushing secrets key for libvirt
  copy:
    content: "{{ item.result.stdout }}"
    dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ item.uuid }}.base64"
    mode: "0600"
  become: true
  when:
    - inventory_hostname in groups['compute']
    - item.enabled | bool
    - external_ceph_cephx_enabled | bool
  with_items:
    - uuid: "{{ rbd_secret_uuid }}"
      result: "{{ nova_cephx_raw_key }}"
      enabled: "{{ nova_backend == 'rbd' }}"
    - uuid: "{{ cinder_rbd_secret_uuid }}"
      result: "{{ cinder_cephx_raw_key }}"
      enabled: "{{ cinder_backend_ceph }}"
  notify:
    - Restart nova-libvirt container

- name: Ensuring config directory has correct owner and permission
  become: true
  file:
    path: "{{ node_config_directory }}/{{ item }}"
    recurse: yes
    owner: "{{ config_owner_user }}"
    group: "{{ config_owner_group }}"
  with_items:
    - "nova-compute"
    - "nova-libvirt/secrets"
  when: inventory_hostname in groups['compute']
26
ansible/roles/nova/tasks/map_cell0.yml
Normal file
@ -0,0 +1,26 @@
---
- name: Create cell0 mappings
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
    nova_cell0_connection: "mysql+pymysql://{{ nova_cell0_database_user }}:{{ nova_cell0_database_password }}@{{ nova_cell0_database_address }}/{{ nova_cell0_database_name }}"
  become: true
  kolla_docker:
    action: "start_container"
    command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 map_cell0 --database_connection {{ nova_cell0_connection }}'
    common_options: "{{ docker_common_options }}"
    detach: False
    image: "{{ nova_api.image }}"
    labels:
      BOOTSTRAP:
    name: "nova_api_map_cell0"
    restart_policy: no
    volumes: "{{ nova_api_bootstrap_default_volumes + nova_api_bootstrap_extra_volumes }}"
  register: map_cell0
  changed_when:
    - map_cell0 is success
    - '"Cell0 is already setup" not in map_cell0.stdout'
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"

- include_tasks: bootstrap_service.yml
  when: map_cell0.changed
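
For reference, the bootstrap container above boils down to a single nova-manage call. A minimal manual sketch, assuming a running nova_api container and substituting deployment-specific values for the SECRET and dbhost placeholders:

    docker exec nova_api nova-manage cell_v2 map_cell0 \
        --database_connection mysql+pymysql://nova:SECRET@dbhost/nova_cell0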
20
ansible/roles/nova/tasks/online_data_migrations.yml
Normal file
@ -0,0 +1,20 @@
---
- name: Run Nova API online database migrations
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    common_options: "{{ docker_common_options }}"
    detach: False
    environment:
      KOLLA_OSM:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
    image: "{{ nova_api.image }}"
    labels:
      BOOTSTRAP:
    name: "nova_api_online_data_migrations"
    restart_policy: "no"
    volumes: "{{ nova_api_bootstrap_default_volumes + nova_api_bootstrap_extra_volumes }}"
  run_once: true
  delegate_to: "{{ groups[nova_api.group][0] }}"
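
Setting the KOLLA_OSM environment variable tells the kolla image's bootstrap script to run the online data migrations instead of starting a service. A rough manual equivalent (a sketch, assuming a running nova_api container):

    docker exec nova_api nova-manage db online_data_migrations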
@ -4,23 +4,8 @@
  kolla_container_facts:
    name:
      - nova_api
      - nova_novncproxy
      - nova_serialproxy
      - nova_spicehtml5proxy
      - nova_ssh
      - nova_libvirt
  register: container_facts

- name: Checking available compute nodes in inventory
  vars:
    nova_compute_ironic: "{{ nova_services['nova-compute-ironic'] }}"
  fail:
    msg: >
      At least 1 compute node required in inventory when ironic is disabled.
  when:
    - groups['compute'] | length < 1
    - not nova_compute_ironic.enabled | bool

- name: Checking free port for Nova API
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
@ -49,87 +34,6 @@
    - inventory_hostname in groups[nova_api.group]
    - nova_api.enabled | bool

- name: Checking free port for Nova NoVNC Proxy
  vars:
    nova_novncproxy: "{{ nova_services['nova-novncproxy'] }}"
  wait_for:
    host: "{{ api_interface_address }}"
    port: "{{ nova_novncproxy_listen_port }}"
    connect_timeout: 1
    timeout: 1
    state: stopped
  when:
    - container_facts['nova_novncproxy'] is not defined
    - nova_novncproxy.enabled | bool
    - inventory_hostname in groups[nova_novncproxy.group]

- name: Checking free port for Nova Serial Proxy
  vars:
    nova_serialproxy: "{{ nova_services['nova-serialproxy'] }}"
  wait_for:
    host: "{{ api_interface_address }}"
    port: "{{ nova_serialproxy_listen_port }}"
    connect_timeout: 1
    timeout: 1
    state: stopped
  when:
    - container_facts['nova_serialproxy'] is not defined
    - nova_serialproxy.enabled | bool
    - inventory_hostname in groups[nova_serialproxy.group]

- name: Checking free port for Nova Spice HTML5 Proxy
  vars:
    nova_spicehtml5proxy: "{{ nova_services['nova-spicehtml5proxy'] }}"
  wait_for:
    host: "{{ api_interface_address }}"
    port: "{{ nova_spicehtml5proxy_listen_port }}"
    connect_timeout: 1
    timeout: 1
    state: stopped
  when:
    - container_facts['nova_spicehtml5proxy'] is not defined
    - nova_spicehtml5proxy.enabled | bool
    - inventory_hostname in groups[nova_spicehtml5proxy.group]

- name: Checking free port for Nova SSH
  vars:
    nova_ssh: "{{ nova_services['nova-ssh'] }}"
  wait_for:
    host: "{{ migration_interface_address }}"
    port: "{{ nova_ssh_port }}"
    connect_timeout: 1
    timeout: 1
    state: stopped
  when:
    - container_facts['nova_ssh'] is not defined
    - nova_ssh.enabled | bool
    - inventory_hostname in groups[nova_ssh.group]

- name: Checking free port for Nova Libvirt
  vars:
    nova_libvirt: "{{ nova_services['nova-libvirt'] }}"
  wait_for:
    host: "{{ api_interface_address }}"
    port: "{{ nova_libvirt_port }}"
    connect_timeout: 1
    timeout: 1
    state: stopped
  when:
    - container_facts['nova_libvirt'] is not defined
    - nova_libvirt.enabled | bool
    - inventory_hostname in groups[nova_libvirt.group]

- name: Checking that libvirt is not running
  vars:
    nova_libvirt: "{{ nova_services['nova-libvirt'] }}"
  stat: path=/var/run/libvirt/libvirt-sock
  register: result
  failed_when: result.stat.exists
  when:
    - nova_compute_virt_type in ['kvm', 'qemu']
    - container_facts['nova_libvirt'] is not defined
    - inventory_hostname in groups[nova_libvirt.group]

# TODO(mgoddard): Remove this task in the Ussuri cycle.
- name: Check that legacy upgrade is not enabled
  fail:
@ -0,0 +1,9 @@
---
# This is necessary after new cells have been created to refresh the cell cache
# in nova scheduler.
- name: Refresh cell cache in nova scheduler
  become: true
  command: docker kill --signal HUP nova_scheduler
  changed_when: False
  when:
    - inventory_hostname in groups['nova-scheduler']
23
ansible/roles/nova/tasks/reload_api.yml
Normal file
@ -0,0 +1,23 @@
---
# NOTE(mgoddard): We use recreate_or_restart_container to cover the case where
# nova_safety_upgrade is "yes", and we need to recreate all containers.

- name: Reload nova API services to remove RPC version pin
  vars:
    service: "{{ nova_services[item] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action == 'upgrade'
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
  with_items:
    - nova-scheduler
    - nova-api
20
ansible/roles/nova/tasks/reload_super_conductor.yml
Normal file
@ -0,0 +1,20 @@
---
# NOTE(mgoddard): We use recreate_or_restart_container to cover the case where
# nova_safety_upgrade is "yes", and we need to recreate all containers.

- name: Reload nova super conductor services to remove RPC version pin
  vars:
    service: "{{ nova_services['nova-super-conductor'] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    privileged: "{{ service.privileged | default(False) }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action == 'upgrade'
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
@ -2,21 +2,6 @@
# Create new set of configs on nodes
- include_tasks: config.yml

- include_tasks: bootstrap_service.yml

- name: Stopping all nova services except nova-compute
  become: true
  kolla_docker:
    action: "stop_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ item.value.container_name }}"
  with_dict: "{{ nova_services }}"
  when:
    - "'nova-compute' not in item.key"
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
    - nova_safety_upgrade | bool

# TODO(donghm): Flush_handlers to restart nova services
# should be run in serial nodes to decrease downtime if
# the previous task did not run. Update when the
@ -25,22 +10,5 @@
- name: Flush handlers
  meta: flush_handlers

- name: Migrate Nova database
  vars:
    nova_api: "{{ nova_services['nova-api'] }}"
  become: true
  kolla_docker:
    action: "start_container"
    common_options: "{{ docker_common_options }}"
    detach: False
    environment:
      KOLLA_OSM:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
    image: "{{ nova_api.image }}"
    labels:
      BOOTSTRAP:
    name: "bootstrap_nova"
    restart_policy: no
    volumes: "{{ nova_api.volumes }}"
  run_once: True
  delegate_to: "{{ groups[nova_api.group][0] }}"
# NOTE(dszumski): The Nova upgrade is not finished here and
# continues in subsequent tasks.
@ -4,3 +4,4 @@
  vars:
    project_services: "{{ nova_services }}"
    service_name: "{{ project_name }}"
  tags: nova
@ -15,6 +15,18 @@
    first_nova_api_host: "{{ groups['nova-api'][0] }}"
  when: hostvars[first_nova_api_host]['nova_upgrade_check_stdout']['rc'] not in [0, 1]

- name: Stopping top level nova services
  become: true
  kolla_docker:
    action: "stop_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ item.value.container_name }}"
  with_dict: "{{ nova_services }}"
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
    - nova_safety_upgrade | bool

- include_tasks: rolling_upgrade.yml

# NOTE(jeffrey4l): Remove this task in U cycle.
@ -1 +0,0 @@
{{ nova_ssh_key.private_key }}
@ -1 +0,0 @@
{{ nova_ssh_key.public_key }}
@ -1,17 +0,0 @@
{% if libvirt_tls | bool %}
listen_tls = 1
listen_tcp = 0
tls_port = "{{ nova_libvirt_port }}"
key_file = "/etc/pki/libvirt/private/serverkey.pem"
cert_file = "/etc/pki/libvirt/servercert.pem"
ca_file = "/etc/pki/CA/cacert.pem"
{% else %}
listen_tcp = 1
listen_tls = 0
auth_tcp = "none"
tcp_port = "{{ nova_libvirt_port }}"
ca_file = ""
{% endif %}
log_level = 3
log_outputs = "3:file:/var/log/kolla/libvirt/libvirtd.log"
listen_addr = "{{ migration_interface_address }}"
17
ansible/roles/nova/templates/nova-api-bootstrap.json.j2
Normal file
@ -0,0 +1,17 @@
{
    "command": "false",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/nova.conf",
            "dest": "/etc/nova/nova.conf",
            "owner": "nova",
            "perm": "0600"
        }
    ],
    "permissions": [
        {
            "path": "/var/log/kolla/nova",
            "owner": "nova:nova"
        }
    ]
}
@ -1,24 +0,0 @@
{
    "command": "nova-compute",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/nova.conf",
            "dest": "/etc/nova/nova.conf",
            "owner": "nova",
            "perm": "0600"
        }{% if nova_policy_file is defined %},
        {
            "source": "{{ container_config_directory }}/{{ nova_policy_file }}",
            "dest": "/etc/nova/{{ nova_policy_file }}",
            "owner": "nova",
            "perm": "0600"
        }{% endif %}
    ],
    "permissions": [
        {
            "path": "/var/log/kolla/nova",
            "owner": "nova:nova",
            "recurse": true
        }
    ]
}
@ -1,66 +0,0 @@
{
    "command": "nova-compute",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/nova.conf",
            "dest": "/etc/nova/nova.conf",
            "owner": "nova",
            "perm": "0600"
        }{% if nova_policy_file is defined %},
        {
            "source": "{{ container_config_directory }}/{{ nova_policy_file }}",
            "dest": "/etc/nova/{{ nova_policy_file }}",
            "owner": "nova",
            "perm": "0600"
        }{% endif %}{% if nova_backend == "rbd" %},
        {
            "source": "{{ container_config_directory }}/ceph.*",
            "dest": "/etc/ceph/",
            "owner": "nova",
            "perm": "0700"
        }{% endif %}{% if nova_compute_virt_type == "vmware" and not vmware_vcenter_insecure | bool %},
        {
            "source": "{{ container_config_directory }}/vmware_ca",
            "dest": "/etc/nova/vmware_ca",
            "owner": "nova",
            "perm": "0600"
        }{% endif %}{% if libvirt_tls | bool %},
        {
            "source": "{{ container_config_directory }}/clientkey.pem",
            "dest": "/etc/pki/libvirt/private/clientkey.pem",
            "owner": "root:nova",
            "perm": "0640"
        },
        {
            "source": "{{ container_config_directory }}/clientcert.pem",
            "dest": "/etc/pki/libvirt/clientcert.pem",
            "owner": "root:nova",
            "perm": "0640"
        },
        {
            "source": "{{ container_config_directory }}/cacert.pem",
            "dest": "/etc/pki/CA/cacert.pem",
            "owner": "root:nova",
            "perm": "0640"
        }{% endif %},
        {
            "source": "{{ container_config_directory }}/release",
            "dest": "/etc/nova/release",
            "owner": "nova",
            "perm": "0600",
            "optional": true
        }
    ],
    "permissions": [
        {
            "path": "/var/log/kolla/nova",
            "owner": "nova:nova",
            "recurse": true
        },
        {
            "path": "/var/lib/nova",
            "owner": "nova:nova",
            "recurse": true
        }
    ]
}
@ -1,24 +0,0 @@
{
    "command": "nova-conductor",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/nova.conf",
            "dest": "/etc/nova/nova.conf",
            "owner": "nova",
            "perm": "0600"
        }{% if nova_policy_file is defined %},
        {
            "source": "{{ container_config_directory }}/{{ nova_policy_file }}",
            "dest": "/etc/nova/{{ nova_policy_file }}",
            "owner": "nova",
            "perm": "0600"
        }{% endif %}
    ],
    "permissions": [
        {
            "path": "/var/log/kolla/nova",
            "owner": "nova:nova",
            "recurse": true
        }
    ]
}
@ -1,59 +0,0 @@
{
    "command": "/usr/sbin/libvirtd --listen",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/libvirtd.conf",
            "dest": "/etc/libvirt/libvirtd.conf",
            "owner": "root",
            "perm": "0600"
        },
        {
            "source": "{{ container_config_directory }}/qemu.conf",
            "dest": "/etc/libvirt/qemu.conf",
            "owner": "root",
            "perm": "0600"
        }{% if libvirt_tls | bool %},
        {
            "source": "{{ container_config_directory }}/serverkey.pem",
            "dest": "/etc/pki/libvirt/private/serverkey.pem",
            "owner": "root",
            "perm": "0600"
        },
        {
            "source": "{{ container_config_directory }}/servercert.pem",
            "dest": "/etc/pki/libvirt/servercert.pem",
            "owner": "root",
            "perm": "0600"
        },
        {
            "source": "{{ container_config_directory }}/clientkey.pem",
            "dest": "/etc/pki/libvirt/private/clientkey.pem",
            "owner": "root",
            "perm": "0600"
        },
        {
            "source": "{{ container_config_directory }}/clientcert.pem",
            "dest": "/etc/pki/libvirt/clientcert.pem",
            "owner": "root",
            "perm": "0600"
        },
        {
            "source": "{{ container_config_directory }}/cacert.pem",
            "dest": "/etc/pki/CA/cacert.pem",
            "owner": "root",
            "perm": "0600"
        }{% endif %}{% if nova_backend == "rbd" or cinder_backend_ceph | bool %},
        {
            "source": "{{ container_config_directory }}/secrets",
            "dest": "/etc/libvirt/secrets",
            "owner": "root",
            "perm": "0600"
        }{% endif %}{% if nova_backend == "rbd" %},
        {
            "source": "{{ container_config_directory }}/ceph.conf",
            "dest": "/etc/ceph/ceph.conf",
            "owner": "root",
            "perm": "0600"
        }{% endif %}
    ]
}
@ -1,24 +0,0 @@
{
    "command": "nova-novncproxy",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/nova.conf",
            "dest": "/etc/nova/nova.conf",
            "owner": "nova",
            "perm": "0600"
        }{% if nova_policy_file is defined %},
        {
            "source": "{{ container_config_directory }}/{{ nova_policy_file }}",
            "dest": "/etc/nova/{{ nova_policy_file }}",
            "owner": "nova",
            "perm": "0600"
        }{% endif %}
    ],
    "permissions": [
        {
            "path": "/var/log/kolla/nova",
            "owner": "nova:nova",
            "recurse": true
        }
    ]
}
@ -6,13 +6,7 @@
            "dest": "/etc/nova/nova.conf",
            "owner": "nova",
            "perm": "0600"
        }{% if nova_policy_file is defined %},
        {
            "source": "{{ container_config_directory }}/{{ nova_policy_file }}",
            "dest": "/etc/nova/{{ nova_policy_file }}",
            "owner": "nova",
            "perm": "0600"
        }{% endif %}
        }
    ],
    "permissions": [
        {
@ -1,24 +0,0 @@
{
    "command": "nova-spicehtml5proxy",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/nova.conf",
            "dest": "/etc/nova/nova.conf",
            "owner": "nova",
            "perm": "0600"
        }{% if nova_policy_file is defined %},
        {
            "source": "{{ container_config_directory }}/{{ nova_policy_file }}",
            "dest": "/etc/nova/{{ nova_policy_file }}",
            "owner": "nova",
            "perm": "0600"
        }{% endif %}
    ],
    "permissions": [
        {
            "path": "/var/log/kolla/nova",
            "owner": "nova:nova",
            "recurse": true
        }
    ]
}
@ -1,29 +0,0 @@
{
    "command": "/usr/sbin/sshd -D",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/sshd_config",
            "dest": "/etc/ssh/sshd_config",
            "owner": "root",
            "perm": "0600"
        },
        {
            "source": "{{ container_config_directory }}/ssh_config",
            "dest": "/var/lib/nova/.ssh/config",
            "owner": "nova",
            "perm": "0600"
        },
        {
            "source": "{{ container_config_directory }}/id_rsa",
            "dest": "/var/lib/nova/.ssh/id_rsa",
            "owner": "nova",
            "perm": "0600"
        },
        {
            "source": "{{ container_config_directory }}/id_rsa.pub",
            "dest": "/var/lib/nova/.ssh/authorized_keys",
            "owner": "nova",
            "perm": "0600"
        }
    ]
}
@ -1,5 +1,5 @@
{
    "command": "nova-serialproxy",
    "command": "nova-conductor",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/nova.conf",
@ -1,24 +0,0 @@
[libvirt]
{% if libvirt_tls | bool %}
connection_uri = "qemu+tls://{{ migration_hostname }}/system"
live_migration_uri = "qemu+tls://%s/system"
{% else %}
connection_uri = "qemu+tcp://{{ migration_interface_address | put_address_in_context('url') }}/system"
{% endif %}
{% if enable_ceph | bool and nova_backend == "rbd" %}
images_type = rbd
images_rbd_pool = {{ ceph_nova_pool_name }}
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = nova
disk_cachemodes="network=writeback"
{% if nova_hw_disk_discard != '' %}
hw_disk_discard = {{ nova_hw_disk_discard }}
{% endif %}
{% endif %}
{% if nova_backend == "rbd" and external_ceph_cephx_enabled | bool %}
rbd_secret_uuid = {{ rbd_secret_uuid }}
{% endif %}
virt_type = {{ nova_compute_virt_type }}
{% if nova_libvirt_cpu_mode %}
cpu_mode = {{ nova_libvirt_cpu_mode }}
{% endif %}
@ -3,6 +3,9 @@
debug = {{ nova_logging_debug }}

log_dir = /var/log/kolla/nova
{% if service_name == "nova-super-conductor" %}
log_file = /var/log/kolla/nova/nova-super-conductor.log
{% endif %}

state_path = /var/lib/nova

@ -16,105 +19,38 @@ metadata_listen_port = {{ nova_metadata_listen_port }}

allow_resize_to_same_host = true

{% if service_name == "nova-compute-ironic" %}
host={{ ansible_hostname }}-ironic
log_file = /var/log/kolla/nova/nova-compute-ironic.log
compute_driver = ironic.IronicDriver
ram_allocation_ratio = 1.0
reserved_host_memory_mb = 0
{% elif enable_nova_fake | bool %}
host = {{ ansible_hostname }}_{{ service_name }}
compute_driver = fake.FakeDriver
{% elif nova_compute_virt_type == 'vmware' %}
compute_driver = vmwareapi.VMwareVCDriver
{% elif nova_compute_virt_type == 'xenapi' %}
compute_driver = xenapi.XenAPIDriver
{% if service_name == 'nova-compute' %}
host = xenapi_facts['dom0_hostname']
{% endif %}
{% else %}
compute_driver = libvirt.LibvirtDriver
{% endif %}

# Though my_ip is not used directly, lots of other variables use $my_ip
my_ip = {{ api_interface_address }}

{% if enable_ceilometer | bool or enable_searchlight | bool or enable_designate | bool %}
instance_usage_audit = True
instance_usage_audit_period = hour
{% if enable_watcher | bool %}
compute_monitors=nova.compute.monitors.cpu.virt_driver
{% endif %}
{% endif %}

transport_url = {{ rpc_transport_url }}

{% if enable_blazar | bool %}
[filter_scheduler]
{% if enable_blazar | bool %}
available_filters = nova.scheduler.filters.all_filters
available_filters = blazarnova.scheduler.filters.blazar_filter.BlazarFilter
enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,BlazarFilter
{% endif %}
{% if enable_nova_fake | bool %}
enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
{% endif %}
{% if enable_cells | bool %}
# When in superconductor mode, nova-compute can't send instance
# info updates to the scheduler, so just disable it.
track_instance_changes = False
{% endif %}

[api]
use_forwarded_for = true

# Super conductor
[conductor]
workers = {{ openstack_service_workers }}

{% if nova_console == 'novnc' %}
[vnc]
{% if service_name == "nova-compute-ironic" %}
enabled = false
{% else %}
novncproxy_host = {{ api_interface_address }}
novncproxy_port = {{ nova_novncproxy_listen_port }}
server_listen = {{ api_interface_address }}
server_proxyclient_address = {{ api_interface_address }}
{% if inventory_hostname in groups['compute'] %}
novncproxy_base_url = {{ public_protocol }}://{{ nova_novncproxy_fqdn | put_address_in_context('url') }}:{{ nova_novncproxy_port }}/vnc_auto.html
{% endif %}
{% endif %}
{% elif nova_console == 'spice' %}
[vnc]
# We have to turn off vnc to use spice
enabled = false
[spice]
enabled = true
server_listen = {{ api_interface_address }}
server_proxyclient_address = {{ api_interface_address }}
{% if inventory_hostname in groups['compute'] %}
html5proxy_base_url = {{ public_protocol }}://{{ nova_spicehtml5proxy_fqdn | put_address_in_context('url') }}:{{ nova_spicehtml5proxy_port }}/spice_auto.html
{% endif %}
html5proxy_host = {{ api_interface_address }}
html5proxy_port = {{ nova_spicehtml5proxy_listen_port }}
{% elif nova_console == 'none' %}
[vnc]
enabled = false
[spice]
enabled = false
{% endif %}
{% if enable_nova_serialconsole_proxy | bool %}
[serial_console]
enabled = true
base_url = {{ nova_serialproxy_protocol }}://{{ nova_serialproxy_fqdn | put_address_in_context('url') }}:{{ nova_serialproxy_port }}/
serialproxy_host = {{ api_interface_address }}
serialproxy_port = {{ nova_serialproxy_listen_port }}
proxyclient_address = {{ api_interface_address }}
{% endif %}

{% if service_name == "nova-compute-ironic" %}
[ironic]
username = {{ ironic_keystone_user }}
password = {{ ironic_keystone_password }}
auth_url = {{ openstack_auth.auth_url }}/v3
auth_type = password
project_name = service
user_domain_name = {{ default_user_domain_name }}
project_domain_name = {{ default_project_domain_name }}
endpoint_override = {{ internal_protocol }}://{{ ironic_internal_fqdn | put_address_in_context('url') }}:{{ ironic_api_port }}/v1
{% endif %}

[oslo_middleware]
enable_proxy_headers_parsing = True
@ -148,9 +84,8 @@ password = {{ neutron_keystone_password }}
region_name = {{ openstack_region_name }}
valid_interfaces = internal

{% if not service_name.startswith('nova-compute') %}
[database]
connection = mysql+pymysql://{{ nova_database_user }}:{{ nova_database_password }}@{{ nova_database_address }}/{{ nova_database_name }}
connection = mysql+pymysql://{{ nova_cell0_database_user }}:{{ nova_cell0_database_password }}@{{ nova_cell0_database_address }}/{{ nova_cell0_database_name }}
max_pool_size = 50
max_overflow = 1000
max_retries = -1
@ -158,7 +93,6 @@ max_retries = -1
[api_database]
connection = mysql+pymysql://{{ nova_api_database_user }}:{{ nova_api_database_password }}@{{ nova_api_database_address }}/{{ nova_api_database_name }}
max_retries = -1
{% endif %}

[cache]
backend = oslo_cache.memcache_pool
@ -180,27 +114,6 @@ memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}

{% if service_name == 'nova-compute' %}
{% if nova_compute_virt_type in ['kvm', 'qemu'] %}
{# must be an include because Ansible 2.8 (and earlier) does not like defined variables referencing undefined variables: migration_interface_address here #}
{# see https://github.com/ansible/ansible/issues/58835 #}
{% include 'nova.conf.d/libvirt.conf.j2' %}
{% endif %}
{% endif %}

{% if nova_compute_virt_type == "vmware" %}
[vmware]
host_ip = {{ vmware_vcenter_host_ip }}
host_username = {{ vmware_vcenter_host_username }}
host_password = {{ vmware_vcenter_host_password }}
cluster_name = {{ vmware_vcenter_cluster_name }}
datastore_regex = {{ vmware_vcenter_datastore_regex }}
insecure = {{ vmware_vcenter_insecure }}
{% if not vmware_vcenter_insecure | bool %}
ca_file = /etc/nova/vmware_ca
{% endif %}
{% endif %}

[upgrade_levels]
compute = auto

@ -213,7 +126,7 @@ topics = {{ nova_enabled_notification_topics | map(attribute='name') | join(',')
driver = noop
{% endif %}

{% if nova_policy_file is defined %}
{% if service_name in nova_services_require_policy_json and nova_policy_file is defined %}
[oslo_policy]
policy_file = {{ nova_policy_file }}
{% endif %}
@ -240,10 +153,6 @@ max_attempts = 10
# -1 is default and means periodic discovery is disabled
discover_hosts_in_cells_interval = -1

{% if enable_nova_fake | bool %}
default_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
{% endif %}

[placement]
auth_type = password
auth_url = {{ keystone_admin_url }}
@ -278,11 +187,3 @@ connection_string = {{ osprofiler_backend_connection_string }}
[barbican]
auth_endpoint = {{ keystone_internal_url }}
{% endif %}

{% if nova_compute_virt_type == "xenapi" %}
[xenserver]
ovs_integration_bridge = br-int
connection_password = {{ xenserver_password }}
connection_username = {{ xenserver_username }}
connection_url = {{ xenserver_connect_protocol }}://{{ xenserver_himn_ip }}
{% endif %}
@ -1,7 +0,0 @@
stdio_handler = "file"

user = "nova"
group = "nova"

max_files = {{ qemu_max_files }}
max_processes = {{ qemu_max_processes }}
@ -1,6 +0,0 @@
<secret ephemeral='no' private='no'>
  <uuid>{{ item.uuid }}</uuid>
  <usage type='ceph'>
    <name>{{ item.name }}</name>
  </usage>
</secret>
@ -1,4 +0,0 @@
Host *
    StrictHostKeyChecking no
    UserKnownHostsFile /dev/null
    port {{ nova_ssh_port }}
@ -1,5 +0,0 @@
Port {{ nova_ssh_port }}
ListenAddress {{ migration_interface_address }}

SyslogFacility AUTHPRIV
UsePAM yes
@ -283,7 +283,16 @@
    - include_role:
        role: nova
        tasks_from: loadbalancer
      tags: nova
      tags:
        - nova
        - nova-api
      when: enable_nova | bool
    - include_role:
        role: nova-cell
        tasks_from: loadbalancer
      tags:
        - nova
        - nova-cell
      when: enable_nova | bool
    - include_role:
        role: octavia
@ -704,21 +713,9 @@
      tags: placement,
      when: enable_placement | bool }

- name: Apply role nova
  gather_facts: false
  hosts:
    - ceph-mon
    - compute
    - nova-api
    - nova-conductor
    - nova-novncproxy
    - nova-scheduler
    - '&enable_nova_True'
  serial: '{{ kolla_serial|default("0") }}'
  roles:
    - { role: nova,
        tags: nova,
        when: enable_nova | bool }
# Nova deployment is more complicated than other services, so is covered in its
# own playbook.
- import_playbook: nova.yml

- name: Apply role opendaylight
  gather_facts: false
@ -198,9 +198,6 @@
# Valid options are [ True, False ]
#openstack_logging_debug: "False"

# Valid options are [ none, novnc, spice, rdp ]
#nova_console: "novnc"

# Enable core OpenStack services. This includes:
# glance, keystone, neutron, nova, heat, and horizon.
#enable_openstack_core: "yes"
@ -226,6 +223,7 @@
#enable_cadf_notifications: "no"
#enable_ceilometer: "no"
#enable_ceilometer_ipmi: "no"
#enable_cells: "no"
#enable_central_logging: "no"
#enable_ceph: "no"
#enable_ceph_mds: "no"
@ -492,12 +490,6 @@
# The number of fake drivers per compute node
#num_nova_fake_per_node: 5

# Configure the nova upgrade option. Kolla currently supports two
# upgrade strategies for nova: legacy_upgrade and rolling_upgrade.
# Setting "nova_enable_rolling_upgrade: yes" selects the rolling
# upgrade; "no" selects the legacy upgrade.
#nova_enable_rolling_upgrade: "yes"

# The flag "nova_safety_upgrade" needs to be considered when
# "nova_enable_rolling_upgrade" is enabled. "nova_safety_upgrade"
# controls whether the nova services are all stopped before rolling
@ -507,6 +499,9 @@
# new version, and vice versa.
#nova_safety_upgrade: "no"

# Valid options are [ none, novnc, spice, rdp ]
#nova_console: "novnc"

#################
# Hyper-V options
#################
10
releasenotes/notes/nova-cells-02810dd035caded1.yaml
Normal file
@ -0,0 +1,10 @@
---
features:
  - |
    Adds initial support for deployment of multiple Nova cells. Cells allow
    Nova clouds to scale to large sizes, by sharding the compute hosts into
    multiple pools called *cells*.

    This feature is still evolving, and the associated configuration may be
    liable to change in the next release to support more advanced deployment
    topologies.
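
As a rough sketch of how a deployment opts in (drawn from the defaults and the CI templates further down; host and cell names are illustrative):

    # globals.yml
    enable_cells: "yes"

    # inventory
    [cell1]
    compute01

    [cell1:vars]
    nova_cell_name = cell1
    nova_cell_compute_group = cell1
    nova_cell_conductor_group = cell1
    nova_cell_novncproxy_group = cell1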
@ -98,3 +98,7 @@ ironic_dnsmasq_dhcp_range: "10.42.0.2,10.42.0.254"
{% if scenario == "masakari" %}
enable_masakari: "yes"
{% endif %}

{% if scenario == "cells" %}
enable_cells: "yes"
{% endif %}
@ -30,6 +30,21 @@
{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_user=kolla ansible_ssh_private_key_file={{ ansible_env.HOME ~ '/.ssh/id_rsa_kolla' }}
{% endfor %}

{% if scenario == 'cells' %}
{% for host in hostvars %}
{% set cell_name = 'cell' ~ loop.index %}
[{{ cell_name }}]
{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_user=kolla ansible_ssh_private_key_file={{ ansible_env.HOME ~ '/.ssh/id_rsa_kolla' }}

[{{ cell_name }}:vars]
nova_cell_name = {{ cell_name }}
nova_cell_compute_group = {{ cell_name }}
nova_cell_conductor_group = {{ cell_name }}
nova_cell_novncproxy_group = {{ cell_name }}
nova_novncproxy_port = {{ 6080 + loop.index0 }}

{% endfor %}
{% endif %}
# You can explicitly specify which hosts run each project by updating the
# groups in the sections below. Common services are grouped together.
[chrony-server:children]
@ -263,6 +278,9 @@ nova
[nova-conductor:children]
nova

[nova-super-conductor:children]
nova

# TODO(yoctozepto): Remove completely in the Ussuri cycle.
{% if is_previous_release and previous_release == 'stein' %}
[nova-consoleauth:children]
@ -250,3 +250,13 @@
      base_distro: centos
      install_type: source
      scenario: masakari

- job:
    name: kolla-ansible-centos-source-cells
    parent: kolla-ansible-base
    nodeset: kolla-ansible-centos-multi
    voting: false
    vars:
      base_distro: centos
      install_type: source
      scenario: cells
@ -47,6 +47,7 @@
        - kolla-ansible-ubuntu-source-upgrade-ceph
        - kolla-ansible-centos-binary
        - kolla-ansible-ubuntu-binary
        - kolla-ansible-centos-source-cells
    gate:
      queue: kolla
      jobs: