Add support for monitoring nodes
Currently these nodes are not deployed using kolla-ansible, but they do use the host provisioning and host OS configuration pieces of kayobe. The monasca-deploy project is used to deploy the monitoring services.
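In summary: this change introduces an overcloud Ansible group containing both the controllers and monitoring groups, so playbooks that previously targeted seed:controllers now target seed:overcloud. The static declaration added to the example Kayobe inventory (shown in full in the diff below) captures the relationship:

[monitoring]
# Empty group to provide declaration of monitoring group.

[overcloud:children]
controllers
monitoring
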
@@ -1,5 +1,5 @@
 ---
 - name: Ensure development tools are installed
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: dev-tools

@@ -3,6 +3,6 @@
 # servers but gets in the way after this as it tries to enable all network
 # interfaces. In some cases this can lead to timeouts.
 - name: Ensure Glean is disabled and its artifacts are removed
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: disable-glean

@@ -1,6 +1,6 @@
 ---
 - name: Disable SELinux and reboot if required
-  hosts: controllers:seed
+  hosts: seed:overcloud
   roles:
     - role: disable-selinux
       disable_selinux_reboot_timeout: "{{ 600 if ansible_virtualization_role == 'host' else 300 }}"

@@ -1,6 +1,6 @@
 ---
-- name: Ensure that controller BIOS are configured
-  hosts: controllers
+- name: Ensure that overcloud nodes' BIOS are configured
+  hosts: overcloud
   gather_facts: no
   vars:
     bios_config:

@@ -1,6 +1,6 @@
 ---
-- name: Ensure that controller boot order is configured
-  hosts: controllers
+- name: Ensure that overcloud nodes' boot order is configured
+  hosts: overcloud
   gather_facts: no
   vars:
     ansible_host: "{{ ipmi_address }}"

@@ -1,6 +1,6 @@
 ---
-- name: Ensure that controller BIOS are configured
-  hosts: controllers
+- name: Gather and display BIOS and RAID facts from iDRACs
+  hosts: overcloud
   gather_facts: no
   roles:
     # The role simply pulls in the drac_facts module.

@@ -6,6 +6,21 @@
 # to setup the Kayobe user account.
 controller_bootstrap_user: "{{ lookup('env', 'USER') }}"
 
+###############################################################################
+# Controller network interface configuration.
+
+# List of default networks to which controller nodes are attached.
+controller_default_network_interfaces: >
+  {{ [provision_oc_net_name,
+      provision_wl_net_name,
+      internal_net_name,
+      external_net_name,
+      storage_net_name,
+      storage_mgmt_net_name] | unique | list }}
+
+# List of extra networks to which controller nodes are attached.
+controller_extra_network_interfaces: []
+
 ###############################################################################
 # Controller node BIOS configuration.
 

ansible/group_vars/all/monitoring (new file, 64 lines):
@@ -0,0 +1,64 @@
+---
+###############################################################################
+# Monitoring node configuration.
+
+# User with which to access the monitoring nodes via SSH during bootstrap, in
+# order to setup the Kayobe user account.
+monitoring_bootstrap_user: "{{ controller_bootstrap_user }}"
+
+###############################################################################
+# Monitoring node network interface configuration.
+
+# List of default networks to which monitoring nodes are attached.
+monitoring_default_network_interfaces: >
+  {{ [provision_oc_net_name,
+      internal_net_name,
+      external_net_name] | unique | list }}
+
+# List of extra networks to which monitoring nodes are attached.
+monitoring_extra_network_interfaces: []
+
+###############################################################################
+# Monitoring node BIOS configuration.
+
+# Dict of monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+monitoring_bios_config: "{{ monitoring_bios_config_default | combine(monitoring_bios_config_extra) }}"
+
+# Dict of default monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+monitoring_bios_config_default: "{{ controller_bios_config_default }}"
+
+# Dict of additional monitoring node BIOS options. Format is same as that used
+# by stackhpc.drac role.
+monitoring_bios_config_extra: "{{ controller_bios_config_extra }}"
+
+###############################################################################
+# Monitoring node RAID configuration.
+
+# List of monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+monitoring_raid_config: "{{ monitoring_raid_config_default + monitoring_raid_config_extra }}"
+
+# List of default monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+monitoring_raid_config_default: "{{ controller_raid_config_default }}"
+
+# List of additional monitoring node RAID volumes. Format is same as that used
+# by stackhpc.drac role.
+monitoring_raid_config_extra: "{{ controller_raid_config_extra }}"
+
+###############################################################################
+# Monitoring node LVM configuration.
+
+# List of monitoring node volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+monitoring_lvm_groups: "{{ monitoring_lvm_groups_default + monitoring_lvm_groups_extra }}"
+
+# Default list of monitoring node volume groups. See mrlesmithjr.manage-lvm
+# role for format.
+monitoring_lvm_groups_default: "{{ controller_lvm_groups_default }}"
+
+# Additional list of monitoring node volume groups. See mrlesmithjr.manage-lvm
+# role for format.
+monitoring_lvm_groups_extra: "{{ controller_lvm_groups_extra }}"

ansible/group_vars/all/overcloud (new file, 17 lines):
@@ -0,0 +1,17 @@
+---
+###############################################################################
+# Overcloud configuration.
+
+# Default Ansible group for overcloud hosts if not present in
+# overcloud_group_hosts_map.
+overcloud_group_default: controllers
+
+# List of names of Ansible groups for overcloud hosts.
+overcloud_groups:
+  - controllers
+  - monitoring
+
+# Dict mapping overcloud Ansible group names to lists of hosts in the group.
+# As a special case, the group 'ignore' can be used to specify hosts that
+# should not be added to the inventory.
+overcloud_group_hosts_map: {}

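Hosts not listed in overcloud_group_hosts_map fall back to overcloud_group_default (controllers). A minimal sketch of how a deployment might assign hosts, using hypothetical hostnames:

overcloud_group_hosts_map:
  monitoring:
    - mon0
  ignore:
    - spare-node0

Here mon0 joins the monitoring group, spare-node0 is omitted from the generated inventory entirely, and any remaining Ironic nodes default to controllers.
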
(deleted file, 3 lines)
@@ -1,3 +0,0 @@
----
-# Host/IP with which to access the controllers via SSH.
-ansible_host: "{{ provision_oc_net_name | net_ip }}"

ansible/group_vars/controllers/bios (new file, 7 lines):
@@ -0,0 +1,7 @@
+---
+###############################################################################
+# Controller node BIOS configuration.
+
+# Dict of controller node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+bios_config: "{{ controller_bios_config }}"

@@ -6,15 +6,3 @@
 network_interfaces: >
   {{ (controller_default_network_interfaces +
       controller_extra_network_interfaces) | unique | list }}
-
-# List of default networks to which controller nodes are attached.
-controller_default_network_interfaces: >
-  {{ [provision_oc_net_name,
-      provision_wl_net_name,
-      internal_net_name,
-      external_net_name,
-      storage_net_name,
-      storage_mgmt_net_name] | unique | list }}
-
-# List of extra networks to which controller nodes are attached.
-controller_extra_network_interfaces: []

ansible/group_vars/controllers/raid (new file, 7 lines):
@@ -0,0 +1,7 @@
+---
+###############################################################################
+# Controller node RAID configuration.
+
+# List of controller node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+raid_config: "{{ controller_raid_config }}"

ansible/group_vars/monitoring/ansible-user (new file, 7 lines):
@@ -0,0 +1,7 @@
+---
+# User with which to access the monitoring nodes via SSH.
+ansible_user: "{{ kayobe_ansible_user }}"
+
+# User with which to access the monitoring nodes before the kayobe_ansible_user
+# account has been created.
+bootstrap_user: "{{ monitoring_bootstrap_user }}"

ansible/group_vars/monitoring/bios (new file, 10 lines):
@@ -0,0 +1,10 @@
+---
+###############################################################################
+# Monitoring node BIOS configuration.
+
+# Dict of monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+bios_config: >
+  {{ controller_bios_config
+     if inventory_hostname in groups['controllers'] else
+     monitoring_bios_config }}

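Because group_vars/monitoring applies to every host in the monitoring group, including hosts that are also controllers, the conditional above gives the controller-level value precedence for dual-role hosts; the same pattern is repeated for LVM, network, and RAID in the files that follow. A quick way to check which value a given host resolved (a sketch; the group and variable names come from this commit):

- name: Show the resolved BIOS configuration
  hosts: monitoring
  gather_facts: no
  tasks:
    - debug:
        var: bios_config
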
ansible/group_vars/monitoring/lvm (new file, 9 lines):
@@ -0,0 +1,9 @@
+---
+###############################################################################
+# Monitoring node LVM configuration.
+
+# List of LVM volume groups.
+lvm_groups: >
+  {{ controller_lvm_groups
+     if inventory_hostname in groups['controllers'] else
+     monitoring_lvm_groups }}

ansible/group_vars/monitoring/network (new file, 11 lines):
@@ -0,0 +1,11 @@
+---
+###############################################################################
+# Network interface attachments.
+
+# List of networks to which these nodes are attached.
+network_interfaces: >
+  {{ (controller_default_network_interfaces +
+      controller_extra_network_interfaces) | unique | list
+     if inventory_hostname in groups['controllers'] else
+     (monitoring_default_network_interfaces +
+      monitoring_extra_network_interfaces) | unique | list }}

ansible/group_vars/monitoring/raid (new file, 10 lines):
@@ -0,0 +1,10 @@
+---
+###############################################################################
+# Monitoring node RAID configuration.
+
+# List of monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+raid_config: >
+  {{ controller_raid_config
+     if inventory_hostname in groups['controllers'] else
+     monitoring_raid_config }}

ansible/group_vars/overcloud/ansible-host (new file, 3 lines):
@@ -0,0 +1,3 @@
+---
+# Host/IP with which to access the overcloud nodes via SSH.
+ansible_host: "{{ provision_oc_net_name | net_ip }}"

@@ -1,6 +1,6 @@
 ---
 - name: Ensure IP addresses are allocated
-  hosts: seed:controllers
+  hosts: seed:overcloud
   gather_facts: no
   pre_tasks:
     - name: Initialise the IP allocations fact

@@ -2,6 +2,6 @@
 # Enable IP routing in the kernel.
 
 - name: Ensure IP routing is enabled
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: ip-routing

@@ -1,6 +1,6 @@
 ---
 - name: Ensure the Kayobe Ansible user account exists
-  hosts: seed:controllers
+  hosts: seed:overcloud
   vars:
     ansible_user: "{{ bootstrap_user }}"
   tasks:

@@ -1,7 +1,7 @@
 ---
 # Update the Bifrost inventory with the IP allocation and other variables.
 
-- name: Ensure the Bifrost controller inventory is initialised
+- name: Ensure the Bifrost overcloud inventory is initialised
   hosts: seed
   gather_facts: no
   tasks:

@@ -20,8 +20,8 @@
       force: True
     become: True
 
-- name: Ensure the Bifrost controller inventory is populated
-  hosts: controllers
+- name: Ensure the Bifrost overcloud inventory is populated
+  hosts: overcloud
   gather_facts: no
   vars:
     seed_host: "{{ groups['seed'][0] }}"

@@ -1,6 +1,6 @@
 ---
 - name: Ensure LVM configuration is applied
-  hosts: seed:controllers
+  hosts: seed:overcloud
   pre_tasks:
     - name: Fail if the LVM physical disks have not been configured
       fail:

@@ -1,6 +1,6 @@
 ---
 - name: Ensure networking is configured
-  hosts: seed:controllers
+  hosts: seed:overcloud
   tags:
     - config
   vars:

@@ -1,6 +1,6 @@
 ---
 - name: Ensure NTP is installed and configured
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: yatesr.timezone
       become: True

@@ -6,30 +6,30 @@
 # set the ironic nodes' to maintenance mode to prevent ironic from managing
 # their power states.
 
-- name: Group controller hosts by their BMC type
-  hosts: controllers
+- name: Group overcloud nodes by their BMC type
+  hosts: overcloud
   gather_facts: no
   vars:
     # List of BMC types supporting BIOS and RAID configuration.
     supported_bmc_types:
       - idrac
   tasks:
-    - name: Fail if controller has BIOS and/or RAID configuration and BMC type is not supported
+    - name: Fail if node has BIOS and/or RAID configuration and BMC type is not supported
       fail:
         msg: >
-          Controller has BIOS and/or RAID configuration but BMC type
+          Node has BIOS and/or RAID configuration but BMC type
           {% if bmc_type is undefined %}is not defined{% else %}{{ bmc_type }}
           is not supported{% endif %}.
       when:
-        - "{{ controller_bios_config or controller_raid_config }}"
+        - "{{ bios_config or raid_config }}"
         - "{{ bmc_type is undefined or bmc_type not in supported_bmc_types }}"
 
-    - name: Group controller hosts by their BMC type
+    - name: Group overcloud hosts by their BMC type
       group_by:
-        key: "controllers_with_bmcs_of_type_{{ bmc_type | default('unknown') }}"
+        key: "overcloud_with_bmcs_of_type_{{ bmc_type | default('unknown') }}"
 
-- name: Check whether any changes to controller BIOS and RAID configuration are required
-  hosts: controllers_with_bmcs_of_type_idrac
+- name: Check whether any changes to nodes' BIOS and RAID configuration are required
+  hosts: overcloud_with_bmcs_of_type_idrac
   gather_facts: no
   vars:
     # Set this to False to avoid rebooting the nodes after configuration.

@@ -39,22 +39,22 @@
     drac_address: "{{ ipmi_address }}"
     drac_username: "{{ ipmi_username }}"
     drac_password: "{{ ipmi_password }}"
-    drac_bios_config: "{{ controller_bios_config }}"
-    drac_raid_config: "{{ controller_raid_config }}"
+    drac_bios_config: "{{ bios_config }}"
+    drac_raid_config: "{{ raid_config }}"
     drac_check_mode: True
   tasks:
     - name: Set a fact about whether the configuration changed
       set_fact:
         bios_or_raid_change: "{{ drac_result | changed }}"
 
-- name: Ensure that controller BIOS and RAID volumes are configured
-  hosts: controllers_with_bmcs_of_type_idrac
+- name: Ensure that overcloud BIOS and RAID volumes are configured
+  hosts: overcloud_with_bmcs_of_type_idrac
   gather_facts: no
   vars:
     # Set this to False to avoid rebooting the nodes after configuration.
     drac_reboot: True
   pre_tasks:
-    - name: Set the controller nodes' maintenance mode
+    - name: Set the overcloud nodes' maintenance mode
       command: >
         docker exec bifrost_deploy
         bash -c '. env-vars &&

@@ -82,12 +82,12 @@
     drac_address: "{{ ipmi_address }}"
     drac_username: "{{ ipmi_username }}"
     drac_password: "{{ ipmi_password }}"
-    drac_bios_config: "{{ controller_bios_config }}"
-    drac_raid_config: "{{ controller_raid_config }}"
+    drac_bios_config: "{{ bios_config }}"
+    drac_raid_config: "{{ raid_config }}"
   when: "{{ bios_or_raid_change | bool }}"
 
   tasks:
-    - name: Unset the controller nodes' maintenance mode
+    - name: Unset the overcloud nodes' maintenance mode
       command: >
         docker exec bifrost_deploy
         bash -c '. env-vars &&

@@ -1,10 +1,10 @@
 ---
 # Use bifrost to deprovision the overcloud nodes.
 
-- name: Ensure the overcloud controllers are deprovisioned
-  hosts: controllers
+- name: Ensure the overcloud nodes are deprovisioned
+  hosts: overcloud
   vars:
-    # Set to False to avoid waiting for the controllers to become active.
+    # Set to False to avoid waiting for the nodes to become active.
     wait_available: True
     wait_available_timeout: 600
     wait_available_interval: 10

@@ -1,10 +1,10 @@
 ---
 # Use bifrost to inspect the overcloud nodes' hardware.
 
-- name: Ensure the overcloud controller hardware is inspected
-  hosts: controllers
+- name: Ensure the overcloud nodes' hardware is inspected
+  hosts: overcloud
   vars:
-    # Set to False to avoid waiting for the controllers to become active.
+    # Set to False to avoid waiting for the nodes to become active.
     wait_inspected: True
     wait_inspected_timeout: 600
     wait_inspected_interval: 10

@@ -133,7 +133,7 @@
       when:
         - "{{ wait_inspected | bool }}"
 
-    - name: Fail if any of the controllers are not manageable
+    - name: Fail if any of the nodes are not manageable
       fail:
         msg: >
           Ironic node for {{ inventory_hostname }} is in an unexpected

@@ -2,7 +2,7 @@
 # Gather an inventory of nodes from the seed's Ironic service. Use this to
 # generate an Ansible inventory for Kayobe.
 
-- name: Ensure the controller Ansible inventory is populated
+- name: Ensure the overcloud Ansible inventory is populated
   hosts: seed
   tasks:
     - name: Gather the Ironic node inventory using Bifrost

@@ -18,20 +18,36 @@
       set_fact:
         ironic_inventory: "{{ inventory_result.stdout | from_json }}"
 
-    - name: Ensure Kayobe controller inventory exists
+    - name: Ensure Kayobe overcloud inventory exists
       local_action:
         module: copy
         content: |
           # Managed by Ansible - do not edit.
-          # This is the Kayobe controller inventory, autogenerated from the seed
+          # This is the Kayobe overcloud inventory, autogenerated from the seed
           # node's Ironic inventory.
 
-          [controllers]
+          {# Build a list of all hosts with explicit mappings. #}
+          {% set all_mapped_hosts = [] %}
+          {% for hosts in overcloud_group_hosts_map.values() %}
+          {% set _ = all_mapped_hosts.extend(hosts) %}
+          {% endfor %}
+          {% set ignore_hosts = overcloud_group_hosts_map.get("ignore", []) %}
+
+          {# Add a section for each group. #}
+          {% for group in overcloud_groups %}
+          [{{ group }}]
+          {% set group_hosts = overcloud_group_hosts_map.get(group, []) %}
           {% for host in ironic_inventory.baremetal.hosts %}
+          {% if (host in group_hosts or
+                 (group == overcloud_group_default and host not in all_mapped_hosts))
+                and host not in ignore_hosts %}
           {% set hostvars=ironic_inventory._meta.hostvars[host] %}
           {% set ipmi_address=hostvars.driver_info.ipmi_address | default %}
           {% set system_vendor=hostvars.extra.system_vendor | default %}
           {% set bmc_type=system_vendor | bmc_type_from_system_vendor %}
           {{ host }} ipmi_address={{ ipmi_address }} bmc_type={{ bmc_type }}
+          {% endif %}
           {% endfor %}
-        dest: "{{ kayobe_config_path }}/inventory/controllers"
+
+          {% endfor %}
+        dest: "{{ kayobe_config_path }}/inventory/overcloud"

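For illustration, assuming the seed's Ironic inventory contains two hypothetical nodes, ctl0 and mon0, with mon0 listed under monitoring in overcloud_group_hosts_map, the template above would render roughly as:

# Managed by Ansible - do not edit.
# This is the Kayobe overcloud inventory, autogenerated from the seed
# node's Ironic inventory.

[controllers]
ctl0 ipmi_address=10.0.0.1 bmc_type=idrac

[monitoring]
mon0 ipmi_address=10.0.0.2 bmc_type=idrac

(The IPMI addresses and BMC types here are placeholders; in practice they come from each node's driver_info and system vendor.)
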
@@ -1,14 +1,14 @@
 ---
 # Use bifrost to provision the overcloud nodes with a base OS.
 
-- name: Ensure the overcloud controllers are provisioned
-  hosts: controllers
+- name: Ensure the overcloud nodes are provisioned
+  hosts: overcloud
   vars:
-    # Set to False to avoid waiting for the controllers to become active.
+    # Set to False to avoid waiting for the nodes to become active.
     wait_active: True
     wait_active_timeout: 600
     wait_active_interval: 10
-    # Set to False to avoid waiting for the controllers to be accessible via
+    # Set to False to avoid waiting for the nodes to be accessible via
     # SSH.
     wait_ssh: True
     wait_ssh_timeout: 600

@@ -133,7 +133,7 @@
       with_items:
         - "{{ hostvars[groups['seed'][0]].ansible_host }}"
       # We execute this only once, allowing the Bifrost Ansible to handle
-      # multiple controllers.
+      # multiple nodes.
       run_once: True
 
     - name: Wait for the ironic node to become active

@@ -171,7 +171,7 @@
         - "{{ wait_active | bool }}"
         - "{{ initial_provision_state != 'active' }}"
 
-    - name: Fail if any of the controllers are not available
+    - name: Fail if any of the nodes are not available
       fail:
         msg: >
           Ironic node for {{ inventory_hostname }} is in an unexpected

@@ -182,7 +182,7 @@
         - "{{ initial_provision_state != 'active' }}"
         - "{{ final_provision_state != 'active' }}"
 
-    - name: Wait for SSH access to the controllers
+    - name: Wait for SSH access to the nodes
       local_action:
         module: wait_for
         host: "{{ ansible_host }}"

@@ -27,8 +27,17 @@ controllers
 
 [compute:children]
 
-[monitoring:children]
-controllers
+[monitoring]
+# These hostnames must be resolvable from your deployment host
+{% for monitoring_host in groups['monitoring'] %}
+{% set monitoring_hv=hostvars[monitoring_host] %}
+{{ monitoring_host }}{% if "ansible_host" in monitoring_hv %} ansible_host={{ monitoring_hv["ansible_host"] }}{% endif %}
+
+{% endfor %}
+
+[monitoring:vars]
+ansible_user=kolla
+ansible_become=true
 
 [storage:children]
 controllers

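Once templated, the kolla-ansible inventory lists each monitoring host explicitly instead of inheriting the controllers group. With a single hypothetical host mon0 that has an allocated ansible_host, the rendered section would resemble:

[monitoring]
# These hostnames must be resolvable from your deployment host
mon0 ansible_host=10.0.0.2

[monitoring:vars]
ansible_user=kolla
ansible_become=true
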
@@ -2,7 +2,7 @@
 # Enable SNAT using iptables.
 
 - name: Ensure SNAT is configured
-  hosts: seed:controllers
+  hosts: seed:overcloud
   vars:
     snat_rules:
       - interface: "{{ ansible_default_ipv4.interface }}"

@@ -7,6 +7,6 @@
 # any LVM or file system state from them.
 
 - name: Ensure that all unmounted block devices are wiped
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: wipe-disks

@@ -7,6 +7,13 @@
 [controllers]
 # Empty group to provide declaration of controllers group.
 
+[monitoring]
+# Empty group to provide declaration of monitoring group.
+
+[overcloud:children]
+controllers
+monitoring
+
 [docker:children]
 # Hosts in this group will have Docker installed.
 seed

etc/kayobe/monitoring.yml (new file, 56 lines):
@@ -0,0 +1,56 @@
+---
+###############################################################################
+# Monitoring node configuration.
+
+# User with which to access the monitoring nodes via SSH during bootstrap, in
+# order to setup the Kayobe user account.
+#monitoring_bootstrap_user:
+
+###############################################################################
+# Monitoring node BIOS configuration.
+
+# Dict of monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#monitoring_bios_config:
+
+# Dict of default monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#monitoring_bios_config_default:
+
+# Dict of additional monitoring node BIOS options. Format is same as that used
+# by stackhpc.drac role.
+#monitoring_bios_config_extra:
+
+###############################################################################
+# Monitoring node RAID configuration.
+
+# List of monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#monitoring_raid_config:
+
+# List of default monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#monitoring_raid_config_default:
+
+# List of additional monitoring node RAID volumes. Format is same as that used
+# by stackhpc.drac role.
+#monitoring_raid_config_extra:
+
+###############################################################################
+# Monitoring node LVM configuration.
+
+# List of monitoring node volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+#monitoring_lvm_groups:
+
+# Default list of monitoring node volume groups. See mrlesmithjr.manage-lvm
+# role for format.
+#monitoring_lvm_groups_default:
+
+# Additional list of monitoring node volume groups. See mrlesmithjr.manage-lvm
+# role for format.
+#monitoring_lvm_groups_extra:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes

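All of these settings ship commented out, so the defaults defined in ansible/group_vars/all/monitoring apply until a value is uncommented. As a sketch, monitoring nodes could be given an additional volume group of their own (the names, device, and sizes below are hypothetical; see the mrlesmithjr.manage-lvm role for the authoritative format):

monitoring_lvm_groups_extra:
  - vgname: data-vg
    disks: /dev/sdb
    create: true
    lvnames:
      - lvname: data-lv
        size: 100%FREE
        create: true
        filesystem: ext4
        mount: /var/lib/monitoring
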
etc/kayobe/overcloud.yml (new file, 14 lines):
@@ -0,0 +1,14 @@
+---
+###############################################################################
+# Overcloud configuration.
+
+# Default Ansible group for overcloud hosts.
+#overcloud_group_default:
+
+# List of names of Ansible groups for overcloud hosts.
+#overcloud_groups:
+
+# Dict mapping overcloud Ansible group names to lists of hosts in the group.
+# As a special case, the group 'ignore' can be used to specify hosts that
+# should not be added to the inventory.
+#overcloud_group_hosts_map:

@@ -373,7 +373,8 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
     def take_action(self, parsed_args):
         self.app.LOG.debug("Configuring overcloud host OS")
         ansible_user = self.run_kayobe_config_dump(
-            parsed_args, host="controllers[0]", var_name="kayobe_ansible_user")
+            parsed_args, var_name="kayobe_ansible_user")
+        ansible_user = ansible_user.values()[0]
         playbooks = _build_playbook_list(
             "ip-allocation", "ssh-known-host", "kayobe-ansible-user")
         if parsed_args.wipe_disks:

@@ -381,12 +382,12 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
         playbooks += _build_playbook_list(
             "dev-tools", "disable-selinux", "network", "disable-glean", "ntp",
             "lvm")
-        self.run_kayobe_playbooks(parsed_args, playbooks, limit="controllers")
+        self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
         extra_vars = {"ansible_user": ansible_user}
         self.run_kolla_ansible_overcloud(parsed_args, "bootstrap-servers",
                                          extra_vars=extra_vars)
         playbooks = _build_playbook_list("kolla-host", "docker")
-        self.run_kayobe_playbooks(parsed_args, playbooks, limit="controllers")
+        self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
 
 
 class OvercloudServiceDeploy(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,