Merge branch 'master' into kolla-build-config-path

Mark Goddard 2017-10-26 17:53:00 +01:00 committed by GitHub
commit 95715c1296
58 changed files with 788 additions and 215 deletions

View File

@ -61,7 +61,7 @@
'ram': item.RAM | int,
'disk': item.Disk | int}] }}
with_items: "{{ existing_nova_flavors }}"
when: "{{ item.Name.startswith(flavor_base_name) }}"
when: item.Name.startswith(flavor_base_name)
# Build a list of nova flavors to create. Here we offset the flavor name
# index by the length of the relevant existing flavor list. Note that this

View File

@ -40,7 +40,7 @@
source {{ venv }}/bin/activate &&
openstack baremetal node manage {{ item['Name'] }}
with_items: "{{ ironic_nodes }}"
when: "{{ item['Provisioning State'] == 'enroll' }}"
when: item['Provisioning State'] == 'enroll'
environment: "{{ openstack_auth_env }}"
- name: Ensure ironic nodes are available
@ -48,7 +48,7 @@
source {{ venv }}/bin/activate &&
openstack baremetal node provide {{ item['Name'] }}
with_items: "{{ ironic_nodes }}"
when: "{{ item['Provisioning State'] in ['enroll', 'manageable'] }}"
when: item['Provisioning State'] in ['enroll', 'manageable']
environment: "{{ openstack_auth_env }}"
- name: Get a list of ironic nodes
@ -77,4 +77,4 @@
Failed to make compute node {{ item['Name'] }} available in ironic.
Provisioning state is {{ item['Provisioning State'] }}.
with_items: "{{ ironic_nodes }}"
when: "{{ item['Provisioning State'] != 'available' }}"
when: item['Provisioning State'] != 'available'

View File

@ -13,7 +13,7 @@
set_fact:
container_image_sets:
- regexes: "{{ container_image_regexes }}"
when: "{{ container_image_regexes != '' }}"
when: container_image_regexes != ''
- name: Display the regexes for container images that will be built
debug:
@ -31,6 +31,12 @@
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
become: True
- name: Login to docker registry
docker_login:
username: "{{ kolla_docker_registry_username }}"
password: "{{ kolla_docker_registry_password }}"
when: kolla_docker_registry_username is not none and kolla_docker_registry_password is not none
- name: Ensure Kolla container images are built
shell: >

View File

@ -19,9 +19,9 @@
with_dict: "{{ idrac_network_ips }}"
# Don't add hosts that already exist.
when:
- "{{ item.key not in groups['all'] }}"
- "{{ item.key | replace('-idrac', '') not in groups['all'] }}"
- "{{ not compute_node_limit or item.key | replace('-idrac', '') in compute_node_limit_list }}"
- item.key not in groups['all']
- item.key | replace('-idrac', '') not in groups['all']
- not compute_node_limit or item.key | replace('-idrac', '') in compute_node_limit_list
run_once: True
- name: Ensure compute nodes are present in the Ansible inventory
@ -35,5 +35,5 @@
set_fact:
ipmi_address: "{{ idrac_network_ips[inventory_hostname] }}"
# Don't add hosts that already exist.
when: "{{ not compute_node_limit or inventory_hostname in compute_node_limit_list }}"
when: not compute_node_limit or inventory_hostname in compute_node_limit_list
run_once: True

View File

@ -8,4 +8,4 @@
- role: dell-switch-bmp
# This is the Nginx web server on the seed node.
dell_switch_bmp_http_base_url: "http://{{ provision_oc_net_name | net_ip }}:8080"
when: "{{ dell_switch_bmp_images }}"
when: dell_switch_bmp_images

View File

@ -24,14 +24,14 @@
module: copy
content: "{{ hostvars[inventory_hostname] | to_nice_yaml }}"
dest: "{{ dump_path }}/{{ inventory_hostname }}.yml"
when: "{{ dump_var_name is not defined }}"
when: dump_var_name is not defined
- name: Write host variable to file
local_action:
module: copy
content: "{{ hostvars[inventory_hostname][dump_var_name] | to_nice_yaml }}"
dest: "{{ dump_path }}/{{ inventory_hostname }}.yml"
when: "{{ dump_var_name is defined }}"
when: dump_var_name is defined
# - name: Write merged config to file
# local_action:

View File

@ -58,6 +58,12 @@ kolla_install_type: "binary"
# Docker namespace to use for Kolla images.
kolla_docker_namespace: "stackhpc"
# Username to use to access a docker registry.
kolla_docker_registry_username:
# Password to use to access a docker registry.
kolla_docker_registry_password:
# Kolla OpenStack release version. This should be a Docker image tag.
kolla_openstack_release: "5.0.0"
@ -234,6 +240,7 @@ kolla_openstack_logging_debug: "False"
# Kolla feature flag configuration.
kolla_enable_barbican: "no"
kolla_enable_ceph: "no"
kolla_enable_glance: "yes"
kolla_enable_haproxy: "yes"
kolla_enable_heat: "yes"

View File

@ -2,10 +2,9 @@
###############################################################################
# Neutron configuration.
# List of Neutron ML2 mechanism drivers to use.
# List of Neutron ML2 mechanism drivers to use. If unset, the kolla-ansible
# defaults will be used.
kolla_neutron_ml2_mechanism_drivers:
- openvswitch
- genericswitch
# List of Neutron ML2 type drivers to use.
kolla_neutron_ml2_type_drivers:

View File

@ -51,7 +51,7 @@
parents:
- "interface {{ switch_interface_name }}"
delegate_to: localhost
when: "{{ switch_type == 'dellos6' }}"
when: switch_type == 'dellos6'
# The tasks in this block are delegated to the controller.
- block:
@ -67,8 +67,8 @@
delegate_to: "{{ delegate_host }}"
register: arp_result
failed_when:
- "{{ arp_result | failed }}"
- "{{ 'No ARP entry for ' ~ idrac_default_ip not in arp_result.stdout }}"
- arp_result | failed
- "'No ARP entry for ' ~ idrac_default_ip not in arp_result.stdout"
# Ansible's until keyword seems to not work nicely with failed_when, causing
# the task to fail even though we have specified failed_when: False.
@ -103,7 +103,7 @@
- name: Set a fact about whether the iDRAC default IP was reachable
set_fact:
idrac_bootstrap_failure: "{{ ping_result.results[0] }}"
when: "{{ ping_result.results[0].rc != 0 }}"
when: ping_result.results[0].rc != 0
- name: Ensure IPMI is enabled on the iDRAC
command: >
@ -117,7 +117,7 @@
loop_control:
loop_var: delegate_host
delegate_to: "{{ delegate_host }}"
when: "{{ not idrac_bootstrap_failure }}"
when: not idrac_bootstrap_failure
register: racadm_ipmi_enable
failed_when: False
@ -125,8 +125,8 @@
set_fact:
idrac_bootstrap_failure: "{{ racadm_ipmi_enable.results[0] }}"
when:
- "{{ not idrac_bootstrap_failure }}"
- "{{ racadm_ipmi_enable.results[0].rc != 0 }}"
- not idrac_bootstrap_failure
- racadm_ipmi_enable.results[0].rc != 0
- name: Ensure the iDRAC IP address is configured
command: >
@ -140,8 +140,7 @@
loop_control:
loop_var: delegate_host
delegate_to: "{{ delegate_host }}"
when:
- "{{ not idrac_bootstrap_failure }}"
when: not idrac_bootstrap_failure
register: racadm_setniccfg
failed_when: False
@ -149,14 +148,14 @@
set_fact:
idrac_bootstrap_failure: "{{ racadm_setniccfg.results[0] }}"
when:
- "{{ not idrac_bootstrap_failure }}"
- "{{ racadm_setniccfg.results[0].rc != 0 }}"
- not idrac_bootstrap_failure
- racadm_setniccfg.results[0].rc != 0
- name: Append the iDRAC to the successful list on success
set_fact:
idrac_bootstrap_success: >
{{ idrac_bootstrap_success + [idrac_port_description] }}
when: "{{ not idrac_bootstrap_failure }}"
when: not idrac_bootstrap_failure
- name: Append the iDRAC to the failed list on failure
set_fact:
@ -164,7 +163,7 @@
{{ idrac_bootstrap_failed +
[{"port description": idrac_port_description,
"failure": idrac_bootstrap_failure}] }}
when: "{{ idrac_bootstrap_failure }}"
when: idrac_bootstrap_failure
run_once: True
# Ensure we reconfigure the switch interface.
@ -177,14 +176,13 @@
parents:
- "interface {{ switch_interface_name }}"
delegate_to: localhost
when:
- "{{ switch_type == 'dellos6' }}"
when: switch_type == 'dellos6'
when: "{{ idrac_bootstrap_required }}"
when: idrac_bootstrap_required
- name: Append the iDRAC to the unchanged list when unchanged
set_fact:
idrac_bootstrap_unchanged: >
{{ idrac_bootstrap_unchanged + [idrac_port_description] }}
run_once: True
when: "{{ not idrac_bootstrap_required }}"
when: not idrac_bootstrap_required

View File

@ -33,7 +33,7 @@
fail:
msg: >
The iDRAC bootstrap process currently only supports DellOS6 switches.
when: "{{ switch_type not in supported_switch_types }}"
when: switch_type not in supported_switch_types
# 1. Create a VLAN interface on the controller node with IP in the iDRAC
# default subnet.
@ -119,7 +119,7 @@
dell_switch_config:
- "vlan {{ idrac_bootstrap_vlan }}"
dell_switch_interface_config: "{{ switch_interface_config_bootstrap }}"
when: "{{ switch_interface_config_bootstrap != {} }}"
when: switch_interface_config_bootstrap != {}
# 3. For each iDRAC switch port in turn, flip to the temporary VLAN and
# configure the iDRAC's IP address, before returning the port to the iDRAC
@ -186,7 +186,7 @@
dell_switch_config:
- "no vlan {{ idrac_bootstrap_vlan }}"
dell_switch_interface_config: "{{ switch_interface_config_bootstrap }}"
when: "{{ switch_interface_config_bootstrap != {} }}"
when: switch_interface_config_bootstrap != {}
# 5. Remove the VLAN interface on the controller node.
- name: Ensure the controller bootstrap network is cleaned up
@ -204,7 +204,7 @@
- name: Display a list of failed iDRACs
set_fact:
idrac_bootstrap_failed_port_descriptions: "{{ idrac_bootstrap_failed | map(attribute='port description') | list }}"
when: "{{ idrac_bootstrap_failed | length > 0 }}"
when: idrac_bootstrap_failed | length > 0
- name: Display a list of successfully bootstrapped iDRACs
debug:
@ -217,16 +217,16 @@
- name: Display a list of failed iDRACs
debug:
var: idrac_bootstrap_failed_port_descriptions
when: "{{ idrac_bootstrap_failed | length > 0 }}"
when: idrac_bootstrap_failed | length > 0
- name: Display a list of failed iDRACs with debug output for the failed tasks
debug:
var: idrac_bootstrap_failed
when: "{{ idrac_bootstrap_failed | length > 0 }}"
when: idrac_bootstrap_failed | length > 0
- name: Fail if there were any iDRAC bootstrapping failures
fail:
msg: >
One or more iDRACs failed to bootstrap, see the list above for
details.
when: "{{ idrac_bootstrap_failed | length > 0 }}"
when: idrac_bootstrap_failed | length > 0

View File

@ -22,7 +22,7 @@
}]
}}
with_items: "{{ network_interfaces }}"
when: "{{ item|net_cidr != None }}"
when: item|net_cidr != None
roles:
- role: ip-allocation
ip_allocation_filename: "{{ kayobe_config_path }}/network-allocation.yml"

View File

@ -46,8 +46,8 @@
({{ item.description }}) is invalid. Value:
"{{ hostvars[inventory_hostname][item.var_name] | default('<undefined>') }}".
when:
- "{{ item.required | bool }}"
- "{{ hostvars[inventory_hostname][item.var_name] is not defined or not hostvars[inventory_hostname][item.var_name] }}"
- item.required | bool
- hostvars[inventory_hostname][item.var_name] is not defined or not hostvars[inventory_hostname][item.var_name]
with_items:
- var_name: "kolla_api_interface"
description: "API network interface name"
@ -58,7 +58,7 @@
- var_name: "kolla_inspector_dnsmasq_interface"
description: "Bare metal introspection network interface name"
required: "{{ kolla_enable_ironic }}"
when: "{{ groups['controllers'] | length > 0 }}"
when: groups['controllers'] | length > 0
tags:
- config
- config-validation
@ -71,7 +71,7 @@
kolla_internal_fqdn: "{{ internal_net_name | net_fqdn or internal_net_name | net_vip_address }}"
kolla_external_vip_address: "{{ public_net_name | net_vip_address }}"
kolla_external_fqdn: "{{ public_net_name | net_fqdn or public_net_name | net_vip_address }}"
when: "{{ kolla_enable_haproxy | bool }}"
when: kolla_enable_haproxy | bool
- name: Set facts containing the VIP addresses and FQDNs
set_fact:
@ -79,8 +79,7 @@
kolla_internal_fqdn: "{{ internal_net_name | net_ip(network_host) }}"
kolla_external_vip_address: "{{ public_net_name | net_ip(network_host) }}"
kolla_external_fqdn: "{{ public_net_name | net_ip(network_host) }}"
when:
- "{{ not kolla_enable_haproxy | bool }}"
when: not kolla_enable_haproxy | bool
- name: Set facts containing the network host interfaces
set_fact:
@ -107,7 +106,7 @@
# FIXME: Network host does not have an IP on this network.
- "{{ provision_wl_net_name }}"
- "{{ external_net_name }}"
when: "{{ item in hostvars[network_host].network_interfaces }}"
when: item in hostvars[network_host].network_interfaces
- name: Set facts containing the Neutron bridge and interface names
set_fact:
@ -126,8 +125,8 @@
({{ item.description }}) is invalid. Value:
"{{ hostvars[inventory_hostname][item.var_name] | default('<undefined>') }}".
when:
- "{{ item.required | bool }}"
- "{{ hostvars[inventory_hostname][item.var_name] is not defined or not hostvars[inventory_hostname][item.var_name] }}"
- item.required | bool
- hostvars[inventory_hostname][item.var_name] is not defined or not hostvars[inventory_hostname][item.var_name]
with_items:
- var_name: "kolla_internal_vip_address"
description: "Internal API VIP address"
@ -158,8 +157,8 @@
({{ item.0.description }}) is invalid. Value:
"{{ item.1 | default('<undefined>') }}".
when:
- "{{ item.0.required | bool }}"
- "{{ item.1 is not defined or not item.1 }}"
- item.0.required | bool
- item.1 is not defined or not item.1
with_subelements:
- - var_name: "kolla_neutron_bridge_names"
value: "{{ kolla_neutron_bridge_names }}"
@ -170,7 +169,7 @@
description: "List of Neutron interface names"
required: True
- value
when: "{{ groups['network'] | length > 0 }}"
when: groups['network'] | length > 0
tags:
- config
- config-validation
@ -188,13 +187,13 @@
({{ item.description }}) is invalid. Value:
"{{ hostvars[inventory_hostname][item.var_name] | default('<undefined>') }}".
when:
- "{{ item.required | bool }}"
- "{{ hostvars[inventory_hostname][item.var_name] is not defined or not hostvars[inventory_hostname][item.var_name] }}"
- item.required | bool
- hostvars[inventory_hostname][item.var_name] is not defined or not hostvars[inventory_hostname][item.var_name]
with_items:
- var_name: "kolla_bifrost_network_interface"
description: "Bifrost network interface name"
required: True
when: "{{ groups['seed'] | length > 0 }}"
when: groups['seed'] | length > 0
tags:
- config
- config-validation

View File

@ -18,4 +18,4 @@
# If a service is not installed, the ansible service module will fail
# with this error message.
- '"Could not find the requested service" not in result.msg'
when: "{{ kolla_enable_ironic | bool }}"
when: kolla_enable_ironic | bool

View File

@ -99,7 +99,7 @@
set_fact:
kolla_extra_config: "{{ kolla_extra_config | combine({item.item.name: lookup('template', '{{ item.stat.path }}')}) }}"
with_items: "{{ stat_result.results }}"
when: "{{ item.stat.exists }}"
when: item.stat.exists
- name: Validate switch configuration for Neutron ML2 genericswitch driver
fail:
@ -110,14 +110,12 @@
{{ switch_type_to_device_type.keys() | join(', ') }}.
with_items: "{{ kolla_neutron_ml2_generic_switch_hosts }}"
when: >
{{
item not in hostvars or
'switch_type' not in hostvars[item] or
hostvars[item].switch_type not in switch_type_to_device_type or
'ansible_host' not in hostvars[item] or
'ansible_user' not in hostvars[item] or
'ansible_ssh_pass' not in hostvars[item]
}}
item not in hostvars or
'switch_type' not in hostvars[item] or
hostvars[item].switch_type not in switch_type_to_device_type or
'ansible_host' not in hostvars[item] or
'ansible_user' not in hostvars[item] or
'ansible_ssh_pass' not in hostvars[item]
tags:
- config-validation

View File

@ -9,12 +9,11 @@
{{ item.vgname }}. Ensure that each volume group in 'lvm_groups'
has a valid 'disks' list.
with_items: "{{ lvm_groups | default([]) }}"
when:
- "{{ not item.disks | default([]) or 'changeme' in item.disks | default([]) }}"
when: not item.disks | default([]) or 'changeme' in item.disks | default([])
roles:
- role: mrlesmithjr.manage-lvm
manage_lvm: True
become: True
when:
- "{{ lvm_groups is defined}}"
- "{{ lvm_groups | length > 0 }}"
- lvm_groups is defined
- lvm_groups | length > 0

View File

@ -15,7 +15,7 @@
Network interface validation failed - no interface configured for
{{ item }}. This should be configured via '{{ item }}_interface'.
with_items: "{{ ether_interfaces }}"
when: "{{ not item | net_interface }}"
when: not item | net_interface
- name: Validate bridge interface configuration
fail:
@ -23,7 +23,7 @@
Bridge interface validation failed - no interface configured for
{{ item }}. This should be configured via '{{ item }}_interface'.
with_items: "{{ bridge_interfaces }}"
when: "{{ not item | net_interface }}"
when: not item | net_interface
- name: Validate bond interface configuration
fail:
@ -31,7 +31,7 @@
Bond interface validation failed - no interface configured for
{{ item }}. This should be configured via '{{ item }}_interface'.
with_items: "{{ bond_interfaces }}"
when: "{{ not item | net_interface }}"
when: not item | net_interface
tags:
- config-validation
@ -43,9 +43,9 @@
become: True
register: nm_result
failed_when:
- "{{ nm_result | failed }}"
- nm_result | failed
# Ugh, Ansible's service module doesn't handle uninstalled services.
- "{{ 'Could not find the requested service' not in nm_result.msg }}"
- "'Could not find the requested service' not in nm_result.msg"
roles:
- role: ahuffman.resolv
@ -89,7 +89,7 @@
with_items:
- "{{ provision_wl_net_name }}"
- "{{ external_net_name }}"
when: "{{ item in network_interfaces }}"
when: item in network_interfaces
- name: Update a fact containing veth interfaces
set_fact:

ansible/node-exporter.yml Normal file
View File

@ -0,0 +1,12 @@
---
# Deploy/pull/reconfigure/upgrade the Prometheus Node Exporter.
#
# Follows kolla-ansible service deployment patterns.
#
# Variables:
# action: One of deploy, destroy, pull, reconfigure, upgrade
- name: Ensure Node Exporter is deployed
hosts: all
roles:
- role: node-exporter

View File

@ -21,8 +21,8 @@
{% if bmc_type is undefined %}is not defined{% else %}{{ bmc_type }}
is not supported{% endif %}.
when:
- "{{ bios_config or raid_config }}"
- "{{ bmc_type is undefined or bmc_type not in supported_bmc_types }}"
- bios_config or raid_config
- bmc_type is undefined or bmc_type not in supported_bmc_types
- name: Group overcloud hosts by their BMC type
group_by:
@ -75,7 +75,7 @@
# NOTE: Without this, the seed's ansible_host variable will not be
# respected when using delegate_to.
ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
when: "{{ bios_or_raid_change | bool }}"
when: bios_or_raid_change | bool
roles:
- role: stackhpc.drac
@ -84,7 +84,7 @@
drac_password: "{{ ipmi_password }}"
drac_bios_config: "{{ bios_config }}"
drac_raid_config: "{{ raid_config }}"
when: "{{ bios_or_raid_change | bool }}"
when: bios_or_raid_change | bool
tasks:
- name: Unset the overcloud nodes' maintenance mode
@ -107,4 +107,4 @@
# NOTE: Without this, the seed's ansible_host variable will not be
# respected when using delegate_to.
ansible_host: "{{ hostvars[seed_host].ansible_host | default(seed_host) }}"
when: "{{ bios_or_raid_change | bool }}"
when: bios_or_raid_change | bool

View File

@ -63,7 +63,7 @@
Ironic node for {{ inventory_hostname }} is in an unexpected
initial provision state: {{ initial_provision_state }}. Expected
states are: {{ deprovisionable_states | join(',') }}.
when: "{{ initial_provision_state not in deprovisionable_states }}"
when: initial_provision_state not in deprovisionable_states
- name: Ensure the ironic node is deprovisioned
command: >
@ -82,7 +82,7 @@
until: "{{ delete_result | success or 'is locked by host' in delete_result.stdout }}"
retries: "{{ ironic_retries }}"
delay: "{{ ironic_retry_interval }}"
when: "{{ initial_provision_state != 'available' }}"
when: initial_provision_state != 'available'
delegate_to: "{{ seed_host }}"
vars:
# NOTE: Without this, the seed's ansible_host variable will not be
@ -111,8 +111,8 @@
retries: "{{ wait_available_timeout // wait_available_interval }}"
delay: "{{ wait_available_interval }}"
when:
- "{{ wait_available | bool }}"
- "{{ initial_provision_state != 'available' }}"
- wait_available | bool
- initial_provision_state != 'available'
changed_when: False
delegate_to: "{{ seed_host }}"
vars:
@ -124,8 +124,8 @@
set_fact:
final_provision_state: "{{ show_result.stdout_lines[1] }}"
when:
- "{{ wait_available | bool }}"
- "{{ initial_provision_state != 'available' }}"
- wait_available | bool
- initial_provision_state != 'available'
- name: Fail if the ironic node is not available
fail:
@ -134,6 +134,6 @@
provision state after deprovisioning. Ironic provision state:
{{ final_provision_state }}. Expected: available.
when:
- "{{ wait_available | bool }}"
- "{{ initial_provision_state != 'available' }}"
- "{{ final_provision_state != 'available' }}"
- wait_available | bool
- initial_provision_state != 'available'
- final_provision_state != 'available'

View File

@ -10,3 +10,4 @@
- include: docker-registry.yml
- include: inspection-store.yml
- include: opensm.yml
- include: node-exporter.yml

View File

@ -56,7 +56,7 @@
Ironic node for {{ inventory_hostname }} is in an unexpected
initial provision state: {{ initial_provision_state }}. Expected
states are: {{ inspectable_states | join(',') }}.
when: "{{ initial_provision_state not in inspectable_states }}"
when: initial_provision_state not in inspectable_states
- name: Ensure the ironic node is manageable
command: >
@ -75,7 +75,7 @@
until: "{{ manage_result | success or 'is locked by host' in manage_result.stdout }}"
retries: "{{ ironic_retries }}"
delay: "{{ ironic_retry_interval }}"
when: "{{ initial_provision_state != 'manageable' }}"
when: initial_provision_state != 'manageable'
delegate_to: "{{ seed_host }}"
vars:
# NOTE: Without this, the seed's ansible_host variable will not be
@ -125,8 +125,7 @@
until: "{{ not show_result.stdout_lines[1:] | intersect(inspecting_states) }}"
retries: "{{ wait_inspected_timeout // wait_inspected_interval }}"
delay: "{{ wait_inspected_interval }}"
when:
- "{{ wait_inspected | bool }}"
when: wait_inspected | bool
changed_when: False
delegate_to: "{{ seed_host }}"
vars:
@ -137,8 +136,7 @@
- name: Set a fact containing the final provision state
set_fact:
final_provision_state: "{{ show_result.stdout_lines[1] }}"
when:
- "{{ wait_inspected | bool }}"
when: wait_inspected | bool
- name: Fail if any of the nodes are not manageable
fail:
@ -147,5 +145,5 @@
provision state after inspecting. Ironic provision state:
{{ final_provision_state }}. Expected: manageable.
when:
- "{{ wait_inspected | bool }}"
- "{{ final_provision_state != 'manageable' }}"
- wait_inspected | bool
- final_provision_state != 'manageable'

View File

@ -62,8 +62,8 @@
{% if item in openstack_auth %}empty{% else %}not present{% endif %}
in openstack_auth. Have you sourced the environment file?
when:
- "{{ openstack_auth_type == 'password' }}"
- "{{ item not in openstack_auth or not openstack_auth[item] }}"
- openstack_auth_type == 'password'
- item not in openstack_auth or not openstack_auth[item]
with_items: "{{ openstack_auth_password_required_params }}"
tags:
- config-validation
@ -79,7 +79,7 @@
[{'host': item.key,
'interface_config': item.value.switch_interface_config.items()}] }}
with_dict: "{{ hostvars }}"
when: "{{ item.key in groups[inspector_dell_switch_lldp_workaround_group] }}"
when: item.key in groups[inspector_dell_switch_lldp_workaround_group]
- name: Update a fact containing Ironic Inspector rules
set_fact:
@ -90,11 +90,11 @@
- "{{ all_switch_interfaces }}"
- interface_config
when:
- "{{ item.1.1.description is defined }}"
- item.1.1.description is defined
# Ignore VLAN interfaces.
- "{{ 'vlan' not in item.1.0 }}"
- "'vlan' not in item.1.0"
# Ignore trunk links.
- "{{ '-trunk' not in item.1.1.description }}"
- "'-trunk' not in item.1.1.description"
roles:
- role: ironic-inspector-rules

View File

@ -12,8 +12,8 @@
{% if item in openstack_auth %}empty{% else %}not present{% endif %}
in openstack_auth. Have you sourced the environment file?
when:
- "{{ openstack_auth_type == 'password' }}"
- "{{ item not in openstack_auth or not openstack_auth[item] }}"
- openstack_auth_type == 'password'
- item not in openstack_auth or not openstack_auth[item]
with_items: "{{ openstack_auth_password_required_params }}"
tags:
- config-validation

View File

@ -24,4 +24,4 @@
type: raw
os_images_git_elements: "{{ ipa_build_dib_git_elements }}"
os_images_upload: False
when: "{{ ipa_build_images | bool }}"
when: ipa_build_images | bool

View File

@ -66,7 +66,7 @@
Ironic node for {{ inventory_hostname }} is in an unexpected
initial provision state: {{ initial_provision_state }}. Expected
states are: {{ provisionable_states | join(',') }}.
when: "{{ initial_provision_state not in provisionable_states }}"
when: initial_provision_state not in provisionable_states
- name: Ensure the ironic node is manageable
command: >
@ -85,7 +85,7 @@
until: "{{ manage_result | success or 'is locked by host' in manage_result.stdout }}"
retries: "{{ ironic_retries }}"
delay: "{{ ironic_retry_interval }}"
when: "{{ initial_provision_state == 'enroll' }}"
when: initial_provision_state == 'enroll'
delegate_to: "{{ seed_host }}"
vars:
# NOTE: Without this, the seed's ansible_host variable will not be
@ -109,7 +109,7 @@
until: "{{ provide_result | success or 'is locked by host' in provide_result.stdout }}"
retries: "{{ ironic_retries }}"
delay: "{{ ironic_retry_interval }}"
when: "{{ initial_provision_state in ['enroll', 'manageable'] }}"
when: initial_provision_state in ['enroll', 'manageable']
delegate_to: "{{ seed_host }}"
vars:
# NOTE: Without this, the seed's ansible_host variable will not be
@ -120,7 +120,7 @@
set_fact:
bifrost_limit: "{{ bifrost_limit + [item] }}"
with_items: "{{ play_hosts }}"
when: "{{ hostvars[item].initial_provision_state != 'active' }}"
when: hostvars[item].initial_provision_state != 'active'
run_once: True
- name: Ensure the ironic nodes are provisioned
@ -135,7 +135,7 @@
-e @/etc/bifrost/bifrost.yml
-e @/etc/bifrost/dib.yml
--limit {{ bifrost_limit | join(':') }}'
when: "{{ bifrost_limit }}"
when: bifrost_limit
delegate_to: "{{ seed_host }}"
vars:
# NOTE: Without this, the seed's ansible_host variable will not be
@ -167,8 +167,8 @@
retries: "{{ wait_active_timeout // wait_active_interval }}"
delay: "{{ wait_active_interval }}"
when:
- "{{ wait_active | bool }}"
- "{{ initial_provision_state != 'active' }}"
- wait_active | bool
- initial_provision_state != 'active'
changed_when: False
delegate_to: "{{ seed_host }}"
vars:
@ -180,8 +180,8 @@
set_fact:
final_provision_state: "{{ show_result.stdout_lines[1] }}"
when:
- "{{ wait_active | bool }}"
- "{{ initial_provision_state != 'active' }}"
- wait_active | bool
- initial_provision_state != 'active'
- name: Fail if any of the nodes are not available
fail:
@ -190,9 +190,9 @@
provision state after provisioning. Ironic provision state:
{{ final_provision_state }}. Expected: active.
when:
- "{{ wait_active | bool }}"
- "{{ initial_provision_state != 'active' }}"
- "{{ final_provision_state != 'active' }}"
- wait_active | bool
- initial_provision_state != 'active'
- final_provision_state != 'active'
- name: Wait for SSH access to the nodes
local_action:
@ -201,5 +201,4 @@
port: 22
state: started
timeout: "{{ wait_ssh_timeout }}"
when:
- "{{ wait_ssh | bool }}"
when: wait_ssh | bool

View File

@ -41,7 +41,7 @@
set_fact:
switch_interface_config: >
{{ switch_interface_config | combine(switch_interface_config_discovery) }}
when: "{{ physical_network_enable_discovery | bool }}"
when: physical_network_enable_discovery | bool
- name: Restrict switch interfaces to requested subset by name
set_fact:

View File

@ -10,8 +10,8 @@
{% if item in openstack_auth %}empty{% else %}not present{% endif %}
in openstack_auth. Have you sourced the environment file?
when:
- "{{ openstack_auth_type == 'password' }}"
- "{{ item not in openstack_auth or not openstack_auth[item] }}"
- openstack_auth_type == 'password'
- item not in openstack_auth or not openstack_auth[item]
with_items: "{{ openstack_auth_password_required_params }}"
tags:
- config-validation

View File

@ -4,11 +4,11 @@
module: dellos6_config
provider: "{{ dell_switch_provider }}"
src: dellos6-config.j2
when: "{{ dell_switch_type == 'dellos6' }}"
when: dell_switch_type == 'dellos6'
- name: Ensure DellOS9 switches are configured
local_action:
module: dellos9_config
provider: "{{ dell_switch_provider }}"
src: dellos9-config.j2
when: "{{ dell_switch_type == 'dellos9' }}"
when: dell_switch_type == 'dellos9'

View File

@ -16,7 +16,7 @@
- name: Set a fact to determine whether we are running locally
set_fact:
is_local: "{{ lookup('pipe', 'hostname') in [ansible_hostname, ansible_nodename] }}"
when: "{{ selinux_result | changed }}"
when: selinux_result | changed
# Any SSH connection errors cause ansible to fail the task. We therefore
# perform a manual SSH connection and allow the command to fail.
@ -29,18 +29,18 @@
sudo shutdown -r now "Applying SELinux changes"
register: reboot_result
failed_when:
- "{{ reboot_result | failed }}"
- "{{ 'closed by remote host' not in reboot_result.stderr }}"
- reboot_result | failed
- "'closed by remote host' not in reboot_result.stderr"
when:
- "{{ selinux_result | changed }}"
- "{{ not is_local | bool }}"
- selinux_result | changed
- not is_local | bool
- name: Reboot the system to apply SELinux changes (local)
command: shutdown -r now "Applying SELinux changes"
become: True
when:
- "{{ selinux_result | changed }}"
- "{{ is_local | bool }}"
- selinux_result | changed
- is_local | bool
# If we're running this locally we won't get here.
- name: Wait for the system to boot up (remote)
@ -53,5 +53,5 @@
delay: 10
timeout: "{{ disable_selinux_reboot_timeout }}"
when:
- "{{ selinux_result | changed }}"
- "{{ not is_local | bool }}"
- selinux_result | changed
- not is_local | bool

View File

@ -81,11 +81,11 @@
Not configuring docker storage in {{ docker_storage_driver }} mode as
loopback-backed containers or images exist.
when:
- "{{ 'Data loop file' in docker_info.stdout }}"
- "{{ 'Images: 0' not in docker_info.stdout }}"
- "{{ 'Containers: 0' not in docker_info.stdout }}"
- "'Data loop file' in docker_info.stdout"
- "'Images: 0' not in docker_info.stdout"
- "'Containers: 0' not in docker_info.stdout"
- include: storage.yml
when: "{{ 'Data loop file' in docker_info.stdout }}"
when: "'Data loop file' in docker_info.stdout"
- include: config.yml

View File

@ -4,7 +4,7 @@
msg: >
Unexpected requested boot mode {{ drac_boot_mode }}. Expected one of
{{ drac_boot_mode_valid_modes | join(', ') }}.
when: "{{ drac_boot_mode | lower not in drac_boot_mode_valid_modes }}"
when: drac_boot_mode | lower not in drac_boot_mode_valid_modes
- name: Check the boot mode
raw: "racadm get BIOS.BiosBootSettings.BootMode"
@ -23,7 +23,7 @@
msg: >
Unexpected current boot mode {{ current_boot_mode }}. Expected one of
{{ drac_boot_mode_valid_modes | join(', ') }}.
when: "{{ current_boot_mode not in drac_boot_mode_valid_modes }}"
when: current_boot_mode not in drac_boot_mode_valid_modes
- block:
- name: Set the boot mode
@ -61,4 +61,4 @@
retries: "{{ drac_boot_mode_timeout // drac_boot_mode_interval }}"
delay: "{{ drac_boot_mode_interval }}"
when: "{{ current_boot_mode != drac_boot_mode }}"
when: current_boot_mode != drac_boot_mode

View File

@ -25,8 +25,8 @@
Unable to determine the boot mode. Got: {{ result.stdout }}. Expected
bios or uefi.
when:
- "{{ not boot_mode_is_bios }}"
- "{{ not boot_mode_is_uefi }}"
- not boot_mode_is_bios
- not boot_mode_is_uefi
- name: Check the BIOS boot sequence
raw: "racadm get BIOS.BiosBootSettings.{% if boot_mode_is_uefi %}Uefi{% endif %}BootSeq"
@ -46,7 +46,7 @@
msg: >
There is a pending boot sequence configuration change. Please
apply this change before continuing.
when: "{{ 'Pending' in current_boot_sequence }}"
when: "'Pending' in current_boot_sequence"
- block:
- name: Check the NICs' boot protocol
@ -75,12 +75,12 @@
There is a pending NIC boot protocol configuration change for
NIC {{ item.nic }}. Please apply this before continuing.
with_items: "{{ nic_boot_protos }}"
when: "{{ 'Pending' in item.current }}"
when: "'Pending' in item.current"
- name: Ensure NIC boot protocol is configured
raw: "racadm set Nic.NICConfig.{{ item.nic }}.LegacyBootProto {{ item.required }}"
with_items: "{{ nic_boot_protos }}"
when: "{{ item.current != item.required }}"
when: item.current != item.required
register: result
failed_when: "'ERROR' in result.stdout"
until: "{{ drac_pxe_busy_message not in result.stdout }}"
@ -90,7 +90,7 @@
- name: Ensure NIC configuration jobs are created
raw: "racadm jobqueue create NIC.Integrated.1-{{ item.nic }}-1 -s TIME_NOW"
with_items: "{{ nic_boot_protos }}"
when: "{{ item.current != item.required }}"
when: item.current != item.required
register: result
failed_when: "'ERROR' in result.stdout"
until: "{{ drac_pxe_busy_message not in result.stdout }}"
@ -101,9 +101,9 @@
set_fact:
job_ids: "{{ job_ids + [item.stdout_lines[-1].split()[-1]] }}"
with_items: "{{ result.results }}"
when: "{{ not item.skipped }}"
when: not item.skipped
when: "{{ boot_mode_is_bios }}"
when: boot_mode_is_bios
- block:
- name: Check the UEFI PXE interface
@ -122,7 +122,7 @@
msg: >
There is a pending UEFI PXE interface configuration change.
Please apply this before continuing.
when: "{{ 'Pending' in current_pxe_interface }}"
when: "'Pending' in current_pxe_interface"
- name: Ensure UEFI PXE device is configured
raw: "racadm set BIOS.PxeDev1Settings.PxeDev1Interface NIC.Integrated.1-{{ drac_pxe_interface }}-1"
@ -131,14 +131,14 @@
until: "{{ drac_pxe_busy_message not in result.stdout }}"
retries: "{{ drac_pxe_retries }}"
delay: "{{ drac_pxe_interval }}"
when: "{{ current_pxe_interface != required_pxe_interface }}"
when: current_pxe_interface != required_pxe_interface
- name: Set a fact to trigger a BIOS configuration job
set_fact:
bios_job_required: True
when: "{{ current_pxe_interface != required_pxe_interface }}"
when: current_pxe_interface != required_pxe_interface
when: "{{ boot_mode_is_uefi }}"
when: boot_mode_is_uefi
- name: Ensure boot sequence is configured
raw: "racadm set BIOS.BiosBootSettings.BootSeq {{ drac_pxe_bios_boot_sequence }}"
@ -147,12 +147,12 @@
until: "{{ drac_pxe_busy_message not in result.stdout }}"
retries: "{{ drac_pxe_retries }}"
delay: "{{ drac_pxe_interval }}"
when: "{{ current_boot_sequence != required_boot_sequence }}"
when: current_boot_sequence != required_boot_sequence
- name: Set a fact to trigger a BIOS configuration job
set_fact:
bios_job_required: True
when: "{{ current_boot_sequence != required_boot_sequence }}"
when: current_boot_sequence != required_boot_sequence
- name: Ensure BIOS configuration job is created
raw: "racadm jobqueue create BIOS.Setup.1-1 -s TIME_NOW"
@ -161,14 +161,14 @@
until: "{{ drac_pxe_busy_message not in result.stdout }}"
retries: "{{ drac_pxe_retries }}"
delay: "{{ drac_pxe_interval }}"
when: "{{ bios_job_required }}"
when: bios_job_required
- name: Set a fact containing the BIOS configuration job ID
set_fact:
# Format of the last line is:
# JOB_ID = <job ID>
job_ids: "{{ job_ids + [result.stdout_lines[-1].split()[-1]] }}"
when: "{{ bios_job_required }}"
when: bios_job_required
- name: Ensure server is rebooted
raw: "racadm serveraction powercycle"
@ -178,7 +178,7 @@
- name: Wait for the configuration jobs to complete
raw: "racadm jobqueue view -i {{ item }}"
with_items: "{{ job_ids }}"
when: "{{ job_ids | length > 0 }}"
when: job_ids | length > 0
register: result
failed_when: "'ERROR' in result.stdout"
until: "{{ 'Status=Completed' in result.stdout }}"

View File

@ -16,7 +16,7 @@
filename: "{{ ipa_images_kernel_name }}"
- url: "{{ ipa_images_ramdisk_url }}"
filename: "{{ ipa_images_ramdisk_name }}"
when: "{{ item.url != None }}"
when: item.url != None
- name: Compute the MD5 checksum of the Ironic Python Agent (IPA) images
stat:
@ -47,7 +47,7 @@
- name: Set a fact containing the Ironic Python Agent (IPA) kernel image checksum
set_fact:
ipa_images_kernel_checksum: "{{ openstack_image.checksum }}"
when: "{{ openstack_image != None }}"
when: openstack_image != None
- name: Gather facts about Ironic Python Agent (IPA) ramdisk image
os_image_facts:
@ -58,7 +58,7 @@
- name: Set a fact containing the Ironic Python Agent (IPA) ramdisk image checksum
set_fact:
ipa_images_ramdisk_checksum: "{{ openstack_image.checksum }}"
when: "{{ openstack_image != None }}"
when: openstack_image != None
- name: Ensure Ironic Python Agent (IPA) images are removed from Glance
os_image:
@ -74,8 +74,8 @@
checksum: "{{ ipa_images_checksum.results[1].stat.md5 }}"
glance_checksum: "{{ ipa_images_ramdisk_checksum | default }}"
when:
- "{{ item.glance_checksum != None }}"
- "{{ item.checksum != item.glance_checksum }}"
- item.glance_checksum != None
- item.checksum != item.glance_checksum
- name: Ensure Ironic Python Agent (IPA) images are registered with Glance
os_image:

View File

@ -182,7 +182,7 @@ def main():
overrides=dict(default={}, type='dict'),
sample=dict(default='/usr/share/kolla-ansible/etc_examples/kolla/passwords.yml', type='str'),
src=dict(default='/etc/kolla/passwords.yml', type='str'),
vault_password=dict(type='str'),
vault_password=dict(type='str', no_log=True),
virtualenv=dict(type='str'),
),
add_file_common_args=True,

View File

@ -23,14 +23,14 @@
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
become: True
when: "{{ kolla_ansible_ctl_install_type == 'source' }}"
when: kolla_ansible_ctl_install_type == 'source'
- name: Ensure Kolla Ansible source code checkout exists
git:
repo: "{{ kolla_ansible_source_url }}"
dest: "{{ kolla_ansible_source_path }}"
version: "{{ kolla_ansible_source_version }}"
when: "{{ kolla_ansible_ctl_install_type == 'source' }}"
when: kolla_ansible_ctl_install_type == 'source'
- name: Ensure the latest version of pip is installed
pip:
@ -61,7 +61,7 @@
# Required for kolla-genpwd.
- name: PyYAML
version: "3.12"
when: "{{ item.install | default(True) | bool }}"
when: item.install | default(True) | bool
# This is a workaround for the lack of a python package for libselinux-python
# on PyPI. Without using --system-site-packages to create the virtualenv, it

View File

@ -1,6 +1,6 @@
---
- include: install.yml
when: "{{ kolla_ansible_is_standalone | bool }}"
when: kolla_ansible_is_standalone | bool
tags:
- install

View File

@ -54,8 +54,11 @@ node_custom_config: "{{ kolla_node_custom_config_path }}"
#docker_registry: "172.16.0.10:4000"
docker_namespace: "{{ kolla_docker_namespace }}"
#docker_registry_username: "sam"
#docker_registry_password: "correcthorsebatterystaple"
{% if kolla_docker_registry_username and kolla_docker_registry_password %}
docker_registry_username: "{{ kolla_docker_registry_username }}"
docker_registry_password: "{{ kolla_docker_registry_password }}"
{% endif %}
####################

View File

@ -8,6 +8,11 @@
# ignore: Optional list of files to leave in the destination, even if disabled
# or unexpected.
kolla_openstack_custom_config:
# Ceph.
- src: "{{ kolla_extra_config_path }}/ceph"
dest: "{{ kolla_node_custom_config_path }}/ceph"
patterns: "*"
enabled: "{{ kolla_enable_ceph }}"
# Fluentd filters.
- src: "{{ kolla_extra_config_path }}//fluentd/filter"
dest: "{{ kolla_node_custom_config_path }}/fluentd/filter"

View File

@ -25,14 +25,14 @@
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
become: True
when: "{{ kolla_ctl_install_type == 'source' }}"
when: kolla_ctl_install_type == 'source'
- name: Ensure Kolla source code checkout exists
git:
repo: "{{ kolla_source_url }}"
dest: "{{ kolla_source_path }}"
version: "{{ kolla_source_version }}"
when: "{{ kolla_ctl_install_type == 'source' }}"
when: kolla_ctl_install_type == 'source'
- name: Ensure the latest version of pip is installed
pip:
@ -64,4 +64,4 @@
# Required for kolla-genpwd.
- name: PyYAML
version: "3.12"
when: "{{ item.install | default(True) | bool }}"
when: item.install | default(True) | bool

View File

@ -7,5 +7,5 @@
with_items: "{{ libvirt_vm_volumes }}"
register: volume_result
changed_when:
- "{{ volume_result | success }}"
- "{{ (volume_result.stdout | from_json).changed | default(True) }}"
- volume_result | success
- (volume_result.stdout | from_json).changed | default(True)

View File

@ -4,14 +4,14 @@
url: "{{ item }}"
dest: "{{ libvirt_vm_image_cache_path }}/{{ item | basename }}"
with_items: "{{ libvirt_vm_volumes | selectattr('image', 'defined') | map(attribute='image') | list }}"
when: "{{ 'http' in item }}"
when: "'http' in item"
- name: Ensure local images are copied
copy:
src: "{{ item }}"
dest: "{{ libvirt_vm_image_cache_path }}/{{ item | basename }}"
with_items: "{{ libvirt_vm_volumes | selectattr('image', 'defined') | map(attribute='image') | list }}"
when: "{{ 'http' not in item }}"
when: "'http' not in item"
- name: Ensure the VM volumes exist
script: >
@ -26,5 +26,5 @@
with_items: "{{ libvirt_vm_volumes }}"
register: volume_result
changed_when:
- "{{ volume_result | success }}"
- "{{ (volume_result.stdout | from_json).changed | default(True) }}"
- volume_result | success
- (volume_result.stdout | from_json).changed | default(True)

View File

@ -0,0 +1,45 @@
Prometheus Node Exporter
========================
This role can be used to configure a Prometheus node exporter running
in a Docker container.
Requirements
------------
The host executing the role has the following requirements:
* Docker engine
* ``docker-py >= 1.7.0``
Role Variables
--------------
``nodeexporter_enabled``: Whether the Node Exporter is enabled. Defaults to ``false``.
``nodeexporter_namespace``: Docker image namespace. Defaults to ``prom``.
``nodeexporter_image``: Docker image name.
``nodeexporter_tag``: Docker image tag. Defaults to ``v0.15.0``.
``nodeexporter_image_full``: Full docker image specification.
``nodeexporter_restart_policy``: Docker restart policy for Node Exporter container. Defaults
to ``unless-stopped``.
``nodeexporter_restart_retries``: Number of Docker restarts. Defaults to 10.
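For illustration only, a minimal sketch of overriding some of these defaults
in group variables (the values shown are assumptions, not recommendations)::

    ---
    nodeexporter_enabled: true
    nodeexporter_tag: "v0.15.0"
    nodeexporter_restart_retries: 5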
Dependencies
------------
None
Example Playbook
----------------
The following playbook configures Node Exporter::

    ---
    - hosts: node-exporter
      roles:
        - role: node-exporter
Author Information
------------------
- Jonathan Davies (<jpds@protonmail.com>)

View File

@ -0,0 +1,31 @@
---
# Roughly follows kolla-ansible's service deployment patterns.
# Whether Node Exporter is enabled.
nodeexporter_enabled: false
# Service deployment definition.
nodeexporter_services:
nodeexporter:
container_name: nodeexporter
enabled: "{{ nodeexporter_enabled }}"
image: "{{ nodeexporter_image_full }}"
command: /bin/node_exporter --collector.procfs=/host/proc --collector.sysfs=/host/sys --collector.filesystem.ignored-mount-points "^/(sys|proc|dev|host|etc)($|/)"
privileged: True
read_only: True
volumes:
- "/proc:/host/proc"
- "/sys:/host/sys"
- "/:/rootfs"
- "/etc/hostname:/etc/host_hostname"
####################
# Docker
####################
nodeexporter_namespace: "prom"
nodeexporter_image: "{{ docker_registry ~ '/' if docker_registry | default else '' }}{{ nodeexporter_namespace }}/node-exporter"
nodeexporter_tag: "v0.15.0"
nodeexporter_image_full: "{{ nodeexporter_image }}:{{ nodeexporter_tag }}"
nodeexporter_restart_policy: "unless-stopped"
nodeexporter_restart_retries: 10

View File

@ -0,0 +1,14 @@
---
- name: Ensure node exporter container is running
docker_container:
image: "{{ item.value.image }}"
name: "{{ item.value.container_name }}"
command: "{{ item.value.command }}"
network_mode: "host"
privileged: "{{ item.value.privileged | default(omit) }}"
read_only: "{{ item.value.read_only | default(omit) }}"
restart_policy: "{{ nodeexporter_restart_policy }}"
restart_retries: "{{ nodeexporter_restart_retries }}"
state: "{{ (item.value.enabled and action != 'destroy') | ternary('started', 'absent') }}"
volumes: "{{ item.value.volumes }}"
with_dict: "{{ nodeexporter_services }}"

View File

@ -0,0 +1,25 @@
---
- include: deploy.yml
- name: Check whether Node Exporter volumes are present
command: docker volume inspect {{ volume }}
changed_when: False
with_subelements:
- "{{ nodeexporter_services }}"
- volumes
when: "'/' not in volume"
failed_when:
- volume_result.rc != 0
- "'No such volume' not in volume_result.stderr"
vars:
volume: "{{ item.1.split(':')[0] }}"
register: volume_result
- name: Ensure Node Exporter volumes are absent
command: docker volume rm {{ volume }}
with_items: "{{ volume_result.results }}"
when:
- not item | skipped
- item.rc == 0
vars:
volume: "{{ item.item.1.split(':')[0] }}"

View File

@ -0,0 +1,2 @@
---
- include: "{{ action }}.yml"

View File

@ -0,0 +1,10 @@
---
- name: Pulling Node Exporter container image
docker_image:
name: "{{ item.value.image }}"
repository: "{{ item.value.image }}"
state: present
with_dict: "{{ nodeexporter_services }}"
when:
- item.value.enabled
- action != 'destroy'

View File

@ -0,0 +1 @@
deploy.yml

View File

@ -0,0 +1,3 @@
---
- include: pull.yml
- include: deploy.yml

View File

@ -39,5 +39,5 @@
with_together:
- "{{ veth_result.results }}"
- "{{ peer_result.results }}"
when: "{{ ctl_result|changed or item[0]|changed or item[1]|changed }}"
when: ctl_result|changed or item[0]|changed or item[1]|changed
become: True

View File

@ -36,7 +36,7 @@
notify:
- Copy Ironic Python Agent images into /httpboot
become: True
when: "{{ ipa_build_images | bool }}"
when: ipa_build_images | bool
handlers:
- name: Copy Ironic Python Agent images into /httpboot

View File

@ -11,7 +11,7 @@
msg: >
There should be exactly one host in the seed group. There are
currently {{ groups['seed'] | length }}.
when: "{{ groups['seed'] | length != 1 }}"
when: groups['seed'] | length != 1
- name: Ensure the image cache directory exists
file:

View File

@ -8,4 +8,4 @@
swift_region: 1
# ID of the zone for this Swift service.
swift_zone: "{{ groups['controllers'].index(inventory_hostname) % swift_num_zones }}"
when: "{{ kolla_enable_swift | bool }}"
when: kolla_enable_swift | bool

View File

@ -26,8 +26,8 @@
{% if item in openstack_auth %}empty{% else %}not present{% endif %}
in openstack_auth. Have you sourced the environment file?
when:
- "{{ openstack_auth_type == 'password' }}"
- "{{ item not in openstack_auth or not openstack_auth[item] }}"
- openstack_auth_type == 'password'
- item not in openstack_auth or not openstack_auth[item]
with_items: "{{ openstack_auth_password_required_params }}"
tags:
- config-validation

View File

@ -8,6 +8,9 @@ that define the network's attributes. For example, to configure the ``cidr``
attribute of a network named ``arpanet``, we would use a variable named
``arpanet_cidr``.
Global Network Configuration
============================
Global network configuration is stored in
``${KAYOBE_CONFIG_PATH}/networks.yml``. The following attributes are
supported:
@ -47,6 +50,69 @@ supported:
A name to give to a Libvirt network representing this network on the seed
hypervisor.
Configuring an IP Subnet
------------------------
An IP subnet may be configured by setting the ``cidr`` attribute for a network
to the CIDR representation of the subnet.
To configure a network called ``example`` with the ``10.0.0.0/24`` IP subnet:
.. code-block:: yaml
:caption: ``networks.yml``
example_cidr: 10.0.0.0/24
Configuring an IP Gateway
-------------------------
An IP gateway may be configured by setting the ``gateway`` attribute for a
network to the IP address of the gateway.
To configure a network called ``example`` with a gateway at ``10.0.0.1``:
.. code-block:: yaml
:caption: ``networks.yml``
example_gateway: 10.0.0.1
This gateway will be configured on all hosts to which the network is mapped.
Note that configuring multiple IP gateways on a single host will lead to
unpredictable results.
Configuring Static IP Routes
----------------------------
Static IP routes may be configured by setting the ``routes`` attribute for a
network to a list of routes.
To configure a network called ``example`` with a single IP route:
.. code-block:: yaml
:caption: ``networks.yml``
example_routes:
- cidr: 10.1.0.0/24
gateway: 10.0.0.1
These routes will be configured on all hosts to which the network is mapped.
Configuring a VLAN
------------------
A VLAN network may be configured by setting the ``vlan`` attribute for a
network to the ID of the VLAN.
To configure a network called ``example`` with VLAN ID ``123``:
.. code-block:: yaml
:caption: ``networks.yml``
example_vlan: 123
IP Address Allocation
=====================
IP addresses are allocated automatically by Kayobe from the allocation pool
defined by ``allocation_pool_start`` and ``allocation_pool_end``. The
@ -54,6 +120,44 @@ allocated addresses are stored in
``${KAYOBE_CONFIG_PATH}/network-allocation.yml`` using the global per-network
attribute ``ips`` which maps Ansible inventory hostnames to allocated IPs.
If static IP address allocation is required, the IP allocation file
``network-allocation.yml`` may be manually populated with the required
addresses.
Configuring Dynamic IP Address Allocation
-----------------------------------------
To configure a network called ``example`` with the ``10.0.0.0/24`` IP subnet
and an allocation pool spanning from ``10.0.0.4`` to ``10.0.0.254``:
.. code-block:: yaml
:caption: ``networks.yml``
example_cidr: 10.0.0.0/24
example_allocation_pool_start: 10.0.0.4
example_allocation_pool_end: 10.0.0.254
.. note::
This pool should not overlap with an inspection or neutron allocation pool
on the same network.
Configuring Static IP Address Allocation
----------------------------------------
To configure a network called ``example`` with statically allocated IP
addresses for hosts ``host1`` and ``host2``:
.. code-block:: yaml
:caption: ``network-allocation.yml``
example_ips:
host1: 10.0.0.1
host2: 10.0.0.2
Per-host Network Configuration
==============================
Some network attributes are specific to a host's role in the system, and
these are stored in
``${KAYOBE_CONFIG_PATH}/inventory/group_vars/<group>/network-interfaces``.
@ -72,51 +176,356 @@ The following attributes are supported:
``bond_miimon``
For bond interfaces, the interval in milliseconds between MII link monitoring checks.
IP Addresses
------------
An interface will be assigned an IP address if the associated network has a
``cidr`` attribute. The IP address will be assigned from the range defined by
the ``allocation_pool_start`` and ``allocation_pool_end`` attributes, if one
has not been statically assigned in ``network-allocation.yml``.
Configuring Ethernet Interfaces
-------------------------------
An Ethernet interface may be configured by setting the ``interface`` attribute
for a network to the name of the Ethernet interface.
To configure a network called ``example`` with an Ethernet interface on
``eth0``:
.. code-block:: yaml
:caption: ``inventory/group_vars/<group>/network-interfaces``
example_interface: eth0
.. _configuring-bridge-interfaces:
Configuring Bridge Interfaces
-----------------------------
A Linux bridge interface may be configured by setting the ``interface``
attribute of a network to the name of the bridge interface, and the
``bridge_ports`` attribute to a list of interfaces which will be added as
member ports on the bridge.
To configure a network called ``example`` with a bridge interface on
``breth1``, and a single port ``eth1``:
.. code-block:: yaml
:caption: ``inventory/group_vars/<group>/network-interfaces``
example_interface: breth1
example_bridge_ports:
- eth1
Bridge member ports may be Ethernet interfaces, bond interfaces, or VLAN
interfaces. In the case of bond interfaces, the bond must be configured
separately in addition to the bridge, as a different named network. In the
case of VLAN interfaces, the underlying Ethernet interface must be configured
separately in addition to the bridge, as a different named network.
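For illustration, a hedged sketch of the bond-as-bridge-port case described
above, assuming two networks with illustrative names ``bondnet`` (the bond)
and ``example`` (the bridge):

.. code-block:: yaml
   :caption: ``inventory/group_vars/<group>/network-interfaces``

   # The bond is configured as its own named network (hypothetical name).
   bondnet_interface: bond0
   bondnet_bond_slaves:
     - eth0
     - eth1

   # The bond master interface is then used as a bridge member port.
   example_interface: breth1
   example_bridge_ports:
     - bond0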
Configuring Bond Interfaces
---------------------------
A bonded interface may be configured by setting the ``interface`` attribute of
a network to the name of the bond's master interface, and the ``bond_slaves``
attribute to a list of interfaces which will be added as slaves to the master.
To configure a network called ``example`` with a bond with master interface
``bond0`` and two slaves ``eth0`` and ``eth1``:
.. code-block:: yaml
:caption: ``inventory/group_vars/<group>/network-interfaces``
example_interface: bond0
example_bond_slaves:
- eth0
- eth1
Optionally, the bond mode and MII monitoring interval may also be configured:
.. code-block:: yaml
:caption: ``inventory/group_vars/<group>/network-interfaces``
example_bond_mode: 802.3ad
example_bond_miimon: 100
Bond slaves may be Ethernet interfaces or VLAN interfaces. In the case of
VLAN interfaces, the underlying Ethernet interface must be configured
separately in addition to the bond, as a different named network.
Configuring VLAN Interfaces
---------------------------
A VLAN interface may be configured by setting the ``interface`` attribute of a
network to the name of the VLAN interface. The interface name must be of the
form ``<parent interface>.<VLAN ID>``.
To configure a network called ``example`` with a VLAN interface with a parent
interface of ``eth2`` for VLAN ``123``:
.. code-block:: yaml
:caption: ``inventory/group_vars/<group>/network-interfaces``
example_interface: eth2.123
To keep the configuration DRY, reference the network's ``vlan`` attribute:
.. code-block:: yaml
:caption: ``inventory/group_vars/<group>/network-interfaces``
example_interface: "eth2.{{ example_vlan }}"
Ethernet interfaces, bridges, and bond master interfaces may all be parents to
a VLAN interface.
Bridges and VLANs
^^^^^^^^^^^^^^^^^
Adding a VLAN interface to a bridge directly will allow tagged traffic for that
VLAN to be forwarded by the bridge, whereas adding a VLAN interface to an
Ethernet or bond interface that is a bridge member port will prevent tagged
traffic for that VLAN being forwarded by the bridge.
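As a minimal sketch of this distinction, assuming the bridge ``breth1`` with
member port ``eth1`` from :ref:`configuring-bridge-interfaces` and an
illustrative VLAN network ``vlannet`` for VLAN ``123``:

.. code-block:: yaml
   :caption: ``inventory/group_vars/<group>/network-interfaces``

   # Parent is the bridge itself, so the bridge forwards tagged traffic
   # for VLAN 123. Using eth1.123 (the member port) instead would prevent
   # the bridge from forwarding that VLAN's tagged traffic.
   vlannet_interface: breth1.123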
Network Role Configuration
==========================
In order to provide flexibility in the system's network topology, Kayobe maps
the named networks to logical network roles. A single named network may
perform multiple roles, or even none at all. The available roles are:
``oob_oc_net_name``
Overcloud out-of-band network (``oob_oc_net_name``)
Name of the network used by the seed to access the out-of-band management
controllers of the bare metal overcloud hosts.
``provision_oc_net_name``
Overcloud provisioning network (``provision_oc_net_name``)
Name of the network used by the seed to provision the bare metal overcloud
hosts.
``oob_wl_net_name``
Workload out-of-band network (``oob_wl_net_name``)
Name of the network used by the overcloud hosts to access the out-of-band
management controllers of the bare metal workload hosts.
``provision_wl_net_name``
Workload provisioning network (``provision_wl_net_name``)
Name of the network used by the overcloud hosts to provision the bare metal
workload hosts.
``internal_net_name``
Internal network (``internal_net_name``)
Name of the network used to expose the internal OpenStack API endpoints.
``public_net_name``
Public network (``public_net_name``)
Name of the network used to expose the public OpenStack API endpoints.
``external_net_name``
External network (``external_net_name``)
Name of the network used to provide external network access via Neutron.
``storage_net_name``
Storage network (``storage_net_name``)
Name of the network used to carry storage data traffic.
``storage_mgmt_net_name``
Storage management network (``storage_mgmt_net_name``)
Name of the network used to carry storage management traffic.
``inspection_net_name``
Workload inspection network (``inspection_net_name``)
Name of the network used to perform hardware introspection on the bare
metal workload hosts.
These roles are configured in ``${KAYOBE_CONFIG_PATH}/networks.yml``.
Configuring Network Roles
-------------------------
To configure network roles in a system with two networks, ``example1`` and
``example2``:
.. code-block:: yaml
:caption: ``networks.yml``
oob_oc_net_name: example1
provision_oc_net_name: example1
oob_wl_net_name: example1
provision_wl_net_name: example2
internal_net_name: example2
public_net_name: example2
external_net_name: example2
storage_net_name: example2
storage_mgmt_net_name: example2
inspection_net_name: example2
Overcloud Provisioning Network
------------------------------
If using a seed to inspect the bare metal overcloud hosts, it is necessary to
define a DHCP allocation pool for the seed's ironic inspector DHCP server using
the ``inspection_allocation_pool_start`` and ``inspection_allocation_pool_end``
attributes of the overcloud provisioning network.
.. note::
This example assumes that the ``example`` network is mapped to
``provision_oc_net_name``.
To configure a network called ``example`` with an inspection allocation pool:
.. code-block:: yaml
example_inspection_allocation_pool_start: 10.0.0.128
example_inspection_allocation_pool_end: 10.0.0.254
.. note::
This pool should not overlap with a kayobe allocation pool on the same
network.
Workload Provisioning Network
-----------------------------
If using the overcloud to provision bare metal workload (compute) hosts, it is
necessary to define an IP allocation pool for the overcloud's neutron
provisioning network using the ``neutron_allocation_pool_start`` and
``neutron_allocation_pool_end`` attributes of the workload provisioning
network.
.. note::

   This example assumes that the ``example`` network is mapped to
   ``provision_wl_net_name``.

To configure a network called ``example`` with a neutron provisioning
allocation pool:

.. code-block:: yaml

   example_neutron_allocation_pool_start: 10.0.1.128
   example_neutron_allocation_pool_end: 10.0.1.195

.. note::

   This pool should not overlap with a Kayobe or inspection allocation pool
   on the same network.
Workload Inspection Network
---------------------------
If using the overcloud to inspect bare metal workload (compute) hosts, it is
necessary to define a DHCP allocation pool for the overcloud's ironic inspector
DHCP server using the ``inspection_allocation_pool_start`` and
``inspection_allocation_pool_end`` attributes of the workload provisioning
network.
.. note::

   This example assumes that the ``example`` network is mapped to
   ``provision_wl_net_name``.

To configure a network called ``example`` with an inspection allocation pool:

.. code-block:: yaml

   example_inspection_allocation_pool_start: 10.0.1.196
   example_inspection_allocation_pool_end: 10.0.1.254

.. note::

   This pool should not overlap with a Kayobe or neutron allocation pool on
   the same network.
Neutron Networking
==================
.. note::

   This assumes the use of the neutron ``openvswitch`` ML2 mechanism driver
   for control plane networking.
Certain modes of operation of neutron require layer 2 access to physical
networks in the system. Hosts in the ``network`` group (by default, this is
the same as the ``controllers`` group) run the neutron networking services
(Open vSwitch agent, DHCP agent, L3 agent, metadata agent, etc.).
The Kayobe network configuration must ensure that the neutron Open
vSwitch bridges on the network hosts have access to the external network. If
bare metal compute nodes are in use, then they must also have access to the
workload provisioning network. This can be done by ensuring that the external
and workload provisioning network interfaces are bridges. Kayobe will ensure
connectivity between these Linux bridges and the neutron Open vSwitch bridges
via a virtual Ethernet pair. See :ref:`configuring-bridge-interfaces`.
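
For example, a minimal sketch of a bridged external network interface,
assuming the external network is named ``example`` and that ``eth1`` provides
the physical connectivity (both names are illustrative):

.. code-block:: yaml

   # Defining bridge ports causes the interface to be configured as a Linux
   # bridge named breth1, with eth1 as a member port.
   example_interface: breth1
   example_bridge_ports:
     - eth1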
Network to Host Mapping
=======================
Networks are mapped to hosts using the variable ``network_interfaces``.
Kayobe's playbook group variables define some sensible defaults for this
variable for hosts in the top level standard groups. These defaults are set
using the network roles typically required by the group.
Example
=======
Seed
----
By default, the seed is attached to the following networks:
* overcloud out-of-band network
* overcloud provisioning network
This list may be extended by setting ``seed_extra_network_interfaces`` to a
list of names of additional networks to attach. Alternatively, the list may be
completely overridden by setting ``seed_network_interfaces``. These variables
are found in ``${KAYOBE_CONFIG_PATH}/seed.yml``.
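
For example, a sketch that additionally attaches the seed to the internal
network, taking the network name from the ``internal_net_name`` role mapping
(any network name could be listed here):

.. code-block:: yaml
   :caption: ``seed.yml``

   seed_extra_network_interfaces:
     - "{{ internal_net_name }}"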
Seed Hypervisor
---------------
By default, the seed hypervisor is attached to the same networks as the seed.
This list may be extended by setting
``seed_hypervisor_extra_network_interfaces`` to a list of names of additional
networks to attach. Alternatively, the list may be
completely overridden by setting ``seed_hypervisor_network_interfaces``. These
variables are found in ``${KAYOBE_CONFIG_PATH}/seed-hypervisor.yml``.
Controllers
-----------
By default, controllers are attached to the following networks:
* overcloud provisioning network
* workload (compute) out-of-band network
* workload (compute) provisioning network
* internal network
* storage network
* storage management network
In addition, if the controllers are also in the ``network`` group, they are
attached to the following networks:
* public network
* external network
This list may be extended by setting ``controller_extra_network_interfaces`` to a
list of names of additional networks to attach. Alternatively, the list may be
completely overridden by setting ``controller_network_interfaces``. These
variables are found in ``${KAYOBE_CONFIG_PATH}/controllers.yml``.
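
For example, a sketch that completely overrides the default list, attaching
the controllers only to the overcloud provisioning and internal networks
(the selection of networks here is illustrative):

.. code-block:: yaml
   :caption: ``controllers.yml``

   controller_network_interfaces:
     - "{{ provision_oc_net_name }}"
     - "{{ internal_net_name }}"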
Monitoring Hosts
----------------
By default, the monitoring hosts are attached to the same networks as the
controllers when they are in the ``controllers`` group. If the monitoring
hosts are not in the ``controllers`` group, they are attached to the following
networks by default:
* overcloud provisioning network
* internal network
* public network
This list may be extended by setting ``monitoring_extra_network_interfaces`` to
a list of names of additional networks to attach. Alternatively, the list may
be completely overridden by setting ``monitoring_network_interfaces``. These
variables are found in ``${KAYOBE_CONFIG_PATH}/monitoring.yml``.
Other Hosts
-----------
If additional hosts are managed by kayobe, the networks to which these hosts
are attached may be defined in a host or group variables file. See
:ref:`control-plane-service-placement` for further details.
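
For example, a hypothetical group variables file for a ``storage`` group
could attach those hosts to the internal and storage networks:

.. code-block:: yaml
   :caption: ``inventory/group_vars/storage/network-interfaces``

   ---
   network_interfaces:
     - "{{ internal_net_name }}"
     - "{{ storage_net_name }}"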
Complete Example
================
The following example combines the network configuration described above into
a complete system configuration. In our example cloud we have three networks:
``management``, ``cloud`` and ``external``:
.. parsed-literal::

   external +---------------------------------------+----------------------------------------+
The ``management`` network is used to access the servers' BMCs and by the seed
to inspect and provision the cloud hosts. The ``cloud`` network carries all
internal control plane and storage traffic, and is used by the control plane to
provision the bare metal compute hosts. Finally, the ``external`` network
links the cloud to the outside world.
We could describe such a network as follows:
.. code-block:: yaml
   :name: networks.yml
   :caption: ``networks.yml``

   ---
   # Network role mappings.
   oob_oc_net_name: management
   provision_oc_net_name: management
   oob_wl_net_name: management
   provision_wl_net_name: cloud
   internal_net_name: cloud
   public_net_name: external

   # ...

   management_inspection_allocation_pool_end: 10.0.0.254

   # cloud network definition.
   cloud_cidr: 10.0.1.0/24
   cloud_allocation_pool_start: 10.0.1.1
   cloud_allocation_pool_end: 10.0.1.127
   cloud_inspection_allocation_pool_start: 10.0.1.128
   cloud_inspection_allocation_pool_end: 10.0.1.195
   cloud_neutron_allocation_pool_start: 10.0.1.196
   cloud_neutron_allocation_pool_end: 10.0.1.254

   # external network definition.
   external_cidr: 10.0.3.0/24
We can map these networks to network interfaces on the seed and controller hosts:
.. code-block:: yaml
   :caption: ``inventory/group_vars/seed/network-interfaces``

   ---
   management_interface: eth0

.. code-block:: yaml
   :caption: ``inventory/group_vars/controllers/network-interfaces``

   ---
   # ...
Kayobe will allocate IP addresses for the hosts that it manages:
.. code-block:: yaml
   :caption: ``network-allocation.yml``

   ---
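   # A hypothetical sketch of the allocations Kayobe records in this file;
   # the actual addresses are assigned automatically during deployment.
   management_ips:
     control0: 10.0.0.5
     seed: 10.0.0.6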
View File
@ -27,6 +27,10 @@ Upgrade Notes
inspector's DHCP server.
* Disables swift by default. The default value of ``kolla_enable_swift`` is
now ``no``.
* The default list of neutron ML2 mechanism drivers
(``kolla_neutron_ml2_mechanism_drivers``) has been removed in favour of using
the defaults provided by kolla-ansible. Users relying on the default list of
``openvswitch`` and ``genericswitch`` should set the value explicitly.
* Adds a variable ``config_path``, used to set the base path to configuration
on remote hosts. The default value is ``/opt/kayobe/etc``.
* Modifies the variable used to configure the kolla build configuration path
View File
@ -60,6 +60,12 @@
# Docker namespace to use for Kolla images.
#kolla_docker_namespace:
# Username to use to access a docker registry.
#kolla_docker_registry_username:
# Password to use to access a docker registry.
#kolla_docker_registry_password:
# Kolla OpenStack release version. This should be a Docker image tag.
#kolla_openstack_release:
View File
@ -2,7 +2,8 @@
###############################################################################
# Neutron configuration.
# List of Neutron ML2 mechanism drivers to use. If unset, the kolla-ansible
# defaults will be used.
#kolla_neutron_ml2_mechanism_drivers:
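
# For example, to set explicitly the list that was previously the default
# (uncomment to apply):
#kolla_neutron_ml2_mechanism_drivers:
#  - openvswitch
#  - genericswitch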
# List of Neutron ML2 type drivers to use.