tripleo-heat-templates/docker/services/octavia-health-manager.yaml
Carlos Camacho 44ef2a3ec1 Change template names to rocky
The master branch now targets rocky, so HOT templates should declare
that they may contain features from the rocky release [1].

This change also updates the YAML validation to use only the latest
heat_version alias. There are cases where we will need to set the
version for specific templates (i.e. mixed versions), so a variable is
added to assign specific templates to specific heat_version aliases,
avoiding the errors introduced by bulk-replacing the old version in
new releases.

[1]: https://docs.openstack.org/heat/latest/template_guide/hot_spec.html#rocky
Change-Id: Ib17526d9cc453516d99d4659ee5fa51a5aa7fb4b
2018-05-09 08:28:42 +02:00
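
For context, the heat_template_version alias at the top of a template is what opts
it into a release's HOT features, and a template that must stay on an older release
can keep an explicit older alias. A minimal sketch of the two cases (illustrative,
not taken from this change):

    heat_template_version: rocky    # alias for the 2018-08-31 HOT version
    heat_template_version: queens   # pinning a template to an earlier release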


heat_template_version: rocky

description: >
  OpenStack Octavia health-manager service configured with Puppet
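
# NOTE: The parameters below are the standard interface that TripleO passes to
# every composable service template (EndpointMap, ServiceNetMap, role info, etc.),
# plus the container images this particular service needs.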
parameters:
  DockerOctaviaHealthManagerImage:
    description: The container image to use for the octavia health-manager service
    type: string
  DockerOctaviaConfigImage:
    description: The container image to use for the octavia config_volume
    type: string
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  UpgradeRemoveUnusedPackages:
    default: false
    description: Remove package if the service is being disabled during upgrade
    type: boolean
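
# The two image parameters above are normally supplied by the container image
# prepare step; a hypothetical parameter_defaults override (registry, namespace
# and tag below are placeholders, not values shipped with this template) would
# look like:
#
#   parameter_defaults:
#     DockerOctaviaHealthManagerImage: 192.168.24.1:8787/rocky/centos-binary-octavia-health-manager:current-tripleo
#     DockerOctaviaConfigImage: 192.168.24.1:8787/rocky/centos-binary-octavia-api:current-tripleo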
resources:

  ContainersCommon:
    type: ./containers-common.yaml

  OctaviaHealthManagerPuppetBase:
    type: ../../puppet/services/octavia-health-manager.yaml
    properties:
      EndpointMap: {get_param: EndpointMap}
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}
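
# The role_data output below reuses the puppet base service's settings and layers
# the container-specific (docker/kolla) configuration on top of them.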
outputs:
  role_data:
    description: Role data for the Octavia health-manager role.
    value:
      service_name: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, service_name]}
      config_settings: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, config_settings]}
      logging_source: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, logging_source]}
      logging_groups: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, logging_groups]}
      service_config_settings: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, service_config_settings]}
      # BEGIN DOCKER SETTINGS #
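      # puppet_config describes the config-generation step: puppet is run inside a
      # container built from DockerOctaviaConfigImage, applying only resources tagged
      # octavia_config, and the result is written to the shared 'octavia' config volume.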
      puppet_config:
        config_volume: octavia
        puppet_tags: octavia_config
        step_config:
          get_attr: [OctaviaHealthManagerPuppetBase, role_data, step_config]
        config_image: {get_param: DockerOctaviaConfigImage}
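      # kolla_config is the JSON consumed by kolla_start inside the service container:
      # it copies the generated files from /var/lib/kolla/config_files/src into place
      # and then execs the health-manager command defined below.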
      kolla_config:
        /var/lib/kolla/config_files/octavia_health_manager.json:
          command: /usr/bin/octavia-health-manager --config-file /usr/share/octavia/octavia-dist.conf --config-file /etc/octavia/octavia.conf --log-file /var/log/octavia/health-manager.log --config-dir /etc/octavia/conf.d/common --config-dir /etc/octavia/conf.d/octavia-health-manager
          config_files:
            - source: "/var/lib/kolla/config_files/src/*"
              dest: "/"
              merge: true
              preserve_properties: true
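      # docker_config defines the containers started at each deployment step: step_2
      # runs a short-lived init container (as root) to create the extra conf.d
      # directory, and step_4 starts the long-running health-manager container on the
      # host network.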
      docker_config:
        step_2:
          octavia_health_manager_init_dirs:
            start_order: 0
            image: &octavia_health_manager_image {get_param: DockerOctaviaHealthManagerImage}
            user: root
            volumes:
              # NOTE(mandre) we need an extra dir for the service in /etc/octavia/conf.d
              # It is normally created as part of the RPM install, but it is
              # missing here because we use the same config_volume for all
              # octavia services, hence the same container image to generate
              # configuration.
              - /var/lib/config-data/puppet-generated/octavia/etc/octavia:/etc/octavia/
            command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-health-manager; chown -R octavia:octavia /etc/octavia/conf.d/octavia-health-manager']
        step_4:
          octavia_health_manager:
            start_order: 2
            image: *octavia_health_manager_image
            net: host
            privileged: false
            restart: always
            healthcheck:
              test: /openstack/healthcheck
            volumes:
              list_concat:
                - {get_attr: [ContainersCommon, volumes]}
                -
                  - /var/lib/kolla/config_files/octavia_health_manager.json:/var/lib/kolla/config_files/config.json:ro
                  - /var/lib/config-data/puppet-generated/octavia/:/var/lib/kolla/config_files/src:ro
                  - /var/log/containers/octavia:/var/log/octavia
            environment:
              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
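      # host_prep_tasks are Ansible tasks executed on the host before the containers
      # start; here they ensure the persistent log directory exists and leave a readme
      # in /var/log/octavia pointing at the container log locations.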
      host_prep_tasks:
        - name: create persistent logs directory
          file:
            path: /var/log/containers/octavia
            state: directory
        - name: octavia logs readme
          copy:
            dest: /var/log/octavia/readme.txt
            content: |
              Log files from octavia containers can be found under
              /var/log/containers/octavia and /var/log/containers/httpd/octavia-api.
          ignore_errors: true
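      # upgrade_tasks handle the upgrade from a non-containerized deployment: if the
      # baremetal openstack-octavia-health-manager service is enabled, it is validated
      # at step 0, then stopped and disabled (and optionally removed) at step 2.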
      upgrade_tasks:
        - name: Check if octavia_health_manager is deployed
          command: systemctl is-enabled --quiet openstack-octavia-health-manager
          tags: common
          ignore_errors: True
          register: octavia_health_enabled
        - name: "PreUpgrade step0,validation: Check service openstack-octavia-health-manager is running"
          command: systemctl is-active --quiet openstack-octavia-health-manager
          tags: validation
          when:
            - step|int == 0
            - octavia_health_enabled.rc == 0
        - name: Stop and disable octavia_health_manager service
          when:
            - step|int == 2
            - octavia_health_enabled.rc == 0
          service: name=openstack-octavia-health-manager state=stopped enabled=no
        - name: Set fact for removal of openstack-octavia-health-manager package
          when: step|int == 2
          set_fact:
            remove_octavia_health_manager_package: {get_param: UpgradeRemoveUnusedPackages}
        - name: Remove openstack-octavia-health-manager package if operator requests it
          yum: name=openstack-octavia-health-manager state=removed
          ignore_errors: True
          when:
            - step|int == 2
            - remove_octavia_health_manager_package|bool
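
# To enable this service, an Octavia environment file maps it to this template in
# its resource_registry. A sketch of the expected mapping (the relative path depends
# on where the environment file lives):
#
#   resource_registry:
#     OS::TripleO::Services::OctaviaHealthManager: ../../docker/services/octavia-health-manager.yaml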