
{%- set primary_role = [roles[0]] -%}
{%- for role in roles -%}
{%- if 'primary' in role.tags and 'controller' in role.tags -%}
{%- set _ = primary_role.pop() -%}
{%- set _ = primary_role.append(role) -%}
{%- endif -%}
{%- endfor -%}
{%- set primary_role_name = primary_role[0].name -%}
# primary role is: {{primary_role_name}}
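# The loop above selects the first role tagged with both 'primary' and
# 'controller' as the primary role, falling back to roles[0] when no
# role carries both tags (with the default roles_data.yaml this is
# typically the Controller role).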
heat_template_version: rocky
description: >
Deploy an OpenStack environment, consisting of several node types (roles):
Controller, Compute, BlockStorage, SwiftStorage and CephStorage. The Storage
roles enable independent scaling of the storage components, but the minimal
deployment is one Controller and one Compute node.
# TODO(shadower): we should probably use the parameter groups to put
# some order in here.
parameters:
# Common parameters (not specific to a role)
{%- for network in networks if network.vip|default(false) %}
{%- if network.name == 'External' %}
# Special case the External hostname param, which is CloudName
CloudName:
description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
type: string
{%- elif network.name == 'InternalApi' %}
# Special case the Internal API hostname param, which is CloudNameInternal
CloudNameInternal:
default: overcloud.{{network.name.lower()}}.localdomain
description: >
The DNS name of this cloud's {{network.name_lower}} endpoint. E.g.
'ci-overcloud.{{network.name.lower()}}.tripleo.org'.
type: string
{%- elif network.name == 'StorageMgmt' %}
# Special case StorageMgmt hostname param, which is CloudNameStorageManagement
CloudNameStorageManagement:
default: overcloud.{{network.name.lower()}}.localdomain
description: >
The DNS name of this cloud's {{network.name_lower}} endpoint. E.g.
'ci-overcloud.{{network.name.lower()}}.tripleo.org'.
type: string
{%- else %}
CloudName{{network.name}}:
default: overcloud.{{network.name.lower()}}.localdomain
description: >
The DNS name of this cloud's {{network.name_lower}} endpoint. E.g.
'ci-overcloud.{{network.name.lower()}}.tripleo.org'.
type: string
{%- endif %}
{%- endfor %}
CloudNameCtlplane:
default: overcloud.ctlplane.localdomain
description: >
The DNS name of this cloud's provisioning network endpoint. E.g.
'ci-overcloud.ctlplane.tripleo.org'.
type: string
ExtraHostFileEntries:
default: []
description: List of extra hosts entries to be appended to /etc/hosts
type: comma_delimited_list
EndpointMapOverride:
default: {}
description: Can be used to override the calculated EndpointMap
type: json
ExtraConfig:
default: {}
description: |
Additional hiera configuration to inject into the cluster.
type: json
{%- for role in roles %}
{{role.name}}ExtraConfig:
default: {}
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
{%- if role.deprecated_param_extraconfig is defined %}
{{role.deprecated_param_extraconfig}}:
default: {}
description: |
DEPRECATED use {{role.name}}ExtraConfig instead
type: json
{%- endif %}
{%- endfor %}
NeutronControlPlaneID:
default: 'ctlplane'
type: string
description: Neutron ID or name for ctlplane network.
NeutronPublicInterface:
default: nic1
description: Which interface to add to the NeutronPhysicalBridge.
type: string
ControlPlaneSubnet:
description: The name of the undercloud Neutron control plane subnet
default: ctlplane-subnet
type: string
ControlFixedIPs:
default: []
description: >
Control the IP allocation for the ControlVirtualIP port. E.g.
[{'ip_address':'1.2.3.4'}]
type: json
{%- for network in networks if network.vip|default(false) %}
{%- if network.name == 'External' %}
# TODO (dsneddon) Legacy name, eventually refactor to match network name
PublicVirtualFixedIPs:
default: []
description: >
Control the IP allocation for the PublicVirtualInterface port. E.g.
[{'ip_address':'1.2.3.4'}]
type: json
{%- else %}
{{network.name}}VirtualFixedIPs:
default: []
description: >
Control the IP allocation for the {{network.name}}VirtualInterface port. E.g.
[{'ip_address':'1.2.3.4'}]
type: json
{%- endif %}
{%- endfor %}
RabbitCookieSalt:
type: string
default: unset
description: Salt for the rabbit cookie; change this to force the randomly generated rabbit cookie to change.
RedisVirtualFixedIPs:
default: []
description: >
Control the IP allocation for the virtual IP used by Redis. E.g.
[{'ip_address':'1.2.3.4'}]
type: json
CloudDomain:
default: 'localdomain'
type: string
description: >
The DNS domain used for the hosts. This must match the
overcloud_domain_name configured on the undercloud.
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
the overcloud. It's accessible via the Nova metadata API.
type: json
# Compute-specific params
# FIXME(shardy) handle these deprecated names as they don't match compute.yaml
HypervisorNeutronPhysicalBridge:
default: 'br-ex'
description: >
An OVS bridge to create on each hypervisor. This defaults to br-ex,
the same as on the control plane nodes, since we have a uniform
configuration of the openvswitch agent. It should typically not need
to be changed.
type: string
HypervisorNeutronPublicInterface:
default: nic1
description: Which interface to add to the HypervisorNeutronPhysicalBridge.
type: string
NodeCreateBatchSize:
default: 30
description: Maximum batch size for creating nodes
type: number
# Jinja loop for Role in roles_data.yaml
{% for role in roles %}
# Parameters generated for {{role.name}} Role
{{role.name}}Services:
description: A list of service resources (configured in the Heat
resource_registry) which represent nested stacks
for each service that should get installed on the {{role.name}} role.
type: comma_delimited_list
{{role.name}}Count:
description: Number of {{role.name}} nodes to deploy
type: number
default: {{role.CountDefault|default(0)}}
{{role.name}}HostnameFormat:
type: string
description: >
Format for {{role.name}} node hostnames.
Note %index% is translated into the index of the node, e.g. 0/1/2 etc,
and %stackname% is replaced with the stack name, e.g. overcloud.
{% if role.HostnameFormatDefault %}
default: "{{role.HostnameFormatDefault}}"
{% else %}
default: "%stackname%-{{role.name.lower()}}-%index%"
{% endif %}
{{role.name}}RemovalPolicies:
default: []
type: json
description: >
List of resources to be removed from {{role.name}} ResourceGroup when
doing an update which requires removal of specific resources.
Example format ComputeRemovalPolicies: [{'resource_list': ['0']}]
{{role.name}}SchedulerHints:
type: json
description: Optional scheduler hints to pass to nova
default: {}
{%- if role.deprecated_param_scheduler_hints is defined %}
{{role.deprecated_param_scheduler_hints}}:
type: json
description: DEPRECATED - use {{role.name}}SchedulerHints instead
default: {}
{%- endif %}
{{role.name}}Parameters:
type: json
description: Optional role-specific parameters to be provided to the service
default: {}
{% endfor %}
# Identifiers to trigger tasks on nodes
UpdateIdentifier:
default: ''
type: string
description: >
Setting this to a previously unused value during stack-update will
trigger a package update on all nodes.
DeployIdentifier:
default: ''
type: string
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
AddVipsToEtcHosts:
default: True
type: boolean
description: >
Set to true to append per-network VIPs to /etc/hosts on each node.
DeploymentServerBlacklist:
default: []
type: comma_delimited_list
description: >
List of server hostnames to blacklist from any triggered deployments.
{% for role in roles %}
{%- if role.deprecated_param_scheduler_hints is defined or role.deprecated_param_extraconfig is defined %}
{%- if not parameter_groups_defined|default(false) %}
parameter_groups:
- label: deprecated
description: Do not use deprecated params, they will be removed.
parameters:
{%- set parameter_groups_defined = true %}
{%- endif %}
{%- endif %}
{%- if role.deprecated_param_scheduler_hints is defined %}
- {{role.deprecated_param_scheduler_hints}}
{%- endif %}
{%- if role.deprecated_param_extraconfig is defined %}
- {{role.deprecated_param_extraconfig}}
{%- endif %}
{%- endfor %}
conditions:
add_vips_to_etc_hosts: {equals : [{get_param: AddVipsToEtcHosts}, True]}
control_fixed_ip_not_set: {equals : [{get_param: ControlFixedIPs}, []]}
resources:
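# VipHosts renders one '<IP> <hostname>' /etc/hosts line per VIP-enabled
# network, e.g. (illustrative) '192.0.2.5 overcloud.ctlplane.localdomain'.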
VipHosts:
type: OS::Heat::Value
properties:
type: string
value:
list_join:
- "\n"
- - str_replace:
template: IP HOST
params:
IP: {get_attr: [VipMap, net_ip_map, ctlplane]}
HOST: {get_param: CloudNameCtlplane}
{%- for network in networks if network.vip|default(false) %}
{%- if network.name == 'External' %}
# Special case the External hostname param, which is CloudName
- str_replace:
template: IP HOST
params:
IP: {get_attr: [VipMap, net_ip_map, {{network.name_lower}}]}
HOST: {get_param: CloudName}
{%- elif network.name == 'InternalApi' %}
# Special case the Internal API hostname param, which is CloudNameInternal
- str_replace:
template: IP HOST
params:
IP: {get_attr: [VipMap, net_ip_map, {{network.name_lower}}]}
HOST: {get_param: CloudNameInternal}
{%- elif network.name == 'StorageMgmt' %}
# Special case StorageMgmt hostname param, which is CloudNameStorageManagement
- str_replace:
template: IP HOST
params:
IP: {get_attr: [VipMap, net_ip_map, {{network.name_lower}}]}
HOST: {get_param: CloudNameStorageManagement}
{%- else %}
- str_replace:
template: IP HOST
params:
IP: {get_attr: [VipMap, net_ip_map, {{network.name_lower}}]}
HOST: {get_param: CloudName{{network.name}}}
{%- endif %}
{%- endfor %}
HeatAuthEncryptionKey:
type: OS::TripleO::RandomString
PcsdPassword:
type: OS::TripleO::RandomString
properties:
length: 16
HorizonSecret:
type: OS::TripleO::RandomString
properties:
length: 10
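# NetCidrMapValue builds the network->CIDR map. The map_replace renames
# the 'ctlplane' key to the NeutronControlPlaneID parameter value and
# substitutes any CIDR reported as 'disabled' with the ctlplane subnet
# CIDR, so consumers always receive a usable control plane CIDR.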
NetCidrMapValue:
type: OS::Heat::Value
properties:
type: json
value:
map_replace:
- map_merge:
- {get_attr: [Networks, net_cidr_map]}
- ctlplane: {get_attr: [ControlVirtualIP, subnets, 0, cidr]}
- keys:
ctlplane: {get_param: NeutronControlPlaneID}
values:
disabled: {get_attr: [ControlVirtualIP, subnets, 0, cidr]}
ServiceNetMap:
type: OS::TripleO::ServiceNetMap
EndpointMap:
type: OS::TripleO::EndpointMap
properties:
CloudEndpoints:
ctlplane: {get_param: CloudNameCtlplane}
{%- for network in networks if network.vip|default(false) %}
{%- if network.name == 'External' %}
# Special case the External hostname param, which is CloudName
{{network.name_lower}}: {get_param: CloudName}
{%- elif network.name == 'InternalApi' %}
# Special case the Internal API hostname param, which is CloudNameInternal
{{network.name_lower}}: {get_param: CloudNameInternal}
{%- elif network.name == 'StorageMgmt' %}
# Special case StorageMgmt hostname param, which is CloudNameStorageManagement
{{network.name_lower}}: {get_param: CloudNameStorageManagement}
{%- else %}
{{network.name_lower}}: {get_param: CloudName{{network.name}}}
{%- endif %}
{%- endfor %}
NetIpMap: {get_attr: [VipMap, net_ip_map]}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
EndpointMapData:
type: OS::Heat::Value
properties:
type: json
value:
map_merge:
- {get_attr: [EndpointMap, endpoint_map]}
- {get_param: EndpointMapOverride}
SshKnownHostsConfig:
type: OS::TripleO::Ssh::KnownHostsConfig
properties:
known_hosts:
list_join:
- ''
{% for role in roles %}
- {get_attr: [{{role.name}}, known_hosts_entry]}
{% endfor %}
SshKnownHostsHostnames:
type: OS::Heat::Value
properties:
value:
map_merge:
list_concat:
{% for role in roles %}
- {get_attr: [{{role.name}}, known_hosts_hostnames]}
{% endfor %}
# Jinja loop for Role in roles_data.yaml
{% for role in roles %}
# Resources generated for {{role.name}} Role
{{role.name}}ServiceChain:
type: OS::TripleO::Services
properties:
Services:
get_param: {{role.name}}Services
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
ServiceData:
net_cidr_map: {get_attr: [NetCidrMapValue, value]}
EndpointMap: {get_attr: [EndpointMapData, value]}
DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
RoleName: {{role.name}}
RoleParameters:
map_merge:
- {{role.RoleParametersDefault|default({})}}
- get_param: {{role.name}}Parameters
# Lookup of role_data via heat outputs is slow, so workaround this by caching
# the value in an OS::Heat::Value resource
{{role.name}}ServiceChainRoleData:
type: OS::Heat::Value
properties:
type: json
value: {get_attr: [{{role.name}}ServiceChain, role_data]}
{{role.name}}ServiceConfigSettings:
type: OS::Heat::Value
properties:
type: json
value:
map_merge:
- get_attr: [{{role.name}}ServiceChainRoleData, value, config_settings]
{% for r in roles %}
- get_attr: [{{r.name}}ServiceChainRoleData, value, global_config_settings]
{% endfor %}
# This next step combines two yaql passes:
# - The inner one does a deep merge on the service_config_settings for all roles
# - The outer one filters the map based on the services enabled for the role
# then merges the result into one map.
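# Illustrative example (hypothetical values): if the merged
# service_config_settings is {'nova-api': {'foo': 'bar'}, 'glance-api':
# {'baz': 'qux'}} and only 'nova-api' is enabled on this role, the
# result is {'foo': 'bar'}.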
- yaql:
expression: let(root => $) -> $.data.map.items().where($[0] in coalesce($root.data.services, [])).select($[1]).reduce($1.mergeWith($2), {})
data:
map:
yaql:
expression: $.data.where($ != null).reduce($1.mergeWith($2), {})
data:
{% for r in roles %}
- get_attr: [{{r.name}}ServiceChainRoleData, value, service_config_settings]
{% endfor %}
services: {get_attr: [{{role.name}}ServiceNames, value]}
{{role.name}}MergedConfigSettings:
type: OS::Heat::Value
properties:
type: json
value:
config_settings: {}
global_config_settings: {}
service_config_settings: {}
merged_config_settings:
map_merge:
- get_attr: [{{role.name}}ServiceConfigSettings, value]
- get_param: ExtraConfig
{%- if role.deprecated_param_extraconfig is defined %}
- get_param: {{role.deprecated_param_extraconfig}}
{%- endif %}
- get_param: {{role.name}}ExtraConfig
# Filter any null/None service_names which may be present due to mapping
# of services to OS::Heat::None
{{role.name}}ServiceNames:
type: OS::Heat::Value
depends_on: {{role.name}}ServiceChain
properties:
type: comma_delimited_list
value:
yaql:
expression: coalesce($.data, []).where($ != null)
data: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_names]}
{{role.name}}HostsDeployment:
type: OS::Heat::StructuredDeployments
properties:
name: {{role.name}}HostsDeployment
config: {get_attr: [hostsConfig, config_id]}
servers: {get_attr: [{{role.name}}Servers, value]}
{{role.name}}SshKnownHostsDeployment:
type: OS::TripleO::Ssh::KnownHostsDeployment
properties:
name: {{role.name}}SshKnownHostsDeployment
config: {get_resource: SshKnownHostsConfig}
servers: {get_attr: [{{role.name}}Servers, value]}
{{role.name}}AllNodesDeployment:
type: OS::TripleO::AllNodesDeployment
depends_on:
{% for role_inner in roles %}
- {{role_inner.name}}HostsDeployment
{% endfor %}
properties:
name: {{role.name}}AllNodesDeployment
config: {get_attr: [allNodesConfig, config_id]}
servers: {get_attr: [{{role.name}}Servers, value]}
input_values:
# Note we have to use yaql to look up the first hostname/ip in the
# list because heat path based attributes operate on the attribute
# inside the ResourceGroup, not the exposed list; see the discussion in
# https://bugs.launchpad.net/heat/+bug/1640488
# The coalesce is needed because $.data is None during heat validation
bootstrap_nodeid:
yaql:
expression: coalesce($.data, []).first(null)
data: {get_attr: [{{role.name}}, hostname]}
bootstrap_nodeid_ip:
yaql:
expression: coalesce($.data, []).first(null)
data: {get_attr: [{{role.name}}, ip_address]}
{{role.name}}AllNodesValidationDeployment:
type: OS::Heat::StructuredDeployments
depends_on: {{role.name}}AllNodesDeployment
properties:
name: {{role.name}}AllNodesValidationDeployment
config: {get_resource: AllNodesValidationConfig}
servers: {get_attr: [{{role.name}}Servers, value]}
{{role.name}}IpListMap:
type: OS::TripleO::Network::Ports::NetIpListMap
properties:
ControlPlaneIpList: {get_attr: [{{role.name}}, ip_address]}
{%- for network in networks %}
{%- if network.enabled|default(true) %}
{{network.name}}IpList: {get_attr: [{{role.name}}, {{network.name_lower}}_ip_address]}
{%- else %}
{{network.name}}IpList: {get_attr: [{{role.name}}, ip_address]}
{%- endif %}
{%- endfor %}
EnabledServices: {get_attr: [{{role.name}}ServiceNames, value]}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
ServiceHostnameList: {get_attr: [{{role.name}}, hostname]}
NetworkHostnameMap: {get_attr: [{{role.name}}NetworkHostnameMap, value]}
{{role.name}}NetworkHostnameMap:
type: OS::Heat::Value
properties:
type: json
value:
# Note (shardy) this somewhat complex yaql may be replaced
# with a map_deep_merge function in ocata. It merges the
# list of maps, but appends to colliding lists so we can
# create a map of lists for all nodes for each network
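# Illustrative example (hypothetical hostnames): merging
# [{'external': ['overcloud-controller-0']},
#  {'external': ['overcloud-controller-1']}]
# yields {'external': ['overcloud-controller-0', 'overcloud-controller-1']}.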
yaql:
expression: dict($.data.where($ != null).flatten().selectMany($.items()).groupBy($[0], $[1]).select([$[0], $[1].flatten()]))
data:
- {get_attr: [{{role.name}}, hostname_map]}
{{role.name}}:
type: OS::Heat::ResourceGroup
depends_on: Networks
update_policy:
batch_create:
max_batch_size: {get_param: NodeCreateBatchSize}
properties:
count: {get_param: {{role.name}}Count}
removal_policies: {get_param: {{role.name}}RemovalPolicies}
resource_def:
type: OS::TripleO::{{role.name}}
properties:
CloudDomain: {get_param: CloudDomain}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
EndpointMap: {get_attr: [EndpointMapData, value]}
Hostname:
str_replace:
template: {get_param: {{role.name}}HostnameFormat}
params:
'%stackname%': {get_param: 'OS::stack_name'}
NodeIndex: '%index%'
# Note, SchedulerHints must be defined here, not only in the
# nested template, as it can contain %index%
{{role.name}}SchedulerHints:
map_merge:
{%- if role.deprecated_param_scheduler_hints is defined %}
- {get_param: {{role.deprecated_param_scheduler_hints}}}
{%- endif %}
- {get_param: {{role.name}}SchedulerHints}
ServiceConfigSettings: {get_attr: [{{role.name}}ServiceConfigSettings, value]}
ServiceNames: {get_attr: [{{role.name}}ServiceNames, value]}
MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChainRoleData, value, monitoring_subscriptions]}
LoggingSources: {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_sources]}
LoggingGroups: {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_groups]}
ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_metadata_settings]}
DeploymentServerBlacklistDict: {get_attr: [DeploymentServerBlacklistDict, value]}
RoleParameters:
map_merge:
- {{role.RoleParametersDefault|default({})}}
- get_param: {{role.name}}Parameters
{% endfor %}
{% for role in roles %}
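# The yaql below defaults to an empty map when no servers exist and
# drops entries whose value is null, so deployments only target nodes
# that actually have a nova server resource.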
{{role.name}}Servers:
type: OS::Heat::Value
depends_on: {{role.name}}
properties:
type: json
value:
yaql:
expression: let(servers=>switch(isDict($.data.servers) => $.data.servers, true => {})) -> $servers.deleteAll($servers.keys().where($servers[$] = null))
data:
servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}
# This is a different format to *Servers, as it creates a map of lists
# whereas *Servers creates a map of maps with keys of the nested resource names
ServerIdMap:
type: OS::Heat::Value
properties:
value:
server_ids:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}, nova_server_resource]}
{% endfor %}
bootstrap_server_id:
yaql:
expression: coalesce($.data, []).first(null)
data: {get_attr: [{{primary_role_name}}, nova_server_resource]}
# This resource just creates a dict out of the DeploymentServerBlacklist,
# which is a list. The dict is used in the role templates to set a condition
# on whether to create the deployment resources. We can't use the list
# directly because there is no way to ask Heat if a list contains a specific
# value.
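# E.g. (illustrative) DeploymentServerBlacklist: ['compute-0', 'compute-1']
# becomes {'compute-0': 1, 'compute-1': 1}, so templates can test
# membership with a plain key lookup.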
DeploymentServerBlacklistDict:
type: OS::Heat::Value
properties:
type: json
value:
map_merge:
repeat:
template:
hostname: 1
for_each:
hostname: {get_param: DeploymentServerBlacklist}
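# Assembles the /etc/hosts content: VIP entries first (only when
# AddVipsToEtcHosts is true), then per-node entries from every role,
# then any ExtraHostFileEntries supplied by the operator.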
hostsConfig:
type: OS::TripleO::Hosts::SoftwareConfig
properties:
hosts:
list_join:
- "\n"
- - if:
- add_vips_to_etc_hosts
- {get_attr: [VipHosts, value]}
- ''
-
{% for role in roles %}
- list_join:
- ""
- {get_attr: [{{role.name}}, hosts_entry]}
{% endfor %}
- {get_param: ExtraHostFileEntries}
allNodesConfig:
type: OS::TripleO::AllNodes::SoftwareConfig
properties:
{%- for network in networks if network.vip|default(false) %}
{%- if network.name == 'External' %}
# Special case the External hostname param, which is CloudName
cloud_name_{{network.name_lower}}: {get_param: CloudName}
{%- elif network.name == 'InternalApi' %}
# Special case the Internal API hostname param, which is CloudNameInternal
cloud_name_{{network.name_lower}}: {get_param: CloudNameInternal}
{%- elif network.name == 'StorageMgmt' %}
# Special case StorageMgmt hostname param, which is CloudNameStorageManagement
cloud_name_{{network.name_lower}}: {get_param: CloudNameStorageManagement}
{%- else %}
cloud_name_{{network.name_lower}}: {get_param: CloudName{{network.name}}}
{%- endif %}
{%- endfor %}
cloud_name_ctlplane: {get_param: CloudNameCtlplane}
enabled_services:
list_join:
- ','
{% for role in roles %}
- {get_attr: [{{role.name}}ServiceNames, value]}
{% endfor %}
cellv2_discovery_hosts:
# Collects compute hostnames for all roles with a service that requires cellv2 host discovery
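# E.g. (illustrative) e=[false, true] and l=[['ctrl-0'],
# ['compute-0', 'compute-1']] zip to pairs; only pairs whose flag is
# true are kept, flattening to ['compute-0', 'compute-1'].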
list_join:
- ','
- yaql:
expression: coalesce($.data.e.zip($.data.l).where($[0]).select($[1]).flatten(), [])
data:
e: # list of true/false values for whether cellv2 host discovery is required for the roles
{%- for role in roles %}
- {get_attr: [{{role.name}}ServiceChainRoleData, value, cellv2_discovery]}
{%- endfor %}
l: # list of list of compute hostnames for the roles
{%- for role in roles %}
- {get_attr: [{{role.name}}, hostname_map, canonical]}
{%- endfor %}
controller_ips: {get_attr: [{{primary_role_name}}, ip_address]}
controller_names: {get_attr: [{{primary_role_name}}, hostname]}
service_ips:
# Note (shardy) this somewhat complex yaql may be replaced
# with a map_deep_merge function in ocata. It merges the
# list of maps, but appends to colliding lists when a service
# is deployed on more than one role
yaql:
expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1]).select([$[0], $[1].flatten()]))
data:
l:
{% for role in roles %}
- {get_attr: [{{role.name}}IpListMap, service_ips]}
{% endfor %}
service_node_names:
yaql:
expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1]).select([$[0], $[1].flatten()]))
data:
l:
{% for role in roles %}
- {get_attr: [{{role.name}}IpListMap, service_hostnames]}
{% endfor %}
short_service_node_names:
yaql:
expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1]).select([$[0], $[1].flatten()]))
data:
l:
{% for role in roles %}
- {get_attr: [{{role.name}}IpListMap, short_service_hostnames]}
{% endfor %}
short_service_bootstrap_node:
yaql:
expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1]).select([$[0], $[1].flatten().first()]))
data:
l:
{% for role in roles %}
- {get_attr: [{{role.name}}IpListMap, short_service_bootstrap_hostnames]}
{% endfor %}
NetVipMap: {get_attr: [VipMap, net_ip_map]}
RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
DeployIdentifier: {get_param: DeployIdentifier}
UpdateIdentifier: {get_param: UpdateIdentifier}
MysqlRootPassword:
type: OS::TripleO::RandomString
properties:
length: 10
RabbitCookie:
type: OS::TripleO::RandomString
properties:
length: 20
salt: {get_param: RabbitCookieSalt}
DefaultPasswords:
type: OS::TripleO::DefaultPasswords
properties:
DefaultMysqlRootPassword: {get_attr: [MysqlRootPassword, value]}
DefaultRabbitCookie: {get_attr: [RabbitCookie, value]}
DefaultHeatAuthEncryptionKey: {get_attr: [HeatAuthEncryptionKey, value]}
DefaultPcsdPassword: {get_attr: [PcsdPassword, value]}
DefaultHorizonSecret: {get_attr: [HorizonSecret, value]}
# creates the network architecture
Networks:
type: OS::TripleO::Network
ControlVirtualIP:
type: OS::TripleO::Network::Ports::ControlPlaneVipPort
depends_on: Networks
properties:
name: control_virtual_ip
network: {get_param: NeutronControlPlaneID}
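# When ControlFixedIPs is left empty, the VIP is allocated from the
# ControlPlaneSubnet; otherwise the operator-supplied fixed IPs are
# used verbatim.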
fixed_ips:
if:
- control_fixed_ip_not_set
- [{subnet: {get_param: ControlPlaneSubnet}}]
- get_param: ControlFixedIPs
replacement_policy: AUTO
RedisVirtualIP:
depends_on: Networks
type: OS::TripleO::Network::Ports::RedisVipPort
properties:
ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
PortName: redis_virtual_ip
NetworkName: {get_attr: [ServiceNetMap, service_net_map, RedisNetwork]}
ServiceName: redis
FixedIPs: {get_param: RedisVirtualFixedIPs}
{%- for network in networks if network.vip|default(false) %}
{%- if network.name == 'External' %}
# The public VIP is on the External net, falls back to ctlplane
PublicVirtualIP:
depends_on: Networks
type: OS::TripleO::Network::Ports::ExternalVipPort
properties:
ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
PortName: public_virtual_ip
FixedIPs: {get_param: PublicVirtualFixedIPs}
{%- elif network.name == 'StorageMgmt' %}
{{network.name}}VirtualIP:
depends_on: Networks
type: OS::TripleO::Network::Ports::{{network.name}}VipPort
properties:
ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
PortName: storage_management_virtual_ip
FixedIPs: {get_param: {{network.name}}VirtualFixedIPs}
{%- else %}
{{network.name}}VirtualIP:
depends_on: Networks
type: OS::TripleO::Network::Ports::{{network.name}}VipPort
properties:
ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
PortName: {{network.name_lower}}_virtual_ip
FixedIPs: {get_param: {{network.name}}VirtualFixedIPs}
{%- endif %}
{%- endfor %}
VipMap:
type: OS::TripleO::Network::Ports::NetVipMap
properties:
ControlPlaneIp: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
{%- for network in networks if network.vip|default(false) %}
{%- if network.name == 'External' %}
ExternalIp: {get_attr: [PublicVirtualIP, ip_address]}
ExternalIpUri: {get_attr: [PublicVirtualIP, ip_address_uri]}
{%- else %}
{{network.name}}Ip: {get_attr: [{{network.name}}VirtualIP, ip_address]}
{{network.name}}IpUri: {get_attr: [{{network.name}}VirtualIP, ip_address_uri]}
{%- endif %}
{%- endfor %}
# No tenant or management VIP required
# Because of nested get_attr functions in the KeystoneAdminVip output, we
# can't determine which attributes of VipMap are used until after
# ServiceNetMap's attribute values are available.
depends_on: ServiceNetMap
# All Nodes Validations
AllNodesValidationConfig:
type: OS::TripleO::AllNodes::Validation
properties:
PingTestIps:
list_join:
- ' '
-
{%- for network in networks if network.enabled|default(true) %}
- yaql:
expression: coalesce($.data, []).first(null)
data: {get_attr: [{{primary_role_name}}, {{network.name_lower}}_ip_address]}
{%- endfor %}
# Optional ExtraConfig for all nodes - all roles are passed in here, but
# the nested template may configure each role differently (or not at all)
AllNodesExtraConfig:
type: OS::TripleO::AllNodesExtraConfig
depends_on:
{% for role in roles %}
- {{role.name}}AllNodesValidationDeployment
{% endfor %}
properties:
servers:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}Servers, value]}
{% endfor %}
BlacklistedIpAddresses:
type: OS::Heat::Value
properties:
value:
list_concat:
{% for role in roles %}
- {get_attr: [{{role.name}}, blacklist_ip_address]}
{% endfor %}
BlacklistedHostnames:
type: OS::Heat::Value
properties:
value:
list_concat:
{% for role in roles %}
- {get_attr: [{{role.name}}, blacklist_hostname]}
{% endfor %}
# Post deployment steps for all roles
AllNodesDeploySteps:
type: OS::TripleO::PostDeploySteps
depends_on:
- AllNodesExtraConfig
{% for role in roles %}
- {{role.name}}AllNodesDeployment
{% endfor %}
properties:
servers:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}Servers, value]}
{% endfor %}
stack_name: {get_param: 'OS::stack_name'}
EndpointMap: {get_attr: [EndpointMapData, value]}
ctlplane_service_ips:
# Note (shardy) this somewhat complex yaql may be replaced
# with a map_deep_merge function in ocata. It merges the
# list of maps, but appends to colliding lists when a service
# is deployed on more than one role
yaql:
expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1]).select([$[0], $[1].flatten()]))
data:
l:
{% for role in roles %}
- {get_attr: [{{role.name}}IpListMap, ctlplane_service_ips]}
{% endfor %}
role_data:
{% for role in roles %}
{{role.name}}:
map_merge:
- {get_attr: [{{role.name}}ServiceChainRoleData, value]}
- {get_attr: [{{role.name}}MergedConfigSettings, value]}
{% endfor %}
blacklisted_ip_addresses: {get_attr: [BlacklistedIpAddresses, value]}
blacklisted_hostnames: {get_attr: [BlacklistedHostnames, value]}
ssh_known_hosts_hostnames: {get_attr: [SshKnownHostsHostnames, value]}
ServerOsCollectConfigData:
type: OS::Heat::Value
properties:
type: json
value:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}, attributes, os_collect_config]}
{% endfor %}
DeployedServerEnvironment:
type: OS::TripleO::DeployedServerEnvironment
properties:
RoleCounts:
{% for role in roles %}
{{role.name}}DeployedServerCount: {get_param: {{role.name}}Count}
{% endfor %}
VipMap:
map_merge:
- {get_attr: [VipMap, net_ip_map]}
- redis: {get_attr: [RedisVirtualIP, ip_address]}
DeployedServerPortMap:
map_merge:
list_concat:
{% for role in roles %}
- {get_attr: [{{role.name}}, deployed_server_port_map]}
{% endfor %}
DeployedServerDeploymentSwiftDataMap:
map_merge:
list_concat:
{% for role in roles %}
- {get_attr: [{{role.name}}, deployed_server_deployment_swift_data_map]}
{% endfor %}
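# Derives the default route IP from the metadata_url of the first
# primary-role server's os-collect-config data. E.g. (hypothetical URL)
# 'http://192.0.2.1:8080/v1/...': split on '/' and take index 2 ->
# '192.0.2.1:8080', then split on ':' and take index 0 -> '192.0.2.1'.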
DefaultRouteIp:
str_split:
- ':'
- str_split:
- '/'
- {get_attr: [ServerOsCollectConfigData, value, {{primary_role_name}}, '0', request, metadata_url]}
- 2
- 0
outputs:
ManagedEndpoints:
description: Asserts that the keystone endpoints have been provisioned.
value: true
KeystoneURL:
description: URL for the Overcloud Keystone service
value: {get_attr: [EndpointMapData, value, KeystonePublic, uri]}
KeystoneAdminVip:
description: Keystone Admin VIP endpoint
# Note that these nested get_attr functions require a dependency
# relationship between VipMap and ServiceNetMap, since we can't determine
# which attributes of VipMap are used until after ServiceNetMap's attribute
# values are available. If this is ever reworked to not use nested
# get_attr, that dependency can be removed.
value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
EndpointMap:
description: |
Mapping of the resources with the needed info for their endpoints.
This includes the protocol used, the IP, port and also a full
representation of the URI.
value: {get_attr: [EndpointMapData, value]}
HostsEntry:
description: |
The content that should be appended to your /etc/hosts if you want to get
hostname-based access to the deployed nodes (useful for testing without
setting up a DNS).
value:
list_join:
- "\n"
- - {get_attr: [hostsConfig, hosts_entries]}
- - {get_attr: [VipHosts, value]}
EnabledServices:
description: The services enabled on each role
value:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}ServiceNames, value]}
{% endfor %}
RoleData:
description: The configuration data associated with each role
value:
{% for role in roles %}
{{role.name}}:
map_merge:
- {get_attr: [{{role.name}}ServiceChainRoleData, value]}
- {get_attr: [{{role.name}}MergedConfigSettings, value]}
{% endfor %}
RoleConfig:
description: The configuration workflows associated with each role
value: {get_attr: [AllNodesDeploySteps, RoleConfig]}
RoleNetIpMap:
description: Mapping of each network to a list of IPs for each role
value:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}IpListMap, net_ip_map]}
{% endfor %}
RoleNetHostnameMap:
description: Mapping of each network to a list of hostnames for each role
value:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}NetworkHostnameMap, value]}
{% endfor %}
RoleTags:
description: Tags for each role, as defined in roles_data.yaml
value:
{%- for role in roles %}
{{role.name}}: {{role.tags|default([])}}
{%- endfor %}
ServerOsCollectConfigData:
description: The os-collect-config configuration associated with each server resource
value: {get_attr: [ServerOsCollectConfigData, value]}
VipMap:
description: Mapping of each network to VIP addresses. Also includes the Redis VIP.
value:
map_merge:
- {get_attr: [VipMap, net_ip_map]}
- redis: {get_attr: [RedisVirtualIP, ip_address]}
ServerIdData:
description: Mapping of each role to a list of nova server IDs and the bootstrap ID
value: {get_attr: [ServerIdMap, value]}
DeployedServerEnvironment:
description:
Environment data that can be used as input into the services stack when
using split-stack.
value: {get_attr: [DeployedServerEnvironment, deployed_server_environment]}