Retire TripleO: remove repo content

The TripleO project is retiring:
- https://review.opendev.org/c/openstack/governance/+/905145

This commit removes the content of this project's repo.

Change-Id: I7f1dc7801a025d9bb28110f74340846b99870b79
Ghanshyam Mann 2024-02-24 11:32:57 -08:00
parent 1393d39be3
commit c2ff9b8f49
1794 changed files with 8 additions and 116224 deletions

@@ -1,25 +0,0 @@
kinds:
- j2: "**/*.j2.yaml"
- playbook: "**/common/*.yml"
- yaml: "roles/*.yaml"
mock_modules:
- tripleo_all_nodes_data
skip_list:
# TODO(zbr): Address in follow-ups
- role-name
- unnamed-task
# yaml ones
- yaml[braces]
- yaml[colons]
- yaml[commas]
- yaml[comments]
- yaml[comments-indentation]
- yaml[empty-lines]
- yaml[indentation]
- yaml[line-length]
- yaml[new-line-at-end-of-file]
- yaml[trailing-spaces]
- yaml[truthy]
- yaml[brackets]
- yaml[hyphens]
- experimental

.gitignore
@@ -1,191 +0,0 @@
# Add patterns in here to exclude files created by tools integrated with this
# repository, such as test frameworks from the project's recommended workflow,
# rendered documentation and package builds.
#
# Don't add patterns to exclude files created by preferred personal tools
# (editors, IDEs, your operating system itself even). These should instead be
# maintained outside the repository, for example in a ~/.gitignore file added
# with:
#
# git config --global core.excludesfile '~/.gitignore'
# Bytecompiled Python
*.py[cod]
# C extensions
*.so
# Packages
*.egg
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64
# Installer logs
pip-log.txt
# Unit test / coverage reports
.coverage
.tox
nosetests.xml
.testrepository
.stestr/*
cover/*
MagicMock/*
# Translations
*.mo
*.bundle
Gemfile.lock
# Built by pbr (python setup.py sdist):
AUTHORS
ChangeLog
# Sphinx
doc/build
# Editors
*~
.*.swp
.*sw?
# Leftovers from other commands like tox -e pep8,tht
.cache
common/post.yaml
common/services/blockstorage-role.yaml
common/services/cephstorage-role.yaml
common/services/compute-role.yaml
common/services/controller-role.yaml
common/services/objectstorage-role.yaml
deployment/apache/apache-baremetal-puppet.yaml
deployment/apache/apache-baremetal-ansible.yaml
deployment/container-image-prepare/container-image-prepare-baremetal-ansible.yaml
deployment/haproxy/haproxy-internal-tls-certmonger.yaml
deployment/octavia/octavia-deployment-config.yaml
environments/deployed-ports.yaml
environments/deployed-network-environment.yaml
environments/deployed-server-environment.yaml
environments/net-2-linux-bonds-with-vlans.yaml
environments/net-bond-with-vlans-no-external.yaml
environments/net-bond-with-vlans.yaml
environments/net-dpdkbond-with-vlans.yaml
environments/net-multiple-nics-vlans.yaml
environments/net-multiple-nics.yaml
environments/net-noop.yaml
environments/net-single-nic-linux-bridge-with-vlans.yaml
environments/net-single-nic-with-vlans-no-external.yaml
environments/net-single-nic-with-vlans.yaml
environments/network-environment-v6-all.yaml
environments/network-environment-v6.yaml
environments/network-environment.yaml
environments/network-isolation-no-tunneling.yaml
environments/network-isolation-v6-all.yaml
environments/network-isolation-v6.yaml
environments/network-isolation.yaml
environments/networks-disable.yaml
environments/overcloud-baremetal.yaml
environments/split-stack-consistent-hostname-format.yaml
environments/ssl/enable-internal-tls.yaml
extraconfig/all_nodes/swap-partition.yaml
extraconfig/all_nodes/swap.yaml
extraconfig/nova_metadata/krb-service-principals/blockstorage-role.yaml
extraconfig/nova_metadata/krb-service-principals/cephstorage-role.yaml
extraconfig/nova_metadata/krb-service-principals/compute-role.yaml
extraconfig/nova_metadata/krb-service-principals/controller-role.yaml
extraconfig/nova_metadata/krb-service-principals/objectstorage-role.yaml
extraconfig/tasks/major_upgrade_pacemaker_init.yaml
network/external.yaml
network/external_v6.yaml
network/internal_api.yaml
network/internal_api_v6.yaml
network/management.yaml
network/management_v6.yaml
network/networks.yaml
network/ports/deployed_external.yaml
network/ports/deployed_internal_api.yaml
network/ports/deployed_management.yaml
network/ports/deployed_storage.yaml
network/ports/deployed_storage_mgmt.yaml
network/ports/deployed_tenant.yaml
network/ports/deployed_vip_external.yaml
network/ports/deployed_vip_internal_api.yaml
network/ports/deployed_vip_management.yaml
network/ports/deployed_vip_storage.yaml
network/ports/deployed_vip_storage_mgmt.yaml
network/ports/deployed_vip_tenant.yaml
network/ports/external.yaml
network/ports/external_from_pool.yaml
network/ports/external_from_pool_v6.yaml
network/ports/external_resource_external.yaml
network/ports/external_resource_external_v6.yaml
network/ports/external_resource_internal_api.yaml
network/ports/external_resource_internal_api_v6.yaml
network/ports/external_resource_management.yaml
network/ports/external_resource_management_v6.yaml
network/ports/external_resource_storage.yaml
network/ports/external_resource_storage_mgmt.yaml
network/ports/external_resource_storage_mgmt_v6.yaml
network/ports/external_resource_storage_v6.yaml
network/ports/external_resource_tenant.yaml
network/ports/external_resource_tenant_v6.yaml
network/ports/external_v6.yaml
network/ports/internal_api.yaml
network/ports/internal_api_from_pool.yaml
network/ports/internal_api_from_pool_v6.yaml
network/ports/internal_api_v6.yaml
network/ports/management.yaml
network/ports/management_from_pool.yaml
network/ports/management_from_pool_v6.yaml
network/ports/management_v6.yaml
network/ports/net_ip_list_map.yaml
network/ports/net_ip_map.yaml
network/ports/net_vip_map_external.yaml
network/ports/net_vip_map_external_v6.yaml
network/ports/storage.yaml
network/ports/storage_from_pool.yaml
network/ports/storage_from_pool_v6.yaml
network/ports/storage_mgmt.yaml
network/ports/storage_mgmt_from_pool.yaml
network/ports/storage_mgmt_from_pool_v6.yaml
network/ports/storage_mgmt_v6.yaml
network/ports/storage_v6.yaml
network/ports/tenant.yaml
network/ports/tenant_from_pool.yaml
network/ports/tenant_from_pool_v6.yaml
network/ports/tenant_v6.yaml
network/service_net_map.yaml
network/storage.yaml
network/storage_mgmt.yaml
network/storage_mgmt_v6.yaml
network/storage_v6.yaml
network/tenant.yaml
network/tenant_v6.yaml
overcloud-resource-registry-puppet.yaml
overcloud.yaml
puppet/blockstorage-config.yaml
puppet/blockstorage-role.yaml
puppet/cephstorage-config.yaml
puppet/cephstorage-role.yaml
puppet/compute-config.yaml
puppet/compute-role.yaml
puppet/controller-config.yaml
puppet/controller-role.yaml
puppet/objectstorage-config.yaml
puppet/objectstorage-role.yaml
puppet/post.yaml
tripleo_heat_templates/tests/ansible-pacemaker
tripleo_heat_templates/tests/tripleo-ansible
# Files created by releasenotes build
releasenotes/build

@@ -1,3 +0,0 @@
[DEFAULT]
test_path=${TEST_PATH:-./}
top_dir=./
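With this configuration in place, the unit test suite would typically be run from the repository root with (a usage sketch, assuming stestr is installed):
stestr run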

LICENSE
@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,134 +1,10 @@
This project is no longer maintained.
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
========================
Team and repository tags
========================
.. image:: https://governance.openstack.org/tc/badges/tripleo-heat-templates.svg
:target: https://governance.openstack.org/tc/reference/tags/index.html
.. Change things from this point on
======================
tripleo-heat-templates
======================
Heat templates to deploy OpenStack using OpenStack.
* Free software: Apache License (2.0)
* Documentation: https://docs.openstack.org/tripleo-docs/latest/
* Source: https://opendev.org/openstack/tripleo-heat-templates
* Bugs: https://bugs.launchpad.net/tripleo
* Release notes: https://docs.openstack.org/releasenotes/tripleo-heat-templates/
Features
--------
The ability to deploy a multi-node, role-based OpenStack deployment using
OpenStack Heat. Notable features include:
* Choice of deployment/configuration tooling: puppet, (soon) docker
* Role-based deployment: roles for the controller, compute, ceph, swift,
and cinder storage
* Physical network configuration: support for isolated networks, bonding,
and standard ctlplane networking
Directories
-----------
A description of the directory layout in TripleO Heat Templates.
* environments: contains heat environment files that can be used with -e
on the command line to enable features, etc. (see the example after this list)
* extraconfig: templates used to enable 'extra' functionality. Includes
functionality for distro specific registration and upgrades.
* firstboot: example first_boot scripts that can be used when initially
creating instances.
* network: heat templates to help create isolated networks and ports
* puppet: templates mostly driven by configuration with puppet. To use these
templates you can use the overcloud-resource-registry-puppet.yaml.
* validation-scripts: validation scripts useful to all deployment
configurations
* roles: example roles that can be used with the tripleoclient to generate
a roles_data.yaml for a deployment. See the
`roles/README.rst <roles/README.rst>`_ for additional details.
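For example, a feature environment from the environments directory is enabled by passing it with -e (the file name below is illustrative):
openstack overcloud deploy --templates -e environments/network-isolation.yaml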
Service testing matrix
----------------------
The configuration for the CI scenarios is defined in `tripleo-heat-templates/ci/`
and should be executed according to the following table:
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| - | scn000 | scn001 | scn002 | scn003 | scn004 | scn006 | scn007 | scn009 | scn010 | scn013 | non-ha | ovh-ha |
+================+========+========+========+========+========+========+========+========+========+========+========+========+
| keystone | X | X | X | X | X | X | X | | X | X | X | X |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| glance | | rbd | swift | file | rgw | file | file | | rbd | file | file | file |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| cinder | | rbd | iscsi | | | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| heat | | X | X | | | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| ironic | | | | | | X | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| mysql | X | X | X | X | X | X | X | | X | X | X | X |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| neutron | | ovn | ovn | ovn | ovn | ovn | ovs | | ovn | ovn | ovn | ovn |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| neutron-bgpvpn | | | | | wip | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| neutron-l2gw | | | | | wip | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| om-rpc | | rabbit | rabbit | amqp1 | rabbit | rabbit | rabbit | | rabbit | rabbit | rabbit | rabbit |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| om-notify | | rabbit | rabbit | rabbit | rabbit | rabbit | rabbit | | rabbit | rabbit | rabbit | rabbit |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| redis | | X | X | | | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| haproxy | | X | X | X | X | X | X | | X | X | X | X |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| memcached | | X | X | X | X | X | X | | X | X | X | X |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| pacemaker | | X | X | X | X | X | X | | X | X | X | X |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| nova | | qemu | qemu | qemu | qemu | ironic | qemu | | qemu | qemu | qemu | qemu |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| placement | | X | X | X | X | X | X | | X | X | X | X |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| ntp | X | X | X | X | X | X | X | X | X | X | X | X |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| snmp | X | X | X | X | X | X | X | X | X | X | X | X |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| timezone | X | X | X | X | X | X | X | X | X | X | X | X |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| swift | | | X | | | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| aodh | | X | X | | | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| ceilometer | | X | X | | | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| gnocchi | | rbd | swift | | | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| barbican | | | X | | | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| cephrgw | | | | | X | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| cephmds | | | | | X | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| manila | | | | | X | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| collectd | | X | | | | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| designate | | | | X | | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| octavia | | | | | | | | | X | X | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| Extra Firewall | | | | X | | | | | | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
For any further questions, please email
openstack-discuss@lists.openstack.org or join #openstack-dev on
OFTC.

@@ -1 +0,0 @@
[python: **.py]

@@ -1,122 +0,0 @@
- name: Controller
count: 3
defaults:
network_config:
template: templates/net_config_bridge.j2
default_route_network:
- external
instances:
- hostname: overcloud-controller-0
managed: true
networks:
- network: ctlplane
fixed_ip: 192.168.24.10
- network: external
subnet: external_subnet
fixed_ip: 172.20.12.80
- network: internal_api
subnet: internal_api_subnet
fixed_ip: 172.16.2.85
- network: storage
subnet: storage_subnet
fixed_ip: 172.16.1.148
- network: storage_mgmt
subnet: storage_mgmt_subnet
- network: tenant
subnet: tenant_subnet
fixed_ip: 172.16.0.244
- hostname: overcloud-controller-1
managed: true
networks:
- network: ctlplane
fixed_ip: 192.168.24.11
- network: external
subnet: external_subnet
fixed_ip: 172.20.12.81
- network: internal_api
subnet: internal_api_subnet
fixed_ip: 172.16.2.86
- network: storage
subnet: storage_subnet
fixed_ip: 172.16.1.149
- network: storage_mgmt
subnet: storage_mgmt_subnet
- network: tenant
subnet: tenant_subnet
fixed_ip: 172.16.0.245
- hostname: overcloud-controller-2
managed: true
networks:
- network: ctlplane
fixed_ip: 192.168.24.12
- network: external
subnet: external_subnet
fixed_ip: 172.20.12.82
- network: internal_api
subnet: internal_api_subnet
fixed_ip: 172.16.2.87
- network: storage
subnet: storage_subnet
fixed_ip: 172.16.1.150
- network: storage_mgmt
subnet: storage_mgmt_subnet
- network: tenant
subnet: tenant_subnet
fixed_ip: 172.16.0.246
- name: Compute
count: 3
defaults:
network_config:
template: ""
instances:
- hostname: overcloud-compute-0
managed: true
networks:
- network: ctlplane
fixed_ip: 192.168.24.20
- network: external
subnet: external_subnet
fixed_ip: 172.20.12.90
- network: internal_api
subnet: internal_api_subnet
fixed_ip: 172.16.2.95
- network: storage
subnet: storage_subnet
fixed_ip: 172.16.1.158
- network: tenant
subnet: tenant_subnet
fixed_ip: 172.16.0.144
- hostname: overcloud-compute-1
managed: true
networks:
- network: ctlplane
fixed_ip: 192.168.24.21
- network: external
subnet: external_subnet
fixed_ip: 172.20.12.91
- network: internal_api
subnet: internal_api_subnet
fixed_ip: 172.16.2.96
- network: storage
subnet: storage_subnet
fixed_ip: 172.16.1.159
- network: tenant
subnet: tenant_subnet
fixed_ip: 172.16.0.145
- hostname: overcloud-compute-2
managed: true
networks:
- network: ctlplane
fixed_ip: 192.168.24.22
- network: external
subnet: external_subnet
fixed_ip: 172.20.12.92
- network: internal_api
subnet: internal_api_subnet
fixed_ip: 172.16.2.98
- network: storage
subnet: storage_subnet
fixed_ip: 172.16.1.160
- network: tenant
subnet: tenant_subnet
fixed_ip: 172.16.0.146
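A node layout of this shape is consumed by the baremetal provisioning step that runs before the overcloud deployment; roughly (the file names here are illustrative):
openstack overcloud node provision --stack overcloud --output overcloud-baremetal-deployed.yaml baremetal_deployment.yaml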

@@ -1,11 +0,0 @@
# This is a cross-platform list tracking distribution packages needed by tests;
# see https://docs.openstack.org/infra/bindep/ for additional information.
libssl-dev [platform:dpkg]
openssl-devel [platform:rpm]
gcc [platform:rpm]
python36-devel [platform:rhel8 platform:centos-8]
python3-devel [platform:rpm !platform:rhel8 !platform:centos-8]
# For SELinux
python3-libselinux [platform:rpm]
python3-libsemanage [platform:rpm]
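These entries are consumed by the bindep tool; for instance, listing the packages missing on the current platform (assuming bindep is installed):
bindep -b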

@@ -1,11 +0,0 @@
=======================
TripleO CI environments
=======================
TripleO CI environments are used exclusively for continuous integration
or development purposes.
They should not be used in production, and we do not guarantee they work
outside TripleO CI.
For more information about TripleO CI, please see:
https://github.com/openstack-infra/tripleo-ci

@@ -1,17 +0,0 @@
heat_template_version: wallaby
parameters:
servers:
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
resources:
IronicVBMCSetup:
type: vbmc_setup.yaml
properties:
servers: {get_param: servers}

@@ -1,64 +0,0 @@
heat_template_version: wallaby
parameters:
servers:
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
resources:
ExtraConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: |
#!/bin/bash
set -e
dnf install -y python3-virtualbmc libvirt-client libvirt-daemon libvirt qemu-kvm
systemctl start libvirtd virtualbmc
git clone https://opendev.org/openstack/ironic /tmp/ironic
mkdir -p /var/log/bmlogs
chmod 777 /var/log/bmlogs
# Create an ssh keypair and place the private key somewhere that ansible inside
# the neutron_api container can read it.
ssh-keygen -P "" -f /etc/puppet/ci-key
chmod 644 /etc/puppet/ci-key
cat /etc/puppet/ci-key.pub >> /root/.ssh/authorized_keys
LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"}
LIBVIRT_STORAGE_POOL_PATH=${LIBVIRT_STORAGE_POOL_PATH:-/var/lib/libvirt/images}
mkdir -p $LIBVIRT_STORAGE_POOL_PATH
if ! virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then
virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir --target $LIBVIRT_STORAGE_POOL_PATH
virsh pool-autostart $LIBVIRT_STORAGE_POOL
virsh pool-start $LIBVIRT_STORAGE_POOL
fi
pool_state=$(virsh pool-info $LIBVIRT_STORAGE_POOL | grep State | awk '{ print $2 }')
if [ "$pool_state" != "running" ] ; then
virsh pool-start $LIBVIRT_STORAGE_POOL
fi
/tmp/ironic/devstack/tools/ironic/scripts/create-node.sh -n node1 -c 1 -m 3072 -d 10 -b br-ex -p 1161 -M 1350 -f qcow2 -a x86_64 -E qemu -l /var/log/bmlogs -A 66:0d:1d:d8:0b:11 > /var/log/bmlogs/create-node-1.log 2>&1 < /dev/null
/tmp/ironic/devstack/tools/ironic/scripts/create-node.sh -n node2 -c 1 -m 3072 -d 10 -b br-ex -p 1162 -M 1350 -f qcow2 -a x86_64 -E qemu -l /var/log/bmlogs -A 66:0d:1d:d8:0b:22 > /var/log/bmlogs/create-node-2.log 2>&1 < /dev/null
vbmc --no-daemon add node1 --port 1161
vbmc --no-daemon start node1
vbmc --no-daemon add node2 --port 1162
vbmc --no-daemon start node2
disown -a
ExtraDeployments:
type: OS::Heat::SoftwareDeploymentGroup
properties:
servers: {get_param: servers}
config: {get_resource: ExtraConfig}
actions: ['CREATE'] # Only do this on CREATE
name: VirtNodeExtraConfig

@@ -1,254 +0,0 @@
###############################################################################
# File generated by TripleO
###############################################################################
###############################################################################
# Role: Controller #
###############################################################################
- name: Controller
description: |
Controller role that has all the controller services loaded and handles
Database, Messaging and Network functions.
CountDefault: 1
tags:
- primary
- controller
# Create external Neutron bridge for SNAT (and floating IPs when using
# ML2/OVS without DVR)
- external_bridge
networks:
ExternalCloud1:
subnet: external_cloud_1_subnet
InternalApiCloud1:
subnet: internal_api_cloud_1_subnet
StorageCloud1:
subnet: storage_cloud_1_subnet
StorageMgmtCloud1:
subnet: storage_mgmt_cloud_1_subnet
TenantCloud1:
subnet: tenant_cloud_1_subnet
# For systems with both IPv4 and IPv6, you may specify a gateway network for
# each, such as ['ControlPlane', 'External']
default_route_networks: ['ExternalCloud1']
HostnameFormatDefault: '%stackname%-controller-%index%'
RoleParametersDefault:
OVNCMSOptions: "enable-chassis-as-gw"
# Deprecated & backward-compatible values (FIXME: Make parameters consistent)
# Set uses_deprecated_params to True if any deprecated params are used.
uses_deprecated_params: True
deprecated_param_extraconfig: 'controllerExtraConfig'
update_serial: 1
ServicesDefault:
- OS::TripleO::Services::Aide
- OS::TripleO::Services::AodhApi
- OS::TripleO::Services::AodhEvaluator
- OS::TripleO::Services::AodhListener
- OS::TripleO::Services::AodhNotifier
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::BarbicanApi
- OS::TripleO::Services::BarbicanBackendSimpleCrypto
- OS::TripleO::Services::BarbicanBackendDogtag
- OS::TripleO::Services::BarbicanBackendKmip
- OS::TripleO::Services::BarbicanBackendPkcs11Crypto
- OS::TripleO::Services::BootParams
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CeilometerAgentCentral
- OS::TripleO::Services::CeilometerAgentNotification
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::CephGrafana
- OS::TripleO::Services::CephMds
- OS::TripleO::Services::CephMgr
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephRbdMirror
- OS::TripleO::Services::CephRgw
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderBackendDellSc
- OS::TripleO::Services::CinderBackendDellEMCPowerFlex
- OS::TripleO::Services::CinderBackendDellEMCPowermax
- OS::TripleO::Services::CinderBackendDellEMCPowerStore
- OS::TripleO::Services::CinderBackendDellEMCSc
- OS::TripleO::Services::CinderBackendDellEMCUnity
- OS::TripleO::Services::CinderBackendDellEMCVNX
- OS::TripleO::Services::CinderBackendDellEMCXtremio
- OS::TripleO::Services::CinderBackendIBMSvf
- OS::TripleO::Services::CinderBackendNetApp
- OS::TripleO::Services::CinderBackendNfs
- OS::TripleO::Services::CinderBackendNVMeOF
- OS::TripleO::Services::CinderBackendPure
- OS::TripleO::Services::CinderBackendRbd
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
- OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::ContainerImagePrepare
- OS::TripleO::Services::DesignateApi
- OS::TripleO::Services::DesignateCentral
- OS::TripleO::Services::DesignateProducer
- OS::TripleO::Services::DesignateWorker
- OS::TripleO::Services::DesignateMDNS
- OS::TripleO::Services::DesignateSink
- OS::TripleO::Services::DesignateBind
- OS::TripleO::Services::Etcd
- OS::TripleO::Services::ExternalSwiftProxy
- OS::TripleO::Services::Frr
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::GlanceApiInternal
- OS::TripleO::Services::GnocchiApi
- OS::TripleO::Services::GnocchiMetricd
- OS::TripleO::Services::GnocchiStatsd
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::HeatApi
- OS::TripleO::Services::HeatApiCfn
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::Horizon
- OS::TripleO::Services::IpaClient
- OS::TripleO::Services::Ipsec
- OS::TripleO::Services::IronicApi
- OS::TripleO::Services::IronicConductor
- OS::TripleO::Services::IronicInspector
- OS::TripleO::Services::IronicPxe
- OS::TripleO::Services::IronicNeutronAgent
- OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::LoginDefs
- OS::TripleO::Services::ManilaApi
- OS::TripleO::Services::ManilaBackendCephFs
- OS::TripleO::Services::ManilaBackendFlashBlade
- OS::TripleO::Services::ManilaBackendIsilon
- OS::TripleO::Services::ManilaBackendNetapp
- OS::TripleO::Services::ManilaBackendPowerMax
- OS::TripleO::Services::ManilaBackendUnity
- OS::TripleO::Services::ManilaBackendVNX
- OS::TripleO::Services::ManilaScheduler
- OS::TripleO::Services::ManilaShare
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::MetricsQdr
- OS::TripleO::Services::Multipathd
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronApi
- OS::TripleO::Services::NeutronBgpVpnApi
- OS::TripleO::Services::NeutronSfcApi
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL2gwAgent
- OS::TripleO::Services::NeutronL2gwApi
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronLinuxbridgeAgent
- OS::TripleO::Services::NeutronMetadataAgent
- OS::TripleO::Services::NeutronOvsAgent
- OS::TripleO::Services::NeutronAgentsIBConfig
- OS::TripleO::Services::NovaApi
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::NovaIronic
- OS::TripleO::Services::NovaMetadata
- OS::TripleO::Services::NovaScheduler
- OS::TripleO::Services::NovaVncProxy
- OS::TripleO::Services::ContainersLogrotateCrond
- OS::TripleO::Services::OctaviaApi
- OS::TripleO::Services::OctaviaDeploymentConfig
- OS::TripleO::Services::OctaviaHealthManager
- OS::TripleO::Services::OctaviaHousekeeping
- OS::TripleO::Services::OctaviaWorker
- OS::TripleO::Services::OpenStackClients
- OS::TripleO::Services::OVNDBs
- OS::TripleO::Services::OVNController
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::PlacementApi
- OS::TripleO::Services::OsloMessagingRpc
- OS::TripleO::Services::OsloMessagingNotify
- OS::TripleO::Services::Podman
- OS::TripleO::Services::Redis
- OS::TripleO::Services::Rhsm
- OS::TripleO::Services::Rsyslog
- OS::TripleO::Services::RsyslogSidecar
- OS::TripleO::Services::Securetty
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::SwiftProxy
- OS::TripleO::Services::SwiftDispersion
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::Timesync
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::Tuned
- OS::TripleO::Services::Unbound
###############################################################################
# Role: Compute #
###############################################################################
- name: Compute
description: |
Basic Compute Node role
CountDefault: 1
# Create external Neutron bridge (unset if using ML2/OVS without DVR)
tags:
- compute
- external_bridge
networks:
InternalApiCloud1:
subnet: internal_api_cloud_1_subnet
TenantCloud1:
subnet: tenant_cloud_1_subnet
StorageCloud1:
subnet: storage_cloud_1_subnet
HostnameFormatDefault: '%stackname%-novacompute-%index%'
RoleParametersDefault:
TunedProfileName: "virtual-host"
# Deprecated & backward-compatible values (FIXME: Make parameters consistent)
# Set uses_deprecated_params to True if any deprecated params are used.
# These deprecated_params only need to be used for existing roles and not for
# composable roles.
uses_deprecated_params: True
deprecated_param_extraconfig: 'NovaComputeExtraConfig'
deprecated_param_metadata: 'NovaComputeServerMetadata'
deprecated_param_ips: 'NovaComputeIPs'
deprecated_server_resource_name: 'NovaCompute'
update_serial: 25
ServicesDefault:
- OS::TripleO::Services::Aide
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::BootParams
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::ComputeCeilometerAgent
- OS::TripleO::Services::ComputeNeutronCorePlugin
- OS::TripleO::Services::ComputeNeutronL3Agent
- OS::TripleO::Services::ComputeNeutronMetadataAgent
- OS::TripleO::Services::ComputeNeutronOvsAgent
- OS::TripleO::Services::Frr
- OS::TripleO::Services::IpaClient
- OS::TripleO::Services::Ipsec
- OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::LoginDefs
- OS::TripleO::Services::MetricsQdr
- OS::TripleO::Services::Multipathd
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronBgpVpnBagpipe
- OS::TripleO::Services::NeutronLinuxbridgeAgent
- OS::TripleO::Services::NovaAZConfig
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::NovaLibvirtGuests
- OS::TripleO::Services::NovaMigrationTarget
- OS::TripleO::Services::ContainersLogrotateCrond
- OS::TripleO::Services::Podman
- OS::TripleO::Services::Rhsm
- OS::TripleO::Services::Rsyslog
- OS::TripleO::Services::RsyslogSidecar
- OS::TripleO::Services::Securetty
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timesync
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::Tuned
- OS::TripleO::Services::OVNController
- OS::TripleO::Services::OVNMetadataAgent

@@ -1,60 +0,0 @@
- name: ExternalCloud1
name_lower: external_cloud_1
mtu: 1350
vip: true
dns_domain: externalcloud1.ooo.test.
service_net_map_replace: external
subnets:
external_cloud_1_subnet:
ip_subnet: 10.0.0.0/24
gateway_ip: 10.0.0.1
allocation_pools:
- start: 10.0.0.10
end: 10.0.0.50
- name: InternalApiCloud1
name_lower: internal_api_cloud_1
mtu: 1350
vip: true
dns_domain: internalapicloud1.ooo.test.
service_net_map_replace: internal_api
subnets:
internal_api_cloud_1_subnet:
ip_subnet: 172.17.0.0/24
allocation_pools:
- start: 172.17.0.10
end: 172.17.0.250
- name: StorageCloud1
name_lower: storage_cloud_1
mtu: 1350
vip: true
dns_domain: storagecloud1.ooo.test.
service_net_map_replace: storage
subnets:
storage_cloud_1_subnet:
ip_subnet: 172.18.0.0/24
allocation_pools:
- start: 172.18.0.10
end: 172.18.0.250
- name: StorageMgmtCloud1
name_lower: storage_mgmt_cloud_1
mtu: 1350
vip: true
dns_domain: storagemgmtcloud1.ooo.test.
service_net_map_replace: storage_mgmt
subnets:
storage_mgmt_cloud_1_subnet:
ip_subnet: 172.19.0.0/24
allocation_pools:
- start: 172.19.0.10
end: 172.19.0.250
- name: TenantCloud1
name_lower: tenant_cloud_1
mtu: 1350
dns_domain: tenantcloud1.ooo.test.
service_net_map_replace: tenant
subnets:
tenant_cloud_1_subnet:
ip_subnet: 172.16.0.0/24
allocation_pools:
- start: 172.16.0.10
end: 172.16.0.250
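Network definitions like these are provisioned ahead of the overcloud stack; a sketch of the usual invocation (file names are illustrative):
openstack overcloud network provision --output networks-deployed.yaml network_data.yaml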

@@ -1,6 +0,0 @@
- network: storage_mgmt_cloud_1
- network: internal_api_cloud_1
- network: storage_cloud_1
- ip_address: 10.0.0.5 # CI jobs rely on this fixed ip for the external VIP
network: external_cloud_1
- network: ctlplane
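The VIP list is provisioned similarly (again, file names are illustrative):
openstack overcloud network vip provision --stack overcloud --output vips-deployed.yaml vip_data.yaml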

@@ -1,4 +0,0 @@
This directory contains environments that are used in tripleo-ci. They may change from
release to release or within a release, and should not be relied upon in a production
environment. The top-level ``environments`` directory in tripleo-heat-templates
contains the production-ready environment files.

@@ -1,2 +0,0 @@
parameter_defaults:
CephPoolDefaultSize: 1

@@ -1,52 +0,0 @@
parameter_defaults:
ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2'
ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2'
ControllerServices:
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::ContainerImagePrepare
- OS::TripleO::Services::Podman
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::GlanceApiInternal
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronApi
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::OVNDBs
- OS::TripleO::Services::OVNController
- OS::TripleO::Services::OVNMetadataAgent
- OS::TripleO::Services::OsloMessagingRpc
- OS::TripleO::Services::OsloMessagingNotify
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::NovaApi
- OS::TripleO::Services::PlacementApi
- OS::TripleO::Services::NovaMetadata
- OS::TripleO::Services::NovaScheduler
- OS::TripleO::Services::ContainersLogrotateCrond
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Timesync
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::Horizon
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::NovaMigrationTarget
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Multipathd
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::NovaManager
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::virt_type: qemu
Debug: True
DockerPuppetDebug: True
NotificationDriver: 'noop'
GlanceBackend: 'file'
ContainerCli: podman

@@ -1,34 +0,0 @@
heat_template_version: wallaby
description: >
OpenStack Core Service
parameters:
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
Debug:
type: boolean
default: false
description: Set to True to enable debugging on all services.
resources:
outputs:
role_data:
description: Role data for the multinode firewall configuration
value:
service_name: multinode_core
firewall_rules:
'999 core':
proto: 'udp'
dport:
- 4789
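The firewall_rules map above is consumed by the TripleO firewall service; conceptually, the '999 core' entry accepts VXLAN traffic, i.e. something along the lines of this iptables rule (illustrative, not the exact rendered rule):
-A INPUT -p udp --dport 4789 -j ACCEPT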

@@ -1,23 +0,0 @@
Generated Network Isolation Templates
-------------------------------------
These templates were generated by the UI tool at
https://github.com/cybertron/tripleo-scripts#net-iso-genpy
ui-settings.pickle is specific to the tool. TripleO will not use it when
doing deployments with these templates, but it is needed to be able to
load the templates into the UI again. Note that the UI only reads this file,
so any changes made by hand to the templates will not be reflected in the UI.
The network-isolation.yaml file needs to reference the port files shipped with
tripleo-heat-templates, so by default the tool generates the paths assuming
network-isolation.yaml will be copied into the environments/ directory of
tripleo-heat-templates.
If the standard tripleo-heat-templates are in use, then the
network-isolation-absolute.yaml file can be used instead. It has hard-coded
references to the port files in /usr/share/openstack-tripleo-heat-templates.
If the generated network isolation templates are at ~/generated-templates, an
example deployment command would look like:
openstack overcloud deploy --templates -e ~/generated-templates/network-isolation-absolute.yaml -e ~/generated-templates/network-environment.yaml

@@ -1,18 +0,0 @@
parameter_defaults:
ControllerNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2'
ComputeNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2'
BlockStorageNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2'
CellControllerNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2'
ObjectStorageNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2'
CephStorageNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2'
ExternalNetCidr: 2001:db8:fd00:1000::/64
ExternalAllocationPools: [{"start": "2001:db8:fd00:1000::10", "end": "2001:db8:fd00:1000:ffff:ffff:ffff:fffe"}]
ExternalInterfaceDefaultRoute: 2001:db8:fd00:1000::1
InternalApiNetCidr: fd00:fd00:fd00:2000::/64
InternalApiAllocationPools: [{"start": "fd00:fd00:fd00:2000::10", "end": "fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe"}]
StorageNetCidr: fd00:fd00:fd00:3000::/64
StorageAllocationPools: [{"start": "fd00:fd00:fd00:3000::10", "end": "fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe"}]
StorageMgmtNetCidr: fd00:fd00:fd00:4000::/64
StorageMgmtAllocationPools: [{"start": "fd00:fd00:fd00:4000::10", "end": "fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe"}]
TenantNetCidr: 172.16.0.0/24
TenantAllocationPools: [{"start": "172.16.0.10", "end": "172.16.0.250"}]

@@ -1,29 +0,0 @@
resource_registry:
# External
OS::TripleO::Network::External: /usr/share/openstack-tripleo-heat-templates/network/external_v6.yaml
OS::TripleO::Network::Ports::ExternalVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external_v6.yaml
OS::TripleO::Controller::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external_v6.yaml
# InternalApi
OS::TripleO::Network::InternalApi: /usr/share/openstack-tripleo-heat-templates/network/internal_api_v6.yaml
OS::TripleO::Network::Ports::InternalApiVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api_v6.yaml
OS::TripleO::Controller::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api_v6.yaml
OS::TripleO::Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api_v6.yaml
# Storage
OS::TripleO::Network::Storage: /usr/share/openstack-tripleo-heat-templates/network/storage_v6.yaml
OS::TripleO::Network::Ports::StorageVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_v6.yaml
OS::TripleO::Controller::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_v6.yaml
OS::TripleO::Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_v6.yaml
OS::TripleO::CephStorage::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_v6.yaml
# StorageMgmt
OS::TripleO::Network::StorageMgmt: /usr/share/openstack-tripleo-heat-templates/network/storage_mgmt_v6.yaml
OS::TripleO::Network::Ports::StorageMgmtVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_v6.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_v6.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_v6.yaml
# Tenant
OS::TripleO::Network::Tenant: /usr/share/openstack-tripleo-heat-templates/network/tenant.yaml
OS::TripleO::Network::Ports::TenantVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml
OS::TripleO::Controller::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml
OS::TripleO::Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml
parameter_defaults:
ManilaIPv6: True

@@ -1,28 +0,0 @@
resource_registry:
# External
OS::TripleO::Network::External: ../../../../network/external_v6.yaml
OS::TripleO::Network::Ports::ExternalVipPort: ../../../../network/ports/external_v6.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../../../../network/ports/external_v6.yaml
# InternalApi
OS::TripleO::Network::InternalApi: ../../../../network/internal_api_v6.yaml
OS::TripleO::Network::Ports::InternalApiVipPort: ../../../../network/ports/internal_api_v6.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../../../../network/ports/internal_api_v6.yaml
OS::TripleO::Compute::Ports::InternalApiPort: ../../../../network/ports/internal_api_v6.yaml
# Storage
OS::TripleO::Network::Storage: ../../../../network/storage_v6.yaml
OS::TripleO::Network::Ports::StorageVipPort: ../../../../network/ports/storage_v6.yaml
OS::TripleO::Controller::Ports::StoragePort: ../../../../network/ports/storage_v6.yaml
OS::TripleO::Compute::Ports::StoragePort: ../../../../network/ports/storage_v6.yaml
OS::TripleO::CephStorage::Ports::StoragePort: ../../../../network/ports/storage_v6.yaml
# StorageMgmt
OS::TripleO::Network::StorageMgmt: ../../../../network/storage_mgmt_v6.yaml
OS::TripleO::Network::Ports::StorageMgmtVipPort: ../../../../network/ports/storage_mgmt_v6.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt_v6.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt_v6.yaml
# Tenant
OS::TripleO::Network::Tenant: ../../../../network/tenant.yaml
OS::TripleO::Network::Ports::TenantVipPort: ../../../../network/ports/tenant.yaml
OS::TripleO::Controller::Ports::TenantPort: ../../../../network/ports/tenant.yaml
OS::TripleO::Compute::Ports::TenantPort: ../../../../network/ports/tenant.yaml
parameter_defaults:
ManilaIPv6: True

@@ -1,732 +0,0 @@
(dp0
S'global_data'
p1
(dp2
S'control'
p3
(dp4
S'route'
p5
V192.168.24.1
p6
sS'mask'
p7
I24
sS'ec2'
p8
V192.168.24.1
p9
ssS'major'
p10
I1
sS'management'
p11
(dp12
S'start'
p13
V172.20.0.10
p14
sS'cidr'
p15
V172.20.0.0/24
p16
sS'vlan'
p17
I6
sS'end'
p18
V172.20.0.250
p19
ssS'dns2'
p20
V8.8.4.4
p21
sS'dns1'
p22
V8.8.8.8
p23
sS'storage'
p24
(dp25
g13
Vfd00:fd00:fd00:3000::10
p26
sg15
Vfd00:fd00:fd00:3000::/64
p27
sg17
I3
sg18
Vfd00:fd00:fd00:3000:ffff:ffff:ffff:fffe
p28
ssS'auto_routes'
p29
I00
sS'bond_options'
p30
V
p31
sS'version'
p32
I2
sS'external'
p33
(dp34
S'bridge'
p35
V''
p36
sg18
V2001:db8:fd00:1000:ffff:ffff:ffff:fffe
p37
sg17
I1
sg13
V2001:db8:fd00:1000::10
p38
sg15
V2001:db8:fd00:1000::/64
p39
sS'gateway'
p40
V2001:db8:fd00:1000::1
p41
ssS'internal_api'
p42
(dp43
g13
Vfd00:fd00:fd00:2000::10
p44
sg15
Vfd00:fd00:fd00:2000::/64
p45
sg17
I2
sg18
Vfd00:fd00:fd00:2000:ffff:ffff:ffff:fffe
p46
ssS'ipv6'
p47
I01
sS'storage_mgmt'
p48
(dp49
g13
Vfd00:fd00:fd00:4000::10
p50
sg15
Vfd00:fd00:fd00:4000::/64
p51
sg17
I4
sg18
Vfd00:fd00:fd00:4000:ffff:ffff:ffff:fffe
p52
ssS'minor'
p53
I2
sS'tenant'
p54
(dp55
g13
V172.16.0.10
p56
sg15
V172.16.0.0/24
p57
sg17
I5
sg18
V172.16.0.250
p58
sssS'data'
p59
(dp60
S'cinder-storage.yaml'
p61
(lp62
sS'ceph-storage.yaml'
p63
(lp64
(dp65
Vaddresses
p66
(lp67
sVnetwork
p68
VControlPlane
p69
sVprimary
p70
I01
sVmtu
p71
I1350
sS'members'
p72
(lp73
(dp74
Vip_netmask
p75
V0.0.0.0/0
p76
sVname
p77
VRoute
p78
sVdefault
p79
I01
sVnext_hop
p80
V{get_param: ControlPlaneDefaultRoute}
p81
sg72
(lp82
sVtype
p83
Vroute
p84
sasVroutes
p85
(lp86
sVuse_dhcp
p87
I00
sVtype
p88
Vinterface
p89
sVname
p90
Vnic1
p91
sa(dp92
Vaddresses
p93
(lp94
sVnetwork
p95
VStorage
p96
sVprimary
p97
I01
sVmtu
p98
I1350
sg72
(lp99
sVroutes
p100
(lp101
sVuse_dhcp
p102
I00
sVtype
p103
Vinterface
p104
sVname
p105
Vnic4
p106
sa(dp107
Vaddresses
p108
(lp109
sVnetwork
p110
VStorageMgmt
p111
sVprimary
p112
I01
sVmtu
p113
I1350
sg72
(lp114
sVroutes
p115
(lp116
sVuse_dhcp
p117
I00
sVtype
p118
Vinterface
p119
sVname
p120
Vnic5
p121
sasS'controller.yaml'
p122
(lp123
(dp124
Vaddresses
p125
(lp126
sVnetwork
p127
VControlPlane
p128
sVprimary
p129
I01
sVmtu
p130
I1350
sg72
(lp131
(dp132
Vip_netmask
p133
V0.0.0.0/0
p134
sVname
p135
VRoute
p136
sVdefault
p137
I01
sVnext_hop
p138
V{get_param: ControlPlaneDefaultRoute}
p139
sg72
(lp140
sVtype
p141
Vroute
p142
sasVroutes
p143
(lp144
sVuse_dhcp
p145
I00
sVtype
p146
Vinterface
p147
sVname
p148
Vnic1
p149
sa(dp150
Vdns_servers
p151
V{get_param: DnsServers}
p152
sVaddresses
p153
(lp154
sVnetwork
p155
VExternal
p156
sVmtu
p157
I-1
sg72
(lp158
(dp159
Vaddresses
p160
(lp161
sVnetwork
p162
VNone
p163
sVprimary
p164
I01
sVmtu
p165
I1350
sg72
(lp166
sVroutes
p167
(lp168
sVuse_dhcp
p169
I00
sVtype
p170
Vinterface
p171
sVname
p172
Vnic2
p173
sa(dp174
Vip_netmask
p175
V0.0.0.0/0
p176
sVname
p177
VRoute
p178
sVdefault
p179
I01
sVnext_hop
p180
V{get_param: ExternalInterfaceDefaultRoute}
p181
sg72
(lp182
sVtype
p183
Vroute
p184
sasVroutes
p185
(lp186
sVuse_dhcp
p187
I00
sVtype
p188
Vovs_bridge
p189
sVname
p190
Vbr-ex
p191
sa(dp192
Vaddresses
p193
(lp194
sVnetwork
p195
VInternalApi
p196
sVprimary
p197
I01
sVmtu
p198
I1350
sg72
(lp199
sVroutes
p200
(lp201
sVuse_dhcp
p202
I00
sVtype
p203
Vinterface
p204
sVname
p205
Vnic3
p206
sa(dp207
Vaddresses
p208
(lp209
sVnetwork
p210
VStorage
p211
sVprimary
p212
I01
sVmtu
p213
I1350
sg72
(lp214
sVroutes
p215
(lp216
sVuse_dhcp
p217
I00
sVtype
p218
Vinterface
p219
sVname
p220
Vnic4
p221
sa(dp222
Vaddresses
p223
(lp224
sVnetwork
p225
VStorageMgmt
p226
sVprimary
p227
I01
sVmtu
p228
I1350
sg72
(lp229
sVroutes
p230
(lp231
sVuse_dhcp
p232
I00
sVtype
p233
Vinterface
p234
sVname
p235
Vnic5
p236
sa(dp237
Vdns_servers
p238
V{get_param: DnsServers}
p239
sVaddresses
p240
(lp241
sVnetwork
p242
VTenant
p243
sVmtu
p244
I-1
sg72
(lp245
(dp246
Vaddresses
p247
(lp248
sVnetwork
p249
VNone
p250
sVprimary
p251
I01
sVmtu
p252
I1350
sg72
(lp253
sVroutes
p254
(lp255
sVuse_dhcp
p256
I00
sVtype
p257
Vinterface
p258
sVname
p259
Vnic6
p260
sasVroutes
p261
(lp262
sVuse_dhcp
p263
I00
sVtype
p264
Vovs_bridge
p265
sVname
p266
Vbr-tenant
p267
sasS'swift-storage.yaml'
p268
(lp269
sS'compute.yaml'
p270
(lp271
(dp272
Vaddresses
p273
(lp274
sVnetwork
p275
VControlPlane
p276
sVprimary
p277
I01
sVmtu
p278
I1350
sg72
(lp279
(dp280
Vip_netmask
p281
V0.0.0.0/0
p282
sVname
p283
VRoute
p284
sVdefault
p285
I01
sVnext_hop
p286
V{get_param: ControlPlaneDefaultRoute}
p287
sg72
(lp288
sVtype
p289
Vroute
p290
sasVroutes
p291
(lp292
sVuse_dhcp
p293
I00
sVtype
p294
Vinterface
p295
sVname
p296
Vnic1
p297
sa(dp298
Vaddresses
p299
(lp300
sVnetwork
p301
VInternalApi
p302
sVprimary
p303
I01
sVmtu
p304
I1350
sg72
(lp305
sVroutes
p306
(lp307
sVuse_dhcp
p308
I00
sVtype
p309
Vinterface
p310
sVname
p311
Vnic3
p312
sa(dp313
Vaddresses
p314
(lp315
sVnetwork
p316
VStorage
p317
sVprimary
p318
I01
sVmtu
p319
I1350
sg72
(lp320
sVroutes
p321
(lp322
sVuse_dhcp
p323
I00
sVtype
p324
Vinterface
p325
sVname
p326
Vnic4
p327
sa(dp328
Vdns_servers
p329
V{get_param: DnsServers}
p330
sVaddresses
p331
(lp332
sVnetwork
p333
VTenant
p334
sVmtu
p335
I-1
sg72
(lp336
(dp337
Vaddresses
p338
(lp339
sVnetwork
p340
VNone
p341
sVprimary
p342
I01
sVmtu
p343
I1350
sg72
(lp344
sVroutes
p345
(lp346
sVuse_dhcp
p347
I00
sVtype
p348
Vinterface
p349
sVname
p350
Vnic6
p351
sasVroutes
p352
(lp353
sVuse_dhcp
p354
I00
sVtype
p355
Vovs_bridge
p356
sVname
p357
Vbr-tenant
p358
sass.

View File

@ -1,23 +0,0 @@
Generated Network Isolation Templates
-------------------------------------
These templates were generated by the UI tool at
https://github.com/cybertron/tripleo-scripts#net-iso-genpy

ui-settings.pickle is specific to the tool. TripleO will not use it when
doing deployments with these templates, but it is needed to be able to
load the templates into the UI again. Note that the UI only reads this file,
so any changes made by hand to the templates will not be reflected in the UI.

The network-isolation.yaml file needs to reference the port files shipped with
tripleo-heat-templates, so by default the tool generates the paths assuming
network-isolation.yaml will be copied into the environments/ directory of
tripleo-heat-templates.

If the standard tripleo-heat-templates are in use, then the
network-isolation-absolute.yaml file can be used instead. It has hard-coded
references to the port files in /usr/share/openstack-tripleo-heat-templates.

If the generated network isolation templates are at ~/generated-templates, an
example deployment command would look like:

openstack overcloud deploy --templates -e ~/generated-templates/network-isolation-absolute.yaml -e ~/generated-templates/network-environment.yaml
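
For reference, the ui-settings.pickle files removed in this commit are plain
protocol-0 pickles of a nested dict (top-level keys 'global_data' and 'data',
as the opcode streams above and below show). A minimal Python sketch of
inspecting a local copy, assuming the hypothetical file name below;
pickletools disassembles the stream without executing it, which is the safer
first step for any pickle you did not produce yourself:

import pickle
import pickletools

# Hypothetical local copy of one of the deleted ui-settings.pickle files.
PICKLE_PATH = "ui-settings.pickle"

with open(PICKLE_PATH, "rb") as f:
    data = f.read()

# Disassemble the protocol-0 opcode stream without executing it.
pickletools.dis(data)

# Only load the file if you trust it; it deserializes to a nested dict:
# {'global_data': {'control': {...}, 'external': {...}, ...},
#  'data': {'controller.yaml': [...], 'compute.yaml': [...], ...}}
settings = pickle.loads(data)
print(sorted(settings))                  # ['data', 'global_data']
print(settings["global_data"]["dns1"])   # e.g. '8.8.8.8'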

View File

@ -1,27 +0,0 @@
parameter_merge_strategies:
ServiceNetMap: merge
parameter_defaults:
ControllerNetworkConfigTemplate: 'templates/ci/custom_network_multiple_nics.j2'
ComputeNetworkConfigTemplate: 'templates/ci/custom_network_multiple_nics.j2'
CloudNameExternalCloud1: overcloud1.ooo.test
CloudNameInternalApiCloud1: overcloud1.internalapicloud1.ooo.test
CloudNameStorageCloud1: overcloud1.storagecloud1.ooo.test
CloudNameStorageMgmtCloud1: overcloud1.storagemgmtcloud1.ooo.test
CloudNameCtlplane: overcloud1.ctlplane.ooo.test
CloudDomain: ooo.test
VipSubnetMap:
ctlplane: ctlplane-subnet
InternalApiCloud1: internal_api_cloud_1_subnet
StorageCloud1: storage_cloud_1_subnet
StorageMgmtCloud1: storage_mgmt_cloud_1_subnet
TenantCloud1: tenant_cloud_1_subnet
ExternalCloud1: external_cloud_1_subnet
redis: internal_api_cloud_1_subnet
ovn_dbs: internal_api_cloud_1_subnet
ServiceNetMap:
ComputeHostnameResolveNetwork: internal_api_cloud_1
ControllerHostnameResolveNetwork: internal_api_cloud_1
CephStorageHostnameResolveNetwork: storage_cloud_1
ControllerMetricsQdrNetwork: internal_api_cloud_1
ComputeMetricsQdrNetwork: internal_api_cloud_1
CephStorageMetricsQdrNetwork: storage_cloud_1

View File

@ -1,19 +0,0 @@
parameter_defaults:
ControllerNetworkConfigTemplate: 'templates/ci/multiple_nics.j2'
ComputeNetworkConfigTemplate: 'templates/ci/multiple_nics.j2'
BlockStorageNetworkConfigTemplate: 'templates/ci/multiple_nics.j2'
CellControllerNetworkConfigTemplate: 'templates/ci/multiple_nics.j2'
ObjectStorageNetworkConfigTemplate: 'templates/ci/multiple_nics.j2'
CephStorageNetworkConfigTemplate: 'templates/ci/multiple_nics.j2'
ExternalNetCidr: 10.0.0.0/24
ExternalAllocationPools: [{"start": "10.0.0.10", "end": "10.0.0.50"}]
ExternalInterfaceDefaultRoute: 10.0.0.1
PublicVirtualFixedIPs: [{ "ip_address": "10.0.0.5" }]
InternalApiNetCidr: 172.17.0.0/24
InternalApiAllocationPools: [{"start": "172.17.0.10", "end": "172.17.0.250"}]
StorageNetCidr: 172.18.0.0/24
StorageAllocationPools: [{"start": "172.18.0.10", "end": "172.18.0.250"}]
StorageMgmtNetCidr: 172.19.0.0/24
StorageMgmtAllocationPools: [{"start": "172.19.0.10", "end": "172.19.0.250"}]
TenantNetCidr: 172.16.0.0/24
TenantAllocationPools: [{"start": "172.16.0.10", "end": "172.16.0.250"}]

View File

@ -1,32 +0,0 @@
resource_registry:
# External
OS::TripleO::Network::External: /usr/share/openstack-tripleo-heat-templates/network/external.yaml
OS::TripleO::Network::Ports::ExternalVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
OS::TripleO::Controller::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
OS::TripleO::CellController::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
# InternalApi
OS::TripleO::Network::InternalApi: /usr/share/openstack-tripleo-heat-templates/network/internal_api.yaml
OS::TripleO::Network::Ports::InternalApiVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml
OS::TripleO::Controller::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml
OS::TripleO::CellController::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml
OS::TripleO::Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml
# Storage
OS::TripleO::Network::Storage: /usr/share/openstack-tripleo-heat-templates/network/storage.yaml
OS::TripleO::Network::Ports::StorageVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
OS::TripleO::Controller::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
OS::TripleO::CellController::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
OS::TripleO::Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
OS::TripleO::CephStorage::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
# StorageMgmt
OS::TripleO::Network::StorageMgmt: /usr/share/openstack-tripleo-heat-templates/network/storage_mgmt.yaml
OS::TripleO::Network::Ports::StorageMgmtVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml
OS::TripleO::CellController::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml
OS::TripleO::Compute::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml
# Tenant
OS::TripleO::Network::Tenant: /usr/share/openstack-tripleo-heat-templates/network/tenant.yaml
OS::TripleO::Network::Ports::TenantVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml
OS::TripleO::Controller::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml
OS::TripleO::CellController::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml
OS::TripleO::Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml

View File

@ -1,27 +0,0 @@
resource_registry:
# External
OS::TripleO::Network::External: ../../../../network/external.yaml
OS::TripleO::Network::Ports::ExternalVipPort: ../../../../network/ports/external.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../../../../network/ports/external.yaml
# InternalApi
OS::TripleO::Network::InternalApi: ../../../../network/internal_api.yaml
OS::TripleO::Network::Ports::InternalApiVipPort: ../../../../network/ports/internal_api.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../../../../network/ports/internal_api.yaml
OS::TripleO::Compute::Ports::InternalApiPort: ../../../../network/ports/internal_api.yaml
# Storage
OS::TripleO::Network::Storage: ../../../../network/storage.yaml
OS::TripleO::Network::Ports::StorageVipPort: ../../../../network/ports/storage.yaml
OS::TripleO::Controller::Ports::StoragePort: ../../../../network/ports/storage.yaml
OS::TripleO::Compute::Ports::StoragePort: ../../../../network/ports/storage.yaml
OS::TripleO::CephStorage::Ports::StoragePort: ../../../../network/ports/storage.yaml
# StorageMgmt
OS::TripleO::Network::StorageMgmt: ../../../../network/storage_mgmt.yaml
OS::TripleO::Network::Ports::StorageMgmtVipPort: ../../../../network/ports/storage_mgmt.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml
OS::TripleO::Compute::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml
# Tenant
OS::TripleO::Network::Tenant: ../../../../network/tenant.yaml
OS::TripleO::Network::Ports::TenantVipPort: ../../../../network/ports/tenant.yaml
OS::TripleO::Controller::Ports::TenantPort: ../../../../network/ports/tenant.yaml
OS::TripleO::Compute::Ports::TenantPort: ../../../../network/ports/tenant.yaml

View File

@ -1,716 +0,0 @@
(dp0
S'global_data'
p1
(dp2
S'control'
p3
(dp4
S'route'
p5
V192.168.24.1
p6
sS'mask'
p7
I24
sS'ec2'
p8
V192.168.24.1
p9
ssS'major'
p10
I1
sS'management'
p11
(dp12
S'start'
p13
V172.20.0.10
p14
sS'cidr'
p15
V172.20.0.0/24
p16
sS'vlan'
p17
I6
sS'end'
p18
V172.20.0.250
p19
ssS'dns2'
p20
V8.8.4.4
p21
sS'dns1'
p22
V8.8.8.8
p23
sS'storage'
p24
(dp25
g13
V172.18.0.10
p26
sg15
V172.18.0.0/24
p27
sg17
I3
sg18
V172.18.0.250
p28
ssS'auto_routes'
p29
I01
sS'bond_options'
p30
V
p31
sS'version'
p32
I1
sS'external'
p33
(dp34
S'bridge'
p35
V''
p36
sg18
V10.0.0.50
p37
sg17
I1
sg13
V10.0.0.10
p38
sg15
V10.0.0.0/24
p39
sS'gateway'
p40
V10.0.0.1
p41
ssS'internal_api'
p42
(dp43
g13
V172.17.0.10
p44
sg15
V172.17.0.0/24
p45
sg17
I2
sg18
V172.17.0.250
p46
ssS'ipv6'
p47
I00
sS'storage_mgmt'
p48
(dp49
g13
V172.19.0.10
p50
sg15
V172.19.0.0/24
p51
sg17
I4
sg18
V172.19.0.250
p52
ssS'minor'
p53
I2
sS'tenant'
p54
(dp55
g13
V172.16.0.10
p56
sg15
V172.16.0.0/24
p57
sg17
I5
sg18
V172.16.0.250
p58
sssS'data'
p59
(dp60
S'cinder-storage.yaml'
p61
(lp62
sS'ceph-storage.yaml'
p63
(lp64
(dp65
Vaddresses
p66
(lp67
sVnetwork
p68
VControlPlane
p69
sVprimary
p70
I01
sVmtu
p71
I1350
sVport_name
p72
g31
sS'members'
p73
(lp74
sVroutes
p75
(lp76
sVuse_dhcp
p77
I00
sVtype
p78
Vinterface
p79
sVname
p80
Vnic1
p81
sa(dp82
Vaddresses
p83
(lp84
sVnetwork
p85
VStorage
p86
sVprimary
p87
I01
sVmtu
p88
I1350
sVport_name
p89
g31
sg73
(lp90
sVroutes
p91
(lp92
sVuse_dhcp
p93
I00
sVtype
p94
Vinterface
p95
sVname
p96
Vnic4
p97
sa(dp98
Vaddresses
p99
(lp100
sVnetwork
p101
VStorageMgmt
p102
sVprimary
p103
I01
sVmtu
p104
I1350
sVport_name
p105
g31
sg73
(lp106
sVroutes
p107
(lp108
sVuse_dhcp
p109
I00
sVtype
p110
Vinterface
p111
sVname
p112
Vnic5
p113
sasS'controller.yaml'
p114
(lp115
(dp116
Vaddresses
p117
(lp118
sVnetwork
p119
VControlPlane
p120
sVprimary
p121
I01
sVmtu
p122
I1350
sVport_name
p123
g31
sg73
(lp124
sVroutes
p125
(lp126
sVuse_dhcp
p127
I00
sVtype
p128
Vinterface
p129
sVname
p130
Vnic1
p131
sa(dp132
Vdns_servers
p133
V{get_param: DnsServers}
p134
sVaddresses
p135
(lp136
sVnetwork
p137
VExternal
p138
sVmtu
p139
I-1
sg73
(lp140
(dp141
Vaddresses
p142
(lp143
sVnetwork
p144
VNone
p145
sVprimary
p146
I01
sVmtu
p147
I1350
sVport_name
p148
g31
sg73
(lp149
sVroutes
p150
(lp151
sVuse_dhcp
p152
I00
sVtype
p153
Vinterface
p154
sVname
p155
Vnic2
p156
sasVroutes
p157
(lp158
sVuse_dhcp
p159
I00
sVtype
p160
Vovs_bridge
p161
sVname
p162
Vbr-ex
p163
sa(dp164
Vaddresses
p165
(lp166
sVnetwork
p167
VInternalApi
p168
sVprimary
p169
I01
sVmtu
p170
I1350
sVport_name
p171
g31
sg73
(lp172
sVroutes
p173
(lp174
sVuse_dhcp
p175
I00
sVtype
p176
Vinterface
p177
sVname
p178
Vnic3
p179
sa(dp180
Vaddresses
p181
(lp182
sVnetwork
p183
VStorage
p184
sVprimary
p185
I01
sVmtu
p186
I1350
sVport_name
p187
g31
sg73
(lp188
sVroutes
p189
(lp190
sVuse_dhcp
p191
I00
sVtype
p192
Vinterface
p193
sVname
p194
Vnic4
p195
sa(dp196
Vaddresses
p197
(lp198
sVnetwork
p199
VStorageMgmt
p200
sVprimary
p201
I01
sVmtu
p202
I1350
sVport_name
p203
g31
sg73
(lp204
sVroutes
p205
(lp206
sVuse_dhcp
p207
I00
sVtype
p208
Vinterface
p209
sVname
p210
Vnic5
p211
sa(dp212
Vdns_servers
p213
V{get_param: DnsServers}
p214
sVaddresses
p215
(lp216
sVnetwork
p217
VTenant
p218
sVmtu
p219
I-1
sg73
(lp220
(dp221
Vaddresses
p222
(lp223
sVnetwork
p224
VNone
p225
sVprimary
p226
I01
sVmtu
p227
I1350
sVport_name
p228
g31
sg73
(lp229
sVroutes
p230
(lp231
sVuse_dhcp
p232
I00
sVtype
p233
Vinterface
p234
sVname
p235
Vnic6
p236
sasVroutes
p237
(lp238
sVuse_dhcp
p239
I00
sVtype
p240
Vovs_bridge
p241
sVname
p242
Vbr-tenant
p243
sasS'swift-storage.yaml'
p244
(lp245
sS'compute.yaml'
p246
(lp247
(dp248
Vaddresses
p249
(lp250
sVnetwork
p251
VControlPlane
p252
sVprimary
p253
I01
sVmtu
p254
I1350
sVport_name
p255
g31
sg73
(lp256
sVroutes
p257
(lp258
sVuse_dhcp
p259
I00
sVtype
p260
Vinterface
p261
sVname
p262
Vnic1
p263
sa(dp264
Vaddresses
p265
(lp266
sVnetwork
p267
VInternalApi
p268
sVprimary
p269
I01
sVmtu
p270
I1350
sVport_name
p271
g31
sg73
(lp272
sVroutes
p273
(lp274
sVuse_dhcp
p275
I00
sVtype
p276
Vinterface
p277
sVname
p278
Vnic3
p279
sa(dp280
Vaddresses
p281
(lp282
sVnetwork
p283
VStorage
p284
sVprimary
p285
I01
sVmtu
p286
I1350
sVport_name
p287
g31
sg73
(lp288
sVroutes
p289
(lp290
sVuse_dhcp
p291
I00
sVtype
p292
Vinterface
p293
sVname
p294
Vnic4
p295
sa(dp296
Vdns_servers
p297
V{get_param: DnsServers}
p298
sVaddresses
p299
(lp300
sVnetwork
p301
VTenant
p302
sVmtu
p303
I-1
sg73
(lp304
(dp305
Vaddresses
p306
(lp307
sVnetwork
p308
VNone
p309
sVprimary
p310
I01
sVmtu
p311
I1350
sVport_name
p312
g31
sg73
(lp313
sVroutes
p314
(lp315
sVuse_dhcp
p316
I00
sVtype
p317
Vinterface
p318
sVname
p319
Vnic6
p320
sasVroutes
p321
(lp322
sVuse_dhcp
p323
I00
sVtype
p324
Vovs_bridge
p325
sVname
p326
Vbr-tenant
p327
sa(dp328
Vaddresses
p329
(lp330
sVnetwork
p331
VStorageMgmt
p332
sVprimary
p333
I01
sVmtu
p334
I1350
sVport_name
p335
g31
sg73
(lp336
sVroutes
p337
(lp338
sVuse_dhcp
p339
I00
sVtype
p340
Vinterface
p341
sVname
p342
Vnic5
p343
sass.

View File

@ -1,23 +0,0 @@
Generated Network Isolation Templates
-------------------------------------
These templates were generated by the UI tool at
https://github.com/cybertron/tripleo-scripts#net-iso-genpy

ui-settings.pickle is specific to the tool. TripleO will not use it when
doing deployments with these templates, but it is needed to be able to
load the templates into the UI again. Note that the UI only reads this file,
so any changes made by hand to the templates will not be reflected in the UI.

The network-isolation.yaml file needs to reference the port files shipped with
tripleo-heat-templates, so by default the tool generates the paths assuming
network-isolation.yaml will be copied into the environments/ directory of
tripleo-heat-templates.

If the standard tripleo-heat-templates are in use, then the
network-isolation-absolute.yaml file can be used instead. It has hard-coded
references to the port files in /usr/share/openstack-tripleo-heat-templates.

If the generated network isolation templates are at ~/generated-templates, an
example deployment command would look like:

openstack overcloud deploy --templates -e ~/generated-templates/network-isolation-absolute.yaml -e ~/generated-templates/network-environment.yaml
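
As a sketch of the round trip this README describes (the UI tool reads
ui-settings.pickle back in to reload the templates, and hand edits to the
rendered YAML never flow back into it), assuming the same nested-dict layout
as above and a trusted local file:

import pickle

with open("ui-settings.pickle", "rb") as f:
    settings = pickle.load(f)  # trusted local file only

# Change a setting in the tool's own copy; edits made by hand to the
# generated YAML would not be visible here, since the UI only reads
# this pickle.
settings["global_data"]["tenant"]["vlan"] = 50

with open("ui-settings.pickle", "wb") as f:
    # The dumps in this commit are protocol 0, so write the same format back.
    pickle.dump(settings, f, protocol=0)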

View File

@ -1,19 +0,0 @@
parameter_defaults:
ControllerNetworkConfigTemplate: 'templates/ci/public_bond.j2'
ComputeNetworkConfigTemplate: 'templates/ci/public_bond.j2'
BlockStorageNetworkConfigTemplate: 'templates/ci/public_bond.j2'
CellControllerNetworkConfigTemplate: 'templates/ci/public_bond.j2'
ObjectStorageNetworkConfigTemplate: 'templates/ci/public_bond.j2'
CephStorageNetworkConfigTemplate: 'templates/ci/public_bond.j2'
ExternalNetCidr: 10.0.0.0/24
ExternalAllocationPools: [{"start": "10.0.0.10", "end": "10.0.0.50"}]
ExternalInterfaceDefaultRoute: 10.0.0.1
InternalApiNetCidr: 172.17.0.0/24
InternalApiAllocationPools: [{"start": "172.17.0.10", "end": "172.17.0.250"}]
StorageNetCidr: 172.18.0.0/24
StorageAllocationPools: [{"start": "172.18.0.10", "end": "172.18.0.250"}]
StorageMgmtNetCidr: 172.19.0.0/24
StorageMgmtAllocationPools: [{"start": "172.19.0.10", "end": "172.19.0.250"}]
TenantNetCidr: 172.16.0.0/24
TenantAllocationPools: [{"start": "172.16.0.10", "end": "172.16.0.250"}]
BondInterfaceOvsOptions: bond_mode=balance-slb

View File

@ -1,26 +0,0 @@
resource_registry:
# External
OS::TripleO::Network::External: /usr/share/openstack-tripleo-heat-templates/network/external.yaml
OS::TripleO::Network::Ports::ExternalVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
OS::TripleO::Controller::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
# InternalApi
OS::TripleO::Network::InternalApi: /usr/share/openstack-tripleo-heat-templates/network/internal_api.yaml
OS::TripleO::Network::Ports::InternalApiVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml
OS::TripleO::Controller::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml
OS::TripleO::Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml
# Storage
OS::TripleO::Network::Storage: /usr/share/openstack-tripleo-heat-templates/network/storage.yaml
OS::TripleO::Network::Ports::StorageVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
OS::TripleO::Controller::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
OS::TripleO::Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
OS::TripleO::CephStorage::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
# StorageMgmt
OS::TripleO::Network::StorageMgmt: /usr/share/openstack-tripleo-heat-templates/network/storage_mgmt.yaml
OS::TripleO::Network::Ports::StorageMgmtVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml
# Tenant
OS::TripleO::Network::Tenant: /usr/share/openstack-tripleo-heat-templates/network/tenant.yaml
OS::TripleO::Network::Ports::TenantVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml
OS::TripleO::Controller::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml
OS::TripleO::Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml

View File

@ -1,26 +0,0 @@
resource_registry:
# External
OS::TripleO::Network::External: ../../../../network/external.yaml
OS::TripleO::Network::Ports::ExternalVipPort: ../../../../network/ports/external.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../../../../network/ports/external.yaml
# InternalApi
OS::TripleO::Network::InternalApi: ../../../../network/internal_api.yaml
OS::TripleO::Network::Ports::InternalApiVipPort: ../../../../network/ports/internal_api.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../../../../network/ports/internal_api.yaml
OS::TripleO::Compute::Ports::InternalApiPort: ../../../../network/ports/internal_api.yaml
# Storage
OS::TripleO::Network::Storage: ../../../../network/storage.yaml
OS::TripleO::Network::Ports::StorageVipPort: ../../../../network/ports/storage.yaml
OS::TripleO::Controller::Ports::StoragePort: ../../../../network/ports/storage.yaml
OS::TripleO::Compute::Ports::StoragePort: ../../../../network/ports/storage.yaml
OS::TripleO::CephStorage::Ports::StoragePort: ../../../../network/ports/storage.yaml
# StorageMgmt
OS::TripleO::Network::StorageMgmt: ../../../../network/storage_mgmt.yaml
OS::TripleO::Network::Ports::StorageMgmtVipPort: ../../../../network/ports/storage_mgmt.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml
# Tenant
OS::TripleO::Network::Tenant: ../../../../network/tenant.yaml
OS::TripleO::Network::Ports::TenantVipPort: ../../../../network/ports/tenant.yaml
OS::TripleO::Controller::Ports::TenantPort: ../../../../network/ports/tenant.yaml
OS::TripleO::Compute::Ports::TenantPort: ../../../../network/ports/tenant.yaml

View File

@ -1,699 +0,0 @@
(dp0
S'global_data'
p1
(dp2
S'control'
p3
(dp4
S'route'
p5
V192.168.24.1
p6
sS'mask'
p7
I24
sS'ec2'
p8
V192.168.24.1
p9
ssS'major'
p10
I1
sS'management'
p11
(dp12
S'start'
p13
V172.20.0.10
p14
sS'cidr'
p15
V172.20.0.0/24
p16
sS'vlan'
p17
I6
sS'end'
p18
V172.20.0.250
p19
ssS'dns2'
p20
V8.8.4.4
p21
sS'dns1'
p22
V8.8.8.8
p23
sS'storage'
p24
(dp25
g13
V172.18.0.10
p26
sg15
V172.18.0.0/24
p27
sg17
I3
sg18
V172.18.0.250
p28
ssS'auto_routes'
p29
I01
sS'bond_options'
p30
Vbond_mode=balance-slb
p31
sS'version'
p32
I2
sS'external'
p33
(dp34
S'bridge'
p35
V''
p36
sg18
V10.0.0.50
p37
sg17
I1
sg13
V10.0.0.10
p38
sg15
V10.0.0.0/24
p39
sS'gateway'
p40
V10.0.0.1
p41
ssS'internal_api'
p42
(dp43
g13
V172.17.0.10
p44
sg15
V172.17.0.0/24
p45
sg17
I2
sg18
V172.17.0.250
p46
ssS'ipv6'
p47
I00
sS'storage_mgmt'
p48
(dp49
g13
V172.19.0.10
p50
sg15
V172.19.0.0/24
p51
sg17
I4
sg18
V172.19.0.250
p52
ssS'minor'
p53
I2
sS'tenant'
p54
(dp55
g13
V172.16.0.10
p56
sg15
V172.16.0.0/24
p57
sg17
I5
sg18
V172.16.0.250
p58
sssS'data'
p59
(dp60
S'cinder-storage.yaml'
p61
(lp62
sS'ceph-storage.yaml'
p63
(lp64
(dp65
Vaddresses
p66
(lp67
sVnetwork
p68
VControlPlane
p69
sVprimary
p70
I01
sVmtu
p71
I1350
sS'members'
p72
(lp73
sVroutes
p74
(lp75
sVuse_dhcp
p76
I00
sVtype
p77
Vinterface
p78
sVname
p79
Vnic1
p80
sa(dp81
Vaddresses
p82
(lp83
sVnetwork
p84
VStorage
p85
sVprimary
p86
I01
sVmtu
p87
I1350
sg72
(lp88
sVroutes
p89
(lp90
sVuse_dhcp
p91
I00
sVtype
p92
Vinterface
p93
sVname
p94
Vnic5
p95
sa(dp96
Vaddresses
p97
(lp98
sVnetwork
p99
VStorageMgmt
p100
sVprimary
p101
I01
sVmtu
p102
I1350
sg72
(lp103
sVroutes
p104
(lp105
sVuse_dhcp
p106
I00
sVtype
p107
Vinterface
p108
sVname
p109
Vnic6
p110
sasS'controller.yaml'
p111
(lp112
(dp113
Vaddresses
p114
(lp115
sVnetwork
p116
VControlPlane
p117
sVprimary
p118
I01
sVmtu
p119
I1350
sg72
(lp120
sVroutes
p121
(lp122
sVuse_dhcp
p123
I00
sVtype
p124
Vinterface
p125
sVname
p126
Vnic1
p127
sa(dp128
Vdns_servers
p129
V{get_param: DnsServers}
p130
sVaddresses
p131
(lp132
sVnetwork
p133
VExternal
p134
sVmtu
p135
I-1
sg72
(lp136
(dp137
Vnetwork
p138
VNone
p139
sVbond_type
p140
Vovs
p141
sVovs_options
p142
V{get_param: BondInterfaceOvsOptions}
p143
sVmtu
p144
I-1
sg72
(lp145
(dp146
Vaddresses
p147
(lp148
sVnetwork
p149
VNone
p150
sVprimary
p151
I01
sVmtu
p152
I1350
sVroutes
p153
(lp154
sVuse_dhcp
p155
I00
sVtype
p156
Vinterface
p157
sVname
p158
Vnic2
p159
sa(dp160
Vaddresses
p161
(lp162
sVnetwork
p163
VNone
p164
sVprimary
p165
I00
sVmtu
p166
I1350
sVroutes
p167
(lp168
sVuse_dhcp
p169
I00
sVtype
p170
Vinterface
p171
sVname
p172
Vnic3
p173
sasVroutes
p174
(lp175
sVtype
p176
Vovs_bond
p177
sVname
p178
Vbond1
p179
sasVroutes
p180
(lp181
sVuse_dhcp
p182
I00
sVtype
p183
Vovs_bridge
p184
sVname
p185
Vbr-ex
p186
sa(dp187
Vaddresses
p188
(lp189
sVnetwork
p190
VInternalApi
p191
sVprimary
p192
I01
sVmtu
p193
I1350
sg72
(lp194
sVroutes
p195
(lp196
sVuse_dhcp
p197
I00
sVtype
p198
Vinterface
p199
sVname
p200
Vnic4
p201
sa(dp202
Vaddresses
p203
(lp204
sVnetwork
p205
VStorage
p206
sVprimary
p207
I01
sVmtu
p208
I1350
sg72
(lp209
sVroutes
p210
(lp211
sVuse_dhcp
p212
I00
sVtype
p213
Vinterface
p214
sVname
p215
Vnic5
p216
sa(dp217
Vaddresses
p218
(lp219
sVnetwork
p220
VStorageMgmt
p221
sVprimary
p222
I01
sVmtu
p223
I1350
sg72
(lp224
sVroutes
p225
(lp226
sVuse_dhcp
p227
I00
sVtype
p228
Vinterface
p229
sVname
p230
Vnic6
p231
sa(dp232
Vdns_servers
p233
V{get_param: DnsServers}
p234
sVaddresses
p235
(lp236
sVnetwork
p237
VTenant
p238
sVmtu
p239
I-1
sg72
(lp240
(dp241
Vaddresses
p242
(lp243
sVnetwork
p244
VNone
p245
sVprimary
p246
I01
sVmtu
p247
I1350
sg72
(lp248
sVroutes
p249
(lp250
sVuse_dhcp
p251
I00
sVtype
p252
Vinterface
p253
sVname
p254
Vnic7
p255
sasVroutes
p256
(lp257
sVuse_dhcp
p258
I00
sVtype
p259
Vovs_bridge
p260
sVname
p261
Vbr-tenant
p262
sasS'swift-storage.yaml'
p263
(lp264
sS'compute.yaml'
p265
(lp266
(dp267
Vaddresses
p268
(lp269
sVnetwork
p270
VControlPlane
p271
sVprimary
p272
I01
sVmtu
p273
I1350
sg72
(lp274
sVroutes
p275
(lp276
sVuse_dhcp
p277
I00
sVtype
p278
Vinterface
p279
sVname
p280
Vnic1
p281
sa(dp282
Vaddresses
p283
(lp284
sVnetwork
p285
VInternalApi
p286
sVprimary
p287
I01
sVmtu
p288
I1350
sg72
(lp289
sVroutes
p290
(lp291
sVuse_dhcp
p292
I00
sVtype
p293
Vinterface
p294
sVname
p295
Vnic4
p296
sa(dp297
Vaddresses
p298
(lp299
sVnetwork
p300
VStorage
p301
sVprimary
p302
I01
sVmtu
p303
I1350
sg72
(lp304
sVroutes
p305
(lp306
sVuse_dhcp
p307
I00
sVtype
p308
Vinterface
p309
sVname
p310
Vnic5
p311
sa(dp312
Vdns_servers
p313
V{get_param: DnsServers}
p314
sVaddresses
p315
(lp316
sVnetwork
p317
VTenant
p318
sVmtu
p319
I-1
sg72
(lp320
(dp321
Vaddresses
p322
(lp323
sVnetwork
p324
VNone
p325
sVprimary
p326
I01
sVmtu
p327
I1350
sg72
(lp328
sVroutes
p329
(lp330
sVuse_dhcp
p331
I00
sVtype
p332
Vinterface
p333
sVname
p334
Vnic7
p335
sasVroutes
p336
(lp337
sVuse_dhcp
p338
I00
sVtype
p339
Vovs_bridge
p340
sVname
p341
Vbr-tenant
p342
sass.

View File

@ -1,2 +0,0 @@
parameter_defaults:
NeutronDnsDomain: 'openstackgate.local'

View File

@ -1,2 +0,0 @@
parameter_defaults:
NeutronL3AgentExtensions: 'fip_qos,gateway_ip_qos,port_forwarding'

View File

@ -1,2 +0,0 @@
parameter_defaults:
NeutronSecurityGroupQuota: '150'

View File

@ -1,7 +0,0 @@
parameter_defaults:
StandaloneExtraConfig:
# Config related to octavia and nova libvirt
octavia::controller::connection_retry_interval: 10
nova::compute::libvirt::services::libvirt_virt_type: kvm
nova::compute::libvirt::virt_type: kvm
nova::compute::libvirt::cpu_mode: 'host-passthrough'

View File

@ -1,10 +0,0 @@
parameter_defaults:
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::virt_type: qemu
Debug: true
HideSensitiveLogs: false
DockerPuppetDebug: True
ExtraConfig:
tripleo::profile::pacemaker::database::mysql_bundle::provider_options: 'evs.suspect_timeout=PT30S'
CorosyncTokenTimeout: 30000

View File

@ -1,27 +0,0 @@
parameter_defaults:
ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2'
ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2'
ControllerServices:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::ContainerImagePrepare
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::OsloMessagingRpc
- OS::TripleO::Services::OsloMessagingNotify
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::Podman
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Timesync
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::Clustercheck
Debug: true
DockerPuppetDebug: True
NotificationDriver: 'noop'
ContainerCli: podman

View File

@ -1,164 +0,0 @@
# This scenario only installs Keystone and the services required to run it.
resource_registry:
OS::TripleO::Services::AodhApi: OS::Heat::None
OS::TripleO::Services::AodhEvaluator: OS::Heat::None
OS::TripleO::Services::AodhListener: OS::Heat::None
OS::TripleO::Services::AodhNotifier: OS::Heat::None
OS::TripleO::Services::Apache: OS::Heat::None
OS::TripleO::Services::AuditD: OS::Heat::None
OS::TripleO::Services::BarbicanApi: OS::Heat::None
OS::TripleO::Services::BarbicanBackendDogtag: OS::Heat::None
OS::TripleO::Services::BarbicanBackendKmip: OS::Heat::None
OS::TripleO::Services::BarbicanBackendPkcs11Crypto: OS::Heat::None
OS::TripleO::Services::BarbicanBackendSimpleCrypto: OS::Heat::None
OS::TripleO::Services::BlockStorageCinderVolume: OS::Heat::None
OS::TripleO::Services::BootParams: OS::Heat::None
OS::TripleO::Services::CeilometerAgentCentral: OS::Heat::None
OS::TripleO::Services::CeilometerAgentIpmi: OS::Heat::None
OS::TripleO::Services::CeilometerAgentNotification: OS::Heat::None
OS::TripleO::Services::CephClient: OS::Heat::None
OS::TripleO::Services::CephExternal: OS::Heat::None
OS::TripleO::Services::CephMds: OS::Heat::None
OS::TripleO::Services::CephMgr: OS::Heat::None
OS::TripleO::Services::CephMon: OS::Heat::None
OS::TripleO::Services::CephNfs: OS::Heat::None
OS::TripleO::Services::CephOSD: OS::Heat::None
OS::TripleO::Services::CephRbdMirror: OS::Heat::None
OS::TripleO::Services::CephRgw: OS::Heat::None
OS::TripleO::Services::Chrony: OS::Heat::None
OS::TripleO::Services::CinderApi: OS::Heat::None
OS::TripleO::Services::CinderBackup: OS::Heat::None
OS::TripleO::Services::CinderScheduler: OS::Heat::None
OS::TripleO::Services::CinderVolume: OS::Heat::None
OS::TripleO::Services::Clustercheck: OS::Heat::None
OS::TripleO::Services::Collectd: OS::Heat::None
OS::TripleO::Services::ComputeCeilometerAgent: OS::Heat::None
OS::TripleO::Services::ComputeInstanceHA: OS::Heat::None
OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
OS::TripleO::Services::ComputeNeutronL3Agent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronMetadataAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronOvsDpdk: OS::Heat::None
OS::TripleO::Services::ContainerImagePrepare: ../../deployment/container-image-prepare/container-image-prepare-baremetal-ansible.yaml
OS::TripleO::Services::DesignateApi: OS::Heat::None
OS::TripleO::Services::DesignateCentral: OS::Heat::None
OS::TripleO::Services::DesignateMDNS: OS::Heat::None
OS::TripleO::Services::DesignateProducer: OS::Heat::None
OS::TripleO::Services::DesignateSink: OS::Heat::None
OS::TripleO::Services::DesignateWorker: OS::Heat::None
OS::TripleO::Services::DesignateBind: OS::Heat::None
OS::TripleO::Services::Etcd: OS::Heat::None
OS::TripleO::Services::ExternalSwiftProxy: OS::Heat::None
OS::TripleO::Services::GlanceApi: OS::Heat::None
OS::TripleO::Services::GlanceApiInternal: OS::Heat::None
OS::TripleO::Services::GnocchiApi: OS::Heat::None
OS::TripleO::Services::GnocchiMetricd: OS::Heat::None
OS::TripleO::Services::GnocchiStatsd: OS::Heat::None
OS::TripleO::Services::HAProxyInternalTLS: OS::Heat::None
OS::TripleO::Services::HAProxyPublicTLS: OS::Heat::None
OS::TripleO::Services::HAproxy: OS::Heat::None
OS::TripleO::Services::HeatApi: OS::Heat::None
OS::TripleO::Services::HeatApiCfn: OS::Heat::None
OS::TripleO::Services::HeatEngine: OS::Heat::None
OS::TripleO::Services::Horizon: OS::Heat::None
OS::TripleO::Services::IpaClient: OS::Heat::None
OS::TripleO::Services::Ipsec: OS::Heat::None
OS::TripleO::Services::IronicApi: OS::Heat::None
OS::TripleO::Services::IronicConductor: OS::Heat::None
OS::TripleO::Services::IronicInspector: OS::Heat::None
OS::TripleO::Services::IronicNeutronAgent: OS::Heat::None
OS::TripleO::Services::IronicPxe: OS::Heat::None
OS::TripleO::Services::Iscsid: OS::Heat::None
# OS::TripleO::Services::Keystone: OS::Heat::None
OS::TripleO::Services::Logging::BarbicanApi: OS::Heat::None
OS::TripleO::Services::Logging::GlanceApi: OS::Heat::None
OS::TripleO::Services::Logging::HAProxy: OS::Heat::None
OS::TripleO::Services::Logging::HeatApi: OS::Heat::None
OS::TripleO::Services::Logging::HeatApiCfn: OS::Heat::None
# OS::TripleO::Services::Logging::Keystone: OS::Heat::None
OS::TripleO::Services::Logging::NeutronApi: OS::Heat::None
OS::TripleO::Services::Logging::NeutronCommon: OS::Heat::None
OS::TripleO::Services::Logging::NovaApi: OS::Heat::None
OS::TripleO::Services::Logging::NovaCommon: OS::Heat::None
OS::TripleO::Services::Logging::NovaLibvirt: OS::Heat::None
OS::TripleO::Services::Logging::NovaMetadata: OS::Heat::None
OS::TripleO::Services::Logging::PlacementApi: OS::Heat::None
OS::TripleO::Services::ManilaApi: OS::Heat::None
OS::TripleO::Services::ManilaBackendCephFs: OS::Heat::None
OS::TripleO::Services::ManilaBackendFlashBlade: OS::Heat::None
OS::TripleO::Services::ManilaBackendIsilon: OS::Heat::None
OS::TripleO::Services::ManilaBackendNetapp: OS::Heat::None
OS::TripleO::Services::ManilaBackendPowerMax: OS::Heat::None
OS::TripleO::Services::ManilaBackendUnity: OS::Heat::None
OS::TripleO::Services::ManilaBackendVNX: OS::Heat::None
OS::TripleO::Services::ManilaScheduler: OS::Heat::None
OS::TripleO::Services::ManilaShare: OS::Heat::None
OS::TripleO::Services::MasqueradeNetworks: OS::Heat::None
OS::TripleO::Services::Memcached: OS::Heat::None
OS::TripleO::Services::MetricsQdr: OS::Heat::None
OS::TripleO::Services::Multipathd: OS::Heat::None
# OS::TripleO::Services::MySQL: OS::Heat::None
OS::TripleO::Services::NeutronApi: OS::Heat::None
OS::TripleO::Services::NeutronBgpVpnApi: OS::Heat::None
OS::TripleO::Services::NeutronBgpVpnBagpipe: OS::Heat::None
OS::TripleO::Services::NeutronCorePlugin: OS::Heat::None
OS::TripleO::Services::NeutronCorePluginMLNXSDN: OS::Heat::None
OS::TripleO::Services::NeutronCorePluginVTS: OS::Heat::None
OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
OS::TripleO::Services::NeutronL2gwAgent: OS::Heat::None
OS::TripleO::Services::NeutronL2gwApi: OS::Heat::None
OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
OS::TripleO::Services::NeutronLinuxbridgeAgent: OS::Heat::None
OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::NeutronSfcApi: OS::Heat::None
OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None
OS::TripleO::Services::NovaAZConfig: OS::Heat::None
OS::TripleO::Services::NovaApi: OS::Heat::None
OS::TripleO::Services::NovaCompute: OS::Heat::None
OS::TripleO::Services::NovaConductor: OS::Heat::None
OS::TripleO::Services::NovaIronic: OS::Heat::None
OS::TripleO::Services::NovaLibvirt: OS::Heat::None
OS::TripleO::Services::NovaLibvirtGuests: OS::Heat::None
OS::TripleO::Services::NovaMetadata: OS::Heat::None
OS::TripleO::Services::NovaMigrationTarget: OS::Heat::None
OS::TripleO::Services::NovaScheduler: OS::Heat::None
OS::TripleO::Services::NovaVncProxy: OS::Heat::None
OS::TripleO::Services::OVNController: OS::Heat::None
OS::TripleO::Services::OVNDBs: OS::Heat::None
OS::TripleO::Services::OVNMetadataAgent: OS::Heat::None
OS::TripleO::Services::OctaviaApi: OS::Heat::None
OS::TripleO::Services::OctaviaDeploymentConfig: OS::Heat::None
OS::TripleO::Services::OctaviaHealthManager: OS::Heat::None
OS::TripleO::Services::OctaviaHousekeeping: OS::Heat::None
OS::TripleO::Services::OctaviaWorker: OS::Heat::None
# OS::TripleO::Services::OsloMessagingNotify: OS::Heat::None
# OS::TripleO::Services::OsloMessagingRpc: OS::Heat::None
OS::TripleO::Services::Pacemaker: OS::Heat::None
OS::TripleO::Services::PacemakerRemote: OS::Heat::None
OS::TripleO::Services::PlacementApi: OS::Heat::None
OS::TripleO::Services::Podman: ../../deployment/podman/podman-baremetal-ansible.yaml
OS::TripleO::Services::Ptp: OS::Heat::None
OS::TripleO::Services::Qdr: OS::Heat::None
# OS::TripleO::Services::RabbitMQ: OS::Heat::None
OS::TripleO::Services::Redis: OS::Heat::None
OS::TripleO::Services::RsyslogSidecar: OS::Heat::None
OS::TripleO::Services::SELinux: OS::Heat::None
OS::TripleO::Services::Securetty: OS::Heat::None
OS::TripleO::Services::Snmp: OS::Heat::None
OS::TripleO::Services::SwiftDispersion: OS::Heat::None
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
OS::TripleO::Services::TLSProxyBase: OS::Heat::None
OS::TripleO::Services::Tempest: OS::Heat::None
OS::TripleO::Services::Tmpwatch: ../../deployment/logrotate/tmpwatch-install.yaml
OS::TripleO::Services::TripleoPackages: OS::Heat::None
OS::TripleO::Services::Tuned: OS::Heat::None
OS::TripleO::Services::Unbound: OS::Heat::None
OS::TripleO::Services::UndercloudTLS: OS::Heat::None
OS::TripleO::Services::UndercloudUpgrade: OS::Heat::None
parameter_defaults:
Debug: true
NotificationDriver: 'noop'
ContainerCli: podman

View File

@ -1,168 +0,0 @@
resource_registry:
OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml
OS::TripleO::Services::CephGrafana: ../../deployment/cephadm/ceph-grafana.yaml
OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml
OS::TripleO::Services::CeilometerAgentCentral: ../../deployment/ceilometer/ceilometer-agent-central-container-puppet.yaml
OS::TripleO::Services::CeilometerAgentNotification: ../../deployment/ceilometer/ceilometer-agent-notification-container-puppet.yaml
OS::TripleO::Services::CeilometerAgentIpmi: ../../deployment/ceilometer/ceilometer-agent-ipmi-container-puppet.yaml
OS::TripleO::Services::ComputeCeilometerAgent: ../../deployment/ceilometer/ceilometer-agent-compute-container-puppet.yaml
OS::TripleO::Services::Collectd: ../../deployment/metrics/collectd-container-puppet.yaml
OS::TripleO::Services::MetricsQdr: ../../deployment/metrics/qdr-container-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
OS::TripleO::Services::HAproxy: ../../deployment/haproxy/haproxy-pacemaker-puppet.yaml
OS::TripleO::Services::Pacemaker: ../../deployment/pacemaker/pacemaker-baremetal-puppet.yaml
OS::TripleO::Services::PacemakerRemote: ../../deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml
OS::TripleO::Services::Clustercheck: ../../deployment/pacemaker/clustercheck-container-puppet.yaml
OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml
OS::TripleO::Services::MySQL: ../../deployment/database/mysql-pacemaker-puppet.yaml
OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-pacemaker-puppet.yaml
OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-pacemaker-puppet.yaml
OS::TripleO::Services::HeatApi: ../../deployment/heat/heat-api-container-puppet.yaml
OS::TripleO::Services::HeatApiCfn: ../../deployment/heat/heat-api-cfn-container-puppet.yaml
OS::TripleO::Services::HeatEngine: ../../deployment/heat/heat-engine-container-puppet.yaml
parameter_defaults:
ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2'
ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2'
ControllerServices:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::ContainerImagePrepare
- OS::TripleO::Services::Podman
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::LoginDefs
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::GlanceApiInternal
- OS::TripleO::Services::HeatApi
- OS::TripleO::Services::HeatApiCfn
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronApi
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::OVNDBs
- OS::TripleO::Services::OVNController
- OS::TripleO::Services::OVNMetadataAgent
- OS::TripleO::Services::OsloMessagingRpc
- OS::TripleO::Services::OsloMessagingNotify
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::NovaApi
- OS::TripleO::Services::PlacementApi
- OS::TripleO::Services::NovaMetadata
- OS::TripleO::Services::NovaScheduler
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Securetty
- OS::TripleO::Services::Timesync
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::NovaMigrationTarget
- OS::TripleO::Services::Redis
- OS::TripleO::Services::AodhApi
- OS::TripleO::Services::AodhEvaluator
- OS::TripleO::Services::AodhNotifier
- OS::TripleO::Services::AodhListener
- OS::TripleO::Services::CeilometerAgentCentral
- OS::TripleO::Services::CeilometerAgentIpmi
- OS::TripleO::Services::CeilometerAgentNotification
- OS::TripleO::Services::ComputeCeilometerAgent
- OS::TripleO::Services::GnocchiApi
- OS::TripleO::Services::GnocchiMetricd
- OS::TripleO::Services::GnocchiStatsd
- OS::TripleO::Services::CephMgr
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::MetricsQdr
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Multipathd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::virt_type: qemu
# NOTE(sileht): To decrease the time test_telemetry_integration takes, we
# configure Ceilometer to poll more often and configure the
# 'ceilometer-high-rate' Gnocchi archive policy to keep 1 point every 60s.
# The test will take 2 minutes instead of 10. Note that the tempest
# telemetry.alarm_granularity setting must be in sync with the archive
# policy, 60s too.
ceilometer::agent::polling::polling_interval: 15
# NOTE(mmagr): uncomment when QDR mesh actually works (follow-up on review.opendev.org/702754)
#tripleo::profile::base::metrics::qdr::router_mode: interior
NotificationDriver: 'messagingv2'
ManagePolling: true
ManagePipeline: true
CeilometerEnableGnocchi: true
PipelinePublishers:
- gnocchi://?archive_policy=ceilometer-high-rate
EventPipelinePublishers:
- gnocchi://?archive_policy=ceilometer-high-rate
CeilometerQdrPublishEvents: true
ManageEventPipeline: true
Debug: true
DockerPuppetDebug: True
CephPools:
- name: altrbd
rule_name: replicated_rule
# NOTE: These IDs and keys should be regenerated for
# a production deployment. What is here is suitable for
# developer and CI testing only.
CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
CephEnableDashboard: true
NovaEnableRbdBackend: true
CinderEnableRbdBackend: true
CinderRbdExtraPools: altrbd
CinderBackupBackend: ceph
GlanceBackend: rbd
GnocchiBackend: rbd
CinderEnableIscsiBackend: false
BannerText: |
******************************************************************
* This system is for the use of authorized users only. Usage of *
* this system may be monitored and recorded by system personnel. *
* Anyone using this system expressly consents to such monitoring *
* and is advised that if such monitoring reveals possible *
* evidence of criminal activity, system personnel may provide *
* the evidence from such monitoring to law enforcement officials.*
******************************************************************
CollectdConnectionType: amqp1
CollectdExtraPlugins:
- rrdtool
CollectdEnableSensubility: true
CollectdEnableLibpodstats: true
LoggingServers:
- host: 127.0.0.1
port: 24224
TtyValues:
- console
- tty1
- tty2
- tty3
- tty4
- tty5
- tty6
# Remove ContainerCli once this scenario is tested on CentOS 8
ContainerCli: podman
CephConfigPath: "/etc/ceph"
CephClientConfigVars: "{{ playbook_dir }}/cephadm/ceph_client.yml"
CephSpecFqdn: true
CephOsdSpec:
data_devices:
paths:
- /dev/ceph_vg/ceph_lv_data

View File

@ -1,222 +0,0 @@
resource_registry:
OS::TripleO::Services::AodhApi: ../../deployment/aodh/aodh-api-container-puppet.yaml
OS::TripleO::Services::AodhEvaluator: ../../deployment/aodh/aodh-evaluator-container-puppet.yaml
OS::TripleO::Services::AodhListener: ../../deployment/aodh/aodh-listener-container-puppet.yaml
OS::TripleO::Services::AodhNotifier: ../../deployment/aodh/aodh-notifier-container-puppet.yaml
OS::TripleO::Services::CeilometerAgentCentral: ../../deployment/ceilometer/ceilometer-agent-central-container-puppet.yaml
OS::TripleO::Services::CeilometerAgentNotification: ../../deployment/ceilometer/ceilometer-agent-notification-container-puppet.yaml
OS::TripleO::Services::ComputeCeilometerAgent: ../../deployment/ceilometer/ceilometer-agent-compute-container-puppet.yaml
OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml
OS::TripleO::Services::CephGrafana: ../../deployment/cephadm/ceph-grafana.yaml
OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml
OS::TripleO::Services::CephRgw: ../../deployment/cephadm/ceph-rgw.yaml
OS::TripleO::Services::Collectd: ../../deployment/metrics/collectd-container-puppet.yaml
OS::TripleO::Services::GnocchiApi: ../../deployment/gnocchi/gnocchi-api-container-puppet.yaml
OS::TripleO::Services::GnocchiMetricd: ../../deployment/gnocchi/gnocchi-metricd-container-puppet.yaml
OS::TripleO::Services::GnocchiStatsd: ../../deployment/gnocchi/gnocchi-statsd-container-puppet.yaml
OS::TripleO::Services::HeatApi: ../../deployment/heat/heat-api-container-puppet.yaml
OS::TripleO::Services::HeatApiCfn: ../../deployment/heat/heat-api-cfn-container-puppet.yaml
OS::TripleO::Services::HeatEngine: ../../deployment/heat/heat-engine-container-puppet.yaml
OS::TripleO::Services::Horizon: ../../deployment/horizon/horizon-container-puppet.yaml
OS::TripleO::Services::MetricsQdr: ../../deployment/metrics/qdr-container-puppet.yaml
OS::TripleO::Services::Multipathd: ../../deployment/multipathd/multipathd-container-ansible.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml
OS::TripleO::Services::Rsyslog: ../../deployment/logging/rsyslog-container-puppet.yaml
OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-pacemaker-puppet.yaml
OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-pacemaker-puppet.yaml
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
parameter_defaults:
NodeDataLookup:
AB4114B1-9C9D-409A-BEFB-D88C151BF2C3: {"foo": "bar"}
8CF1A7EA-7B4B-4433-AC83-17675514B1B8: {"foo2": "bar2"}
StandaloneExtraConfig:
# NOTE(sileht): To decrease the time test_telemetry_integration takes, we
# configure Ceilometer to poll more often and configure the 'high' Gnocchi
# archive policy to keep 1 point every 60s. The test will take 2 minutes
# instead of 10. Note that the tempest telemetry.alarm_granularity setting
# must be in sync with the archive policy, 60s too.
ceilometer::agent::polling::polling_interval: 15
tripleo::profile::base::metrics::qdr::interior_mesh_nodes: ''
collectd::plugin::ceph::daemons:
- 'ceph-mon.controller-00'
- 'ceph-mon.controller-01'
- 'ceph-mon.controller-02'
- 'ceph-osd.01'
- 'ceph-osd.07'
NotificationDriver: 'messagingv2'
ManagePolling: true
HeatConfigureDelegatedRoles: true
CeilometerEnableGnocchi: true
PipelinePublishers:
- gnocchi://?archive_policy=ceilometer-high-rate
EventPipelinePublishers:
- gnocchi://?archive_policy=ceilometer-high-rate
CeilometerQdrPublishEvents: true
CeilometerQdrPublishMetrics: true
ManageEventPipeline: true
ManagePipeline: true
Debug: true
DeployedCeph: true
CephEnableDashboard: true
CephDashboardPort: 8445
GrafanaDashboardPort: 3200
CinderRbdExtraPools: altrbd,pool2,pool3
CephPools:
- name: altrbd
rule_name: replicated_rule
application: rbd
# NOTE: These IDs and keys should be regenerated for
# a production deployment. What is here is suitable for
# developer and CI testing only.
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
CephExtraKeys:
- name: "client.glance"
caps:
mgr: "allow *"
mon: "profile rbd"
osd: "profile rbd pool=images"
key: "AQBRgQ9eAAAAABAAv84zEilJYZPNuJ0Iwn9Ndg=="
mode: "0600"
CephExternalMultiConfig:
# Create client conf and key files for two non-existent external ceph clusters
- cluster: 'ceph2'
fsid: 'af25554b-42f6-4d2b-9b9b-d08a1132d3e8'
external_cluster_mon_ips: '172.18.0.5,172.18.0.6,172.18.0.7'
keys:
- name: "client.openstack"
caps:
mgr: "allow *"
mon: "profile rbd"
osd: "osd: profile rbd pool=volumes, profile rbd pool=backups, profile rbd pool=vms, profile rbd pool=images"
key: "AQCwmeRcAAAAABAA6SQU/bGqFjlfLro5KxrB1Q=="
mode: "0600"
dashboard_enabled: false
- cluster: 'ceph3'
fsid: 'e2cba068-5f14-4b0f-b047-acf375c0004a'
external_cluster_mon_ips: '172.18.0.8,172.18.0.9,172.18.0.10'
keys:
- name: "client.openstack"
caps:
mgr: "allow *"
mon: "profile rbd"
osd: "osd: profile rbd pool=volumes, profile rbd pool=backups, profile rbd pool=vms, profile rbd pool=images"
key: "AQCwmeRcAAAAABAA6SQU/bGqFjlfLro5KxrB2Q=="
mode: "0600"
dashboard_enabled: false
NovaEnableRbdBackend: true
CinderEnableRbdBackend: true
CinderRbdBackendName: tripleo_ceph,tripleo_ceph2,tripleo_ceph3
CinderRbdMultiConfig:
tripleo_ceph2:
CephClusterName: ceph2
CephClusterFSID: af25554b-42f6-4d2b-9b9b-d08a1132d3e8
CinderRbdAvailabilityZone: ceph2-AZ
tripleo_ceph3:
CephClusterName: ceph3
CephClusterFSID: e2cba068-5f14-4b0f-b047-acf375c0004a
CinderRbdAvailabilityZone: ceph3-AZ
CinderBackupBackend: ceph
GlanceEnabledImportMethods: 'glance-direct,web-download,copy-image'
# For the copy-image method, we override the policy here to allow
# everyone to copy every type of image (private or public).
# This way we can test copy-image as a non-admin user as well
# as on private images. The policy defaults to admin-only in glance.
GlanceApiPolicies:
glance-copy_image:
key: "copy_image"
value: ""
GlanceBackend: cinder
GlanceStoreDescription: 'Cinder glance store'
GlanceMultistoreConfig:
rbd_store:
GlanceBackend: rbd
GlanceStoreDescription: 'RBD glance store'
CephClientUserName: 'glance'
CephClusterName: ceph
GnocchiBackend: rbd
CinderEnableIscsiBackend: false
BannerText: |
******************************************************************
* This system is for the use of authorized users only. Usage of *
* this system may be monitored and recorded by system personnel. *
* Anyone using this system expressly consents to such monitoring *
* and is advised that if such monitoring reveals possible *
* evidence of criminal activity, system personnel may provide *
* the evidence from such monitoring to law enforcement officials.*
******************************************************************
EnableSTF: true
MetricsQdrAddresses:
- prefix: collectd
distribution: multicast
MetricsQdrSSLProfiles:
- name: sslProfile
CollectdConnectionType: amqp1
CollectdAmqpInterval: 5
CollectdDefaultPollingInterval: 5
CollectdEnableSensubility: true
CollectdSensubilityTransport: amqp1
CollectdSensubilityLogLevel: DEBUG
CollectdEnableLibpodstats: true
CollectdAmqpInstances:
notify:
notify: true
format: JSON
presettle: false
telemetry:
format: JSON
presettle: false
CollectdAmqpSendQueueLimit: 40
LoggingServers:
- host: 127.0.0.1
port: 24224
TtyValues:
- console
- tty1
- tty2
- tty3
- tty4
- tty5
- tty6
ContainerCli: podman
CephAdmVerbose: true
CephConfigOverrides:
# put logs in /var/log/ceph/<cluster-fsid>
log_to_file: true
mon_cluster_log_to_file: true
# disable logging to journald so we don't log twice
log_to_stderr: false
mon_cluster_log_to_stderr: false
log_to_journald: false
mon_cluster_log_to_journald: false
# test values
anotherkey: anothervalue
global:
globalkey: globalvalue
osd:
osdkey: osdvalue
mon:
mon_warn_on_pool_no_redundancy: false
mon_warn_on_insecure_global_id_reclaim_allowed: false
NfsUrl: 127.0.0.1
CephMsgrSecureMode: true
CephConfigPath: "/etc/ceph"
DisableCephadm: true
LVMFilterEnabled: true
LVMFilterAllowlist:
- /dev/loop2
- /dev/loop3
- /dev/loop4
MultipathdEnable: true
NovaLibvirtVolumeUseMultipath: true
NovaShowHostStatus: all
NovaApiHostStatusPolicy: >
(role:reader and system_scope:all) or (role:reader and project_id:%(project_id)s)
RsyslogElasticsearchSetting:
Server: '127.0.0.1:9200'

View File

@ -1,68 +0,0 @@
resource_registry:
OS::TripleO::Services::AodhApi: ../../deployment/aodh/aodh-api-container-puppet.yaml
OS::TripleO::Services::AodhEvaluator: ../../deployment/aodh/aodh-evaluator-container-puppet.yaml
OS::TripleO::Services::AodhListener: ../../deployment/aodh/aodh-listener-container-puppet.yaml
OS::TripleO::Services::AodhNotifier: ../../deployment/aodh/aodh-notifier-container-puppet.yaml
OS::TripleO::Services::CeilometerAgentCentral: ../../deployment/ceilometer/ceilometer-agent-central-container-puppet.yaml
OS::TripleO::Services::CeilometerAgentNotification: ../../deployment/ceilometer/ceilometer-agent-notification-container-puppet.yaml
OS::TripleO::Services::ComputeCeilometerAgent: ../../deployment/ceilometer/ceilometer-agent-compute-container-puppet.yaml
OS::TripleO::Services::GnocchiApi: ../../deployment/gnocchi/gnocchi-api-container-puppet.yaml
OS::TripleO::Services::GnocchiMetricd: ../../deployment/gnocchi/gnocchi-metricd-container-puppet.yaml
OS::TripleO::Services::GnocchiStatsd: ../../deployment/gnocchi/gnocchi-statsd-container-puppet.yaml
OS::TripleO::Services::HeatApi: ../../deployment/heat/heat-api-container-puppet.yaml
OS::TripleO::Services::HeatApiCfn: ../../deployment/heat/heat-api-cfn-container-puppet.yaml
OS::TripleO::Services::HeatEngine: ../../deployment/heat/heat-engine-container-puppet.yaml
OS::TripleO::Services::BarbicanApi: ../../deployment/barbican/barbican-api-container-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml
OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-pacemaker-puppet.yaml
OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-pacemaker-puppet.yaml
OS::TripleO::Services::BarbicanBackendSimpleCrypto: ../../deployment/barbican/barbican-backend-simple-crypto-puppet.yaml
# NOTE(mwhahaha): cheat and use the horizon "service" for our test service
# since horizon is disabled. This allows us to avoid keeping some test
# service in the defaults just for this case.
OS::TripleO::Services::Horizon: ../../deployment/tests/test-container-volume.yaml
parameter_defaults:
StandaloneExtraConfig:
# NOTE(sileht): To decrease the time test_telemetry_integration takes, we
# configure Ceilometer to poll more often and configure the 'high' Gnocchi
# archive policy to keep 1 point every 60s. The test will take 2 minutes
# instead of 10. Note that the tempest telemetry.alarm_granularity setting
# must be in sync with the archive policy, 60s too.
ceilometer::agent::polling::polling_interval: 15
NotificationDriver: 'messagingv2'
ManagePolling: true
ManagePipeline: true
CeilometerEnableGnocchi: true
PipelinePublishers:
- gnocchi://?archive_policy=ceilometer-high-rate
EventPipelinePublishers:
- gnocchi://?archive_policy=ceilometer-high-rate
Debug: true
DockerPuppetDebug: true
SwiftCeilometerPipelineEnabled: false
BarbicanSimpleCryptoGlobalDefault: true
ContainerCli: podman
# Test mounting a container volume into the service containers
ContainerTestVolumeName: test_volume
CinderVolumeOptVolumes:
- test_volume:/testvol
GlanceApiOptVolumes:
- test_volume:/testvol
HeatApiOptVolumes:
- test_volume:/testvol
HeatEngineApiOptVolumes:
- test_volume:/testvol
NeutronApiOptVolumes:
- test_volume:/testvol
NovaComputeOptVolumes:
- test_volume:/testvol
StandaloneExtraGroupVars:
cinder_fake_group_var_one: var_one_override
StandaloneParameters:
LVMFilterEnabled: true
LVMFilterAllowlist:
- /dev/allowed
NovaShowHostStatus: unknown-only

View File

@ -1,48 +0,0 @@
resource_registry:
OS::TripleO::Services::CinderApi: OS::Heat::None
OS::TripleO::Services::CinderScheduler: OS::Heat::None
OS::TripleO::Services::CinderVolume: OS::Heat::None
OS::TripleO::Services::Horizon: OS::Heat::None
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
OS::TripleO::Services::DesignateApi: ../../deployment/designate/designate-api-container-puppet.yaml
OS::TripleO::Services::DesignateCentral: ../../deployment/designate/designate-central-container-puppet.yaml
OS::TripleO::Services::DesignateProducer: ../../deployment/designate/designate-producer-container-puppet.yaml
OS::TripleO::Services::DesignateWorker: ../../deployment/designate/designate-worker-container-puppet.yaml
OS::TripleO::Services::DesignateMDNS: ../../deployment/designate/designate-mdns-container-puppet.yaml
OS::TripleO::Services::DesignateBind: ../../deployment/designate/designate-bind-container.yaml
OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml
OS::TripleO::Services::Unbound: ../../deployment/unbound/unbound-container-ansible.yaml
parameter_defaults:
Debug: true
# We don't deploy Swift, so we switch to the file backend.
GlanceBackend: 'file'
KeystoneTokenProvider: 'fernet'
SwiftCeilometerPipelineEnabled: false
NotificationDriver: 'noop'
NeutronPluginExtensions: qos,port_security,dns_domain_ports
NotifyPort: 5672
ContainerCli: podman
DesignateBackendListenIPs: ["192.168.24.20"]
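# The numeric prefix in each rule name below appears to order the rules in
# the generated firewall chain, and 'action' selects insert vs. append
# semantics (our reading of these examples, not verified against the docs).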
ExtraFirewallRules:
'301 allow arbitrary tcp rule':
dport: 12345
proto: tcp
source: 127.0.0.1
action: insert
'302 allow arbitrary udp rule':
dport: 12345
proto: udp
source: 127.0.0.1
action: append
StandaloneParameters:
ExtraFirewallRules:
'303 allow arbitrary tcp rule for controller':
dport: 12347
proto: tcp
source: 127.0.0.1
action: insert

View File

@ -1,87 +0,0 @@
resource_registry:
OS::TripleO::Services::Aide: ../../deployment/aide/aide-baremetal-ansible.yaml
OS::TripleO::Services::CinderApi: OS::Heat::None
OS::TripleO::Services::CinderScheduler: OS::Heat::None
OS::TripleO::Services::CinderVolume: OS::Heat::None
OS::TripleO::Services::Redis: OS::Heat::None
OS::TripleO::Services::Horizon: ../../deployment/horizon/horizon-container-puppet.yaml
OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml
OS::TripleO::Services::CephGrafana: ../../deployment/cephadm/ceph-grafana.yaml
OS::TripleO::Services::CephMds: ../../deployment/cephadm/ceph-mds.yaml
OS::TripleO::Services::CephNfs: ../../deployment/cephadm/ceph-nfs.yaml
OS::TripleO::Services::CephRgw: ../../deployment/cephadm/ceph-rgw.yaml
OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
OS::TripleO::Services::ManilaApi: ../../deployment/manila/manila-api-container-puppet.yaml
OS::TripleO::Services::ManilaScheduler: ../../deployment/manila/manila-scheduler-container-puppet.yaml
OS::TripleO::Services::ManilaShare: ../../deployment/manila/manila-share-pacemaker-puppet.yaml
OS::TripleO::Services::ManilaBackendCephFs: ../../deployment/manila/manila-backend-cephfs.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
OS::TripleO::Services::HAproxy: ../../deployment/haproxy/haproxy-pacemaker-puppet.yaml
OS::TripleO::Services::Pacemaker: ../../deployment/pacemaker/pacemaker-baremetal-puppet.yaml
OS::TripleO::Services::PacemakerRemote: ../../deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml
OS::TripleO::Services::Clustercheck: ../../deployment/pacemaker/clustercheck-container-puppet.yaml
OS::TripleO::Services::MySQL: ../../deployment/database/mysql-pacemaker-puppet.yaml
OS::TripleO::Services::LoginDefs: ../../deployment/login-defs/login-defs-baremetal-ansible.yaml
parameter_defaults:
GlanceSparseUploadEnabled: true
ManagePolling: true
Debug: true
CephEnableDashboard: false
CephDashboardPort: 8445
CephAdmVerbose: true
GrafanaDashboardPort: 3200
DeployedCeph: true
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
CephRgwKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
NovaEnableRbdBackend: true
GlanceBackend: rbd
CeilometerEnableGnocchi: true
GnocchiBackend: rbd
BannerText: |
******************************************************************
* This system is for the use of authorized users only. Usage of *
* this system may be monitored and recorded by system personnel. *
* Anyone using this system expressly consents to such monitoring *
* and is advised that if such monitoring reveals possible *
* evidence of criminal activity, system personnel may provide *
* the evidence from such monitoring to law enforcement officials.*
******************************************************************
CollectdExtraPlugins:
- rrdtool
LoggingServers:
- host: 127.0.0.1
port: 24224
TtyValues:
- console
- tty1
- tty2
- tty3
- tty4
- tty5
- tty6
CephConfigOverrides:
# put logs in /var/log/ceph/<cluster-fsid>
log_to_file: true
mon_cluster_log_to_file: true
# disable logging to journald so we don't log twice
log_to_stderr: false
mon_cluster_log_to_stderr: false
log_to_journald: false
mon_cluster_log_to_journald: false
# test values
globalkey: globalvalue
anotherkey: anothervalue
mon:
mon_warn_on_pool_no_redundancy: false
mon_warn_on_insecure_global_id_reclaim_allowed: false
ManilaCephFSCephFSProtocolHelperType: 'NFS'
DisableCephadm: true
CephConfigPath: "/etc/ceph"
CephObservabilityDataOnly: true

View File

@ -1,114 +0,0 @@
resource_registry:
OS::TripleO::Services::NeutronMl2PluginBase: ../../deployment/neutron/neutron-plugin-ml2.yaml
OS::TripleO::Services::NeutronApi: ../../deployment/neutron/neutron-api-container-puppet.yaml
OS::TripleO::Services::NeutronCorePlugin: ../../deployment/neutron/neutron-plugin-ml2-container-puppet.yaml
OS::TripleO::Services::NeutronMetadataAgent: ../../deployment/neutron/neutron-metadata-container-puppet.yaml
OS::TripleO::Services::NeutronOvsAgent: ../../deployment/neutron/neutron-ovs-agent-container-puppet.yaml
OS::TripleO::Services::NeutronDhcpAgent: ../../deployment/neutron/neutron-dhcp-container-puppet.yaml
OS::TripleO::Services::NeutronL3Agent: ../../deployment/neutron/neutron-l3-container-puppet.yaml
OS::TripleO::Services::ComputeNeutronOvsAgent: ../../deployment/neutron/neutron-ovs-agent-container-puppet.yaml
OS::TripleO::Services::NovaMigrationTarget: OS::Heat::None
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
# Disable OVN services
OS::TripleO::Services::OVNController: OS::Heat::None
OS::TripleO::Services::OVNDBs: OS::Heat::None
OS::TripleO::Services::OVNMetadataAgent: OS::Heat::None
# environments/docker-ha.yaml enables the cinder services, so disable them here
OS::TripleO::Services::CinderVolume: OS::Heat::None
OS::TripleO::Services::CinderApi: OS::Heat::None
OS::TripleO::Services::CinderScheduler: OS::Heat::None
OS::TripleO::Services::CinderBackup: OS::Heat::None
OS::TripleO::Services::BlockStorageCinderVolume: OS::Heat::None
parameter_defaults:
ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2'
ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2'
ControllerServices:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::ContainerImagePrepare
- OS::TripleO::Services::Podman
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::GlanceApiInternal
- OS::TripleO::Services::HeatApi
- OS::TripleO::Services::HeatApiCfn
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronApi
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
- OS::TripleO::Services::NeutronOvsAgent
- OS::TripleO::Services::OsloMessagingRpc
- OS::TripleO::Services::OsloMessagingNotify
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::NovaApi
- OS::TripleO::Services::PlacementApi
- OS::TripleO::Services::NovaMetadata
- OS::TripleO::Services::NovaScheduler
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Timesync
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::NovaMigrationTarget
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Multipathd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::virt_type: qemu
# Required for CentOS 7.3 and QEMU 2.6.0
nova::compute::libvirt::cpu_mode: 'none'
# For ML2/OVS
NeutronMechanismDrivers: 'openvswitch'
NeutronPluginExtensions: "qos,port_security"
NeutronTypeDrivers: 'vxlan,vlan,flat,gre'
NeutronNetworkType: 'vxlan'
NeutronServicePlugins: 'router,qos,segments,trunk'
NeutronVniRanges: ['1:4094', ]
NeutronEnableDVR: false
DhcpAgentNotification: true
KernelIpNonLocalBind: 0
Debug: true
DockerPuppetDebug: True
BannerText: |
******************************************************************
* This system is for the use of authorized users only. Usage of *
* this system may be monitored and recorded by system personnel. *
* Anyone using this system expressly consents to such monitoring *
* and is advised that if such monitoring reveals possible *
* evidence of criminal activity, system personnel may provide *
* the evidence from such monitoring to law enforcement officials.*
******************************************************************
MessageOfTheDay: |
This is a test message of MessageOfTheDay
ControllerParameters:
BannerText: |
******************************************************************
* This system is for the use of authorized users only. Usage of *
* this system may be monitored and recorded by system personnel. *
* Anyone using this system expressly consents to such monitoring *
* and is advised that if such monitoring reveals possible *
* evidence of criminal activity, system personnel may provide *
* the evidence from such monitoring to law enforcement officials.*
******************************************************************
This node is a Controller node.
MessageOfTheDay: |
This is a test message of MessageOfTheDay
This node is a Controller node.
# We don't deploy Swift, so we switch to the file backend.
GlanceBackend: 'file'
IronicCleaningDiskErase: 'metadata'
NotificationDriver: 'noop'

View File

@ -1,51 +0,0 @@
resource_registry:
OS::TripleO::Services::NeutronMl2PluginBase: ../../deployment/neutron/neutron-plugin-ml2.yaml
OS::TripleO::Services::NeutronApi: ../../deployment/neutron/neutron-api-container-puppet.yaml
OS::TripleO::Services::NeutronCorePlugin: ../../deployment/neutron/neutron-plugin-ml2-container-puppet.yaml
OS::TripleO::Services::NeutronMetadataAgent: ../../deployment/neutron/neutron-metadata-container-puppet.yaml
OS::TripleO::Services::NeutronOvsAgent: ../../deployment/neutron/neutron-ovs-agent-container-puppet.yaml
OS::TripleO::Services::NeutronDhcpAgent: ../../deployment/neutron/neutron-dhcp-container-puppet.yaml
OS::TripleO::Services::NeutronL3Agent: ../../deployment/neutron/neutron-l3-container-puppet.yaml
OS::TripleO::Services::BlockStorageCinderVolume: OS::Heat::None
OS::TripleO::Services::CinderVolume: OS::Heat::None
OS::TripleO::Services::CinderApi: OS::Heat::None
OS::TripleO::Services::CinderScheduler: OS::Heat::None
OS::TripleO::Services::CinderBackup: OS::Heat::None
OS::TripleO::Services::Horizon: OS::Heat::None
OS::TripleO::Services::NovaMigrationTarget: OS::Heat::None
# Disable OVN services
OS::TripleO::Services::OVNController: OS::Heat::None
OS::TripleO::Services::OVNDBs: OS::Heat::None
OS::TripleO::Services::OVNMetadataAgent: OS::Heat::None
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
parameter_defaults:
DhcpAgentNotification: true
NeutronMechanismDrivers: ['openvswitch']
NeutronTypeDrivers: 'vxlan,vlan,flat,gre'
NeutronNetworkType: 'vxlan'
NeutronPluginExtensions: "qos,port_security"
NeutronServicePlugins: 'router,qos,segments,trunk'
NeutronVniRanges: ['1:4094', ]
NeutronEnableDVR: false
NeutronL3HA: true
KernelIpNonLocalBind: 0
Debug: true
DockerPuppetDebug: True
BannerText: |
******************************************************************
* This system is for the use of authorized users only. Usage of *
* this system may be monitored and recorded by system personnel. *
* Anyone using this system expressly consents to such monitoring *
* and is advised that if such monitoring reveals possible *
* evidence of criminal activity, system personnel may provide *
* the evidence from such monitoring to law enforcement officials.*
******************************************************************
MessageOfTheDay: |
This is a test message of MessageOfTheDay
# We don't deploy Swift, so we switch to the file backend.
GlanceBackend: 'file'
NotificationDriver: 'noop'
ContainerCli: podman

View File

@ -1,104 +0,0 @@
resource_registry:
OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml
OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml
OS::TripleO::Services::CinderApi: OS::Heat::None
OS::TripleO::Services::CinderBackup: OS::Heat::None
OS::TripleO::Services::CinderScheduler: OS::Heat::None
OS::TripleO::Services::CinderVolume: OS::Heat::None
OS::TripleO::Services::Clustercheck: ../../deployment/pacemaker/clustercheck-container-puppet.yaml
OS::TripleO::Services::HAproxy: ../../deployment/haproxy/haproxy-pacemaker-puppet.yaml
OS::TripleO::Services::MySQL: ../../deployment/database/mysql-pacemaker-puppet.yaml
OS::TripleO::Services::OctaviaApi: ../../deployment/octavia/octavia-api-container-puppet.yaml
OS::TripleO::Services::OctaviaDeploymentConfig: ../../deployment/octavia/octavia-deployment-config.yaml
OS::TripleO::Services::OctaviaHealthManager: ../../deployment/octavia/octavia-health-manager-container-puppet.yaml
OS::TripleO::Services::OctaviaHousekeeping: ../../deployment/octavia/octavia-housekeeping-container-puppet.yaml
OS::TripleO::Services::OctaviaWorker: ../../deployment/octavia/octavia-worker-container-puppet.yaml
OS::TripleO::Services::Pacemaker: ../../deployment/pacemaker/pacemaker-baremetal-puppet.yaml
OS::TripleO::Services::PacemakerRemote: ../../deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml
OS::TripleO::Services::Redis: ../../deployment/database/redis-container-puppet.yaml
OS::TripleO::Services::SwiftDispersion: OS::Heat::None
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
parameter_defaults:
ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2'
ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2'
ControllerServices:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::ContainerImagePrepare
- OS::TripleO::Services::Podman
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::GlanceApiInternal
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronApi
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::OVNDBs
- OS::TripleO::Services::OVNController
- OS::TripleO::Services::OVNMetadataAgent
- OS::TripleO::Services::OctaviaApi
- OS::TripleO::Services::OctaviaDeploymentConfig
- OS::TripleO::Services::OctaviaHealthManager
- OS::TripleO::Services::OctaviaHousekeeping
- OS::TripleO::Services::OctaviaWorker
- OS::TripleO::Services::OsloMessagingRpc
- OS::TripleO::Services::OsloMessagingNotify
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::NovaApi
- OS::TripleO::Services::PlacementApi
- OS::TripleO::Services::NovaMetadata
- OS::TripleO::Services::NovaScheduler
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Timesync
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::NovaMigrationTarget
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::CephMgr
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Multipathd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::virt_type: qemu
octavia::controller::connection_retry_interval: 10
Debug: true
CephPools:
- name: altrbd
rule_name: replicated_rule
CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
CephClusterName: mycephcluster
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
NovaEnableRbdBackend: true
CeilometerEnableGnocchi: true
GlanceBackend: rbd
GnocchiRbdPoolName: metrics
SwiftCeilometerPipelineEnabled: false
NeutronEnableForceMetadata: true
OctaviaAmphoraImageFilename: /home/zuul/amphora.qcow2
OctaviaManageNovaFlavor: true
# For now, we hardcode it, but soon it'll be generated in tripleo-common
OctaviaCaKeyPassphrase: 'upstreamci'
OctaviaGenerateCerts: true
ContainerCli: podman
CephConfigPath: "/etc/ceph"
CephClientConfigVars: "{{ playbook_dir }}/cephadm/ceph_client.yml"
CephSpecFqdn: true
CephOsdSpec:
data_devices:
paths:
- /dev/ceph_vg/ceph_lv_data

View File

@ -1,70 +0,0 @@
resource_registry:
OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml
OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml
OS::TripleO::Services::CinderApi: OS::Heat::None
OS::TripleO::Services::CinderBackup: OS::Heat::None
OS::TripleO::Services::CinderScheduler: OS::Heat::None
OS::TripleO::Services::CinderVolume: OS::Heat::None
OS::TripleO::Services::Horizon: ../../deployment/horizon/horizon-container-puppet.yaml
OS::TripleO::Services::OctaviaApi: ../../deployment/octavia/octavia-api-container-puppet.yaml
OS::TripleO::Services::OctaviaDeploymentConfig: ../../deployment/octavia/octavia-deployment-config.yaml
OS::TripleO::Services::OctaviaHealthManager: ../../deployment/octavia/octavia-health-manager-container-puppet.yaml
OS::TripleO::Services::OctaviaHousekeeping: ../../deployment/octavia/octavia-housekeeping-container-puppet.yaml
OS::TripleO::Services::OctaviaWorker: ../../deployment/octavia/octavia-worker-container-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
OS::TripleO::Services::Redis: ../../deployment/database/redis-container-puppet.yaml
OS::TripleO::Services::SwiftDispersion: OS::Heat::None
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
parameter_defaults:
StandaloneExtraConfig:
octavia::controller::connection_retry_interval: 10
OctaviaAmphoraSshKeyFile: /home/zuul/.ssh/id_rsa.pub
OctaviaAmphoraImageFilename: /home/zuul/amphora.qcow2
OctaviaCaKeyPassphrase: 'upstreamci'
OctaviaManageNovaFlavor: true
OctaviaGenerateCerts: true
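# The NodeDataLookup keys below appear to be machine UUIDs (dmidecode-style
# system UUIDs); each maps to extra per-node hieradata.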
NodeDataLookup:
AB4114B1-9C9D-409A-BEFB-D88C151BF2C3: {"foo": "bar"}
8CF1A7EA-7B4B-4433-AC83-17675514B1B8: {"foo2": "bar2"}
Debug: true
HideSensitiveLogs: false
# NOTE: These IDs and keys should be regenerated for
# a production deployment. What is here is suitable for
# developer and CI testing only.
DeployedCeph: true
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
# NOTE: This scenario uses the rbd backend to test the deployment feature
# to upload the amphora image in raw format instead of qcow2 format.
# (See https://bugs.launchpad.net/tripleo/+bug/1778303 for details)
NovaEnableRbdBackend: true
GlanceBackend: rbd
BannerText: |
******************************************************************
* This system is for the use of authorized users only. Usage of *
* this system may be monitored and recorded by system personnel. *
* Anyone using this system expressly consents to such monitoring *
* and is advised that if such monitoring reveals possible *
* evidence of criminal activity, system personnel may provide *
* the evidence from such monitoring to law enforcement officials.*
******************************************************************
CollectdExtraPlugins:
- rrdtool
LoggingServers:
- host: 127.0.0.1
port: 24224
TtyValues:
- console
- tty1
- tty2
- tty3
- tty4
- tty5
- tty6
ContainerCli: podman
CephConfigPath: "/etc/ceph"

View File

@ -1,29 +0,0 @@
# Touching this file so CI runs scenario012
resource_registry:
OS::TripleO::Services::NovaIronic: ../../deployment/nova/nova-ironic-container-puppet.yaml
OS::TripleO::Services::IronicApi: ../../deployment/ironic/ironic-api-container-puppet.yaml
OS::TripleO::Services::IronicConductor: ../../deployment/ironic/ironic-conductor-container-puppet.yaml
OS::TripleO::Services::IronicPxe: ../../deployment/ironic/ironic-pxe-container-puppet.yaml
OS::TripleO::NodeExtraConfigPost: ../common/ironic_standalone_post.yaml
OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-container-puppet.yaml
OS::TripleO::Services::NeutronDhcpAgent: ../../deployment/neutron/neutron-dhcp-container-puppet.yaml
OS::TripleO::Services::NovaCompute: OS::Heat::None
OS::TripleO::Services::NovaLibvirt: OS::Heat::None
OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml
parameter_defaults:
Debug: true
DockerPuppetDebug: True
NotificationDriver: 'noop'
SwiftCeilometerPipelineEnabled: false
IronicCleaningDiskErase: 'metadata'
NovaSchedulerDiscoverHostsInCellsInterval: 15
NeutronMechanismDrivers: ovn
NeutronNetworkType: vlan
NeutronTypeDrivers: local,geneve,vlan,flat
DhcpAgentNotification: true
IronicApiMaxRetries: 180
IronicDefaultNetworkInterface: neutron
IronicAutomatedClean: false
NeutronFlatNetworks: datacentre
NeutronNetworkVLANRanges: 'datacentre:500:599,tenant:300:399'

View File

@ -1,30 +0,0 @@
# This scenario is the same as scenario10 but without Ceph.
resource_registry:
OS::TripleO::Services::CinderApi: OS::Heat::None
OS::TripleO::Services::CinderBackup: OS::Heat::None
OS::TripleO::Services::CinderScheduler: OS::Heat::None
OS::TripleO::Services::CinderVolume: OS::Heat::None
OS::TripleO::Services::Horizon: OS::Heat::None
OS::TripleO::Services::OctaviaApi: ../../deployment/octavia/octavia-api-container-puppet.yaml
OS::TripleO::Services::OctaviaDeploymentConfig: ../../deployment/octavia/octavia-deployment-config.yaml
OS::TripleO::Services::OctaviaHealthManager: ../../deployment/octavia/octavia-health-manager-container-puppet.yaml
OS::TripleO::Services::OctaviaHousekeeping: ../../deployment/octavia/octavia-housekeeping-container-puppet.yaml
OS::TripleO::Services::OctaviaWorker: ../../deployment/octavia/octavia-worker-container-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-container-puppet.yaml
OS::TripleO::Services::Redis: ../../deployment/database/redis-container-puppet.yaml
OS::TripleO::Services::SwiftDispersion: OS::Heat::None
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
parameter_defaults:
StandaloneExtraConfig:
octavia::controller::connection_retry_interval: 10
OctaviaAmphoraSshKeyFile: /home/zuul/.ssh/id_rsa.pub
OctaviaAmphoraImageFilename: /home/zuul/amphora.qcow2
OctaviaCaKeyPassphrase: 'upstreamci'
OctaviaManageNovaFlavor: true
OctaviaGenerateCerts: true
Debug: true
GlanceBackend: 'file'
ContainerCli: podman

View File

@ -1,22 +0,0 @@
resource_registry:
OS::TripleO::Services::IpaClient: ../../deployment/ipa/ipaservices-baremetal-ansible.yaml
parameter_defaults:
CloudDomain: "ooo.test"
CloudName: "overcloud.ooo.test"
CloudNameInternal: "overcloud.internalapi.ooo.test"
CloudNameStorage: "overcloud.storage.ooo.test"
CloudNameStorageManagement: "overcloud.storagemgmt.ooo.test"
CloudNameCtlplane: "overcloud.ctlplane.ooo.test"
IdMServer: "ipa.ooo.test"
IdMDomain: "ooo.test"
IdMAdminPassword: "fce95318204114530f31f885c9df588f"
# domain name used by the host
NeutronDnsDomain: "ooo.test"
PasswordAuthentication: 'yes'
HAProxyExternalNetwork: ctlplane
# Add setting for custom DNS server
# DnsServers: ["ip_for_dns_server"]
DnsSearchDomains: ["ooo.test"]
LocalContainerRegistry: "192.168.24.1"
AddVipsToEtcHosts: True

View File

@ -1,50 +0,0 @@
- name: External
name_lower: external
mtu: 1350
vip: true
subnets:
external_subnet:
ip_subnet: 10.0.0.0/24
gateway_ip: 10.0.0.1
allocation_pools:
- start: 10.0.0.10
end: 10.0.0.50
- name: InternalApi
name_lower: internal_api
mtu: 1350
vip: true
subnets:
internal_api_subnet:
ip_subnet: 172.17.0.0/24
allocation_pools:
- start: 172.17.0.10
end: 172.17.0.250
- name: Storage
name_lower: storage
mtu: 1350
vip: true
subnets:
storage_subnet:
ip_subnet: 172.18.0.0/24
allocation_pools:
- start: 172.18.0.10
end: 172.18.0.250
- name: StorageMgmt
name_lower: storage_mgmt
mtu: 1350
vip: true
subnets:
storage_mgmt_subnet:
ip_subnet: 172.19.0.0/24
allocation_pools:
- start: 172.19.0.10
end: 172.19.0.250
- name: Tenant
name_lower: tenant
mtu: 1350
subnets:
tenant_subnet:
ip_subnet: 172.16.0.0/24
allocation_pools:
- start: 172.16.0.10
end: 172.16.0.250

View File

@ -1,54 +0,0 @@
- name: External
name_lower: external
mtu: 1350
vip: true
ipv6: true
subnets:
external_subnet:
ipv6_subnet: 2001:db8:fd00:1000::/64
gateway_ipv6: 2001:db8:fd00:1000::1
ipv6_allocation_pools:
- start: 2001:db8:fd00:1000::10
end: 2001:db8:fd00:1000:ffff:ffff:ffff:fffe
- name: InternalApi
name_lower: internal_api
mtu: 1350
vip: true
ipv6: true
subnets:
internal_api_subnet:
ipv6_subnet: fd00:fd00:fd00:2000::/64
ipv6_allocation_pools:
- start: fd00:fd00:fd00:2000::10
end: fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe
- name: Storage
name_lower: storage
mtu: 1350
vip: true
ipv6: true
subnets:
storage_subnet:
ipv6_subnet: fd00:fd00:fd00:3000::/64
ipv6_allocation_pools:
- start: fd00:fd00:fd00:3000::10
end: fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe
- name: StorageMgmt
name_lower: storage_mgmt
mtu: 1350
vip: true
ipv6: true
subnets:
storage_mgmt_subnet:
ipv6_subnet: fd00:fd00:fd00:4000::/64
ipv6_allocation_pools:
- start: fd00:fd00:fd00:4000::10
end: fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe
- name: Tenant
name_lower: tenant
mtu: 1350
subnets:
tenant_subnet:
ip_subnet: 172.16.0.0/24
allocation_pools:
- start: 172.16.0.10
end: 172.16.0.250

View File

@ -1,6 +0,0 @@
- network: storage_mgmt
- network: internal_api
- network: storage
- ip_address: 10.0.0.5 # CI jobs rely on this fixed IP for the external VIP
network: external
- network: ctlplane

View File

@ -1,6 +0,0 @@
- network: storage_mgmt
- network: internal_api
- network: storage
- ip_address: 2001:DB8:FD00:1000:0:0:0:5 # CI jobs rely on this fixed IP for the external VIP
network: external
- network: ctlplane

View File

@ -1,25 +0,0 @@
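# Registering the fcontext first means anything created under
# /var/lib/container-config-scripts keeps container_file_t across SELinux
# relabels, not only at creation time (standard sefcontext behavior).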
- name: Create fcontext entry for container-config-scripts
community.general.sefcontext:
target: "/var/lib/container-config-scripts(/.*)?"
setype: container_file_t
state: present
tags:
- container_config_scripts
- name: Create /var/lib/container-config-scripts
become: true
file:
path: /var/lib/container-config-scripts
state: directory
setype: container_file_t
tags:
- container_config_scripts
- name: Write container config scripts
become: true
no_log: True
tripleo_container_config_scripts:
config_data: "{{ lookup('file', tripleo_role_name + '/container_config_scripts.yaml', errors='ignore') | default({}, True) | from_yaml }}"
config_dir: /var/lib/container-config-scripts
tags:
- container_config_scripts

View File

@ -1,104 +0,0 @@
- name: Create and ensure setype for /var/log/containers directory
become: true
file:
path: /var/log/containers
state: directory
selevel: s0
mode: '0750'
tags:
- host_config
- name: Create ContainerLogStdoutPath directory
become: true
file:
path: "{{ container_log_stdout_path }}"
state: directory
selevel: s0
tags:
- host_config
- name: Create /var/lib/tripleo-config directory
become: true
file:
path: /var/lib/tripleo-config
state: directory
setype: container_file_t
selevel: s0
recurse: true
tags:
- host_config
- container_config
- container_config_tasks
- container_config_scripts
- container_startup_configs
# The container config files
# /var/lib/container-startup-configs.json is removed as we now write
# per-step files instead
- name: Clean old /var/lib/container-startup-configs.json file
become: true
file:
path: /var/lib/container-startup-configs.json
state: absent
tags:
- container_startup_configs
# Kept for legacy; can be removed in the Train cycle
- name: Clean old /var/lib/docker-container-startup-configs.json file
become: true
file:
path: /var/lib/docker-container-startup-configs.json
state: absent
tags:
- container_startup_configs
- import_tasks: common_container_config_scripts.yaml
# This file location is deprecated; the new location is:
# /var/lib/tripleo-config/container-startup-config/step_X/<container_name>.json
# Can be removed in the V cycle
- name: Write /var/lib/tripleo-config/container-startup-config-readme.txt
become: true
no_log: True
copy:
content: "Container startup configs moved to /var/lib/tripleo-config/container-startup-config"
dest: /var/lib/tripleo-config/container-startup-config-readme.txt
force: true
mode: '0600'
tags:
- container_startup_configs
- name: Generate startup configs files per step and per container
become: true
container_startup_config:
config_base_dir: /var/lib/tripleo-config/container-startup-config
config_data: "{{ lookup('template', (tripleo_role_name + '/docker_config.yaml'), errors='ignore') | default({}, True) | from_yaml }}"
tags:
- container_startup_configs
- name: Create /var/lib/kolla/config_files directory
become: true
file:
path: /var/lib/kolla/config_files
state: directory
setype: container_file_t
selevel: s0
recurse: true
tags:
- container_startup_configs
- name: Create /var/lib/config-data directory
become: true
file:
path: /var/lib/config-data
mode: '0755'
state: directory
setype: container_file_t
selevel: s0
- name: Write container config json files
become: true
tripleo_container_configs:
config_data: "{{ lookup('file', tripleo_role_name + '/kolla_config.yaml', errors='ignore') | default({}, True) | from_yaml }}"
tags:
- container_startup_configs

View File

@ -1,180 +0,0 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -e
[ "$DEBUG" = "false" ] || set -x
rm -rf /etc/puppet
mkdir -p /etc/puppet
cp -dR /tmp/puppet-etc/* /etc/puppet
rm -Rf /etc/puppet/ssl # not in use and causes permission errors
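# Expose the current deployment step to puppet as hieradata so manifests
# can gate resources on it.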
echo "{\"step\": $STEP}" > /etc/puppet/hieradata/docker_puppet.json
TAGS=""
if [ -n "$PUPPET_TAGS" ]; then
TAGS="--tags \"$PUPPET_TAGS\""
fi
if [ ! -z ${STEP_CONFIG+x} ]; then
# package and service should NOT be managed inside containers
echo -e "noop_resource('package')" | tee /etc/config.pp
echo -e "noop_resource('service')" | tee -a /etc/config.pp
echo -e "${STEP_CONFIG}" | tee -a /etc/config.pp
fi
CHECK_MODE=""
if [ -d "/tmp/puppet-check-mode" ]; then
mkdir -p /etc/puppet/check-mode
cp -a /tmp/puppet-check-mode/* /etc/puppet/check-mode
CHECK_MODE="--hiera_config /etc/puppet/check-mode/hiera.yaml"
fi
# Create a reference timestamp to easily find all files touched by
# puppet. The sync ensures we catch all the files we want despite
# differing timestamp granularity.
conf_data_path="/var/lib/config-data/${NAME}"
origin_of_time="${conf_data_path}.origin_of_time"
touch $origin_of_time
sync
export NET_HOST="${NET_HOST:-false}"
set +e
if [ "$NET_HOST" == "false" ]; then
export FACTER_hostname=$HOSTNAME
fi
export FACTER_uuid=$(cat /sys/class/dmi/id/product_uuid | tr '[:upper:]' '[:lower:]')
echo 'Running puppet'
# FIXME(bogdando): stdout may be falling behind the logged syslog messages
set -x
/usr/bin/puppet apply --summarize \
--detailed-exitcodes \
--color=false \
--modulepath=/etc/puppet/modules:/usr/share/openstack-puppet/modules \
$TAGS \
$CHECK_MODE \
/etc/config.pp \
2>&1 | logger -s -t puppet-user
rc=${PIPESTATUS[0]}
[ "$DEBUG" = "false" ] && set +x
set -e
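# ${PIPESTATUS[0]} above captured puppet's exit status rather than logger's.
# With --detailed-exitcodes puppet returns 2 for "changes applied" and 0 for
# "no changes"; anything else is treated as a real failure below.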
if [ $rc -ne 2 -a $rc -ne 0 ]; then
exit $rc
fi
verbosity=""
[ "$DEBUG" = "false" ] || verbosity="-v"
# Archive the resulting config unless NO_ARCHIVE is set (setting it disables archiving)
if [ -z "$NO_ARCHIVE" ]; then
archivedirs=("/etc" "/root" "/opt" "/var/www" "/var/spool/cron" "/var/lib/nova/.ssh")
rsync_srcs=""
for d in "${archivedirs[@]}"; do
if [ -d "$d" ]; then
rsync_srcs+=" $d"
fi
done
# On stack update, if a password was changed in a config file,
# some services (e.g. mysql) must change their internal state
# (e.g. password in mysql DB) when tripleo_container_manage restarts them;
# and they need the old password to achieve that.
# For those services, we update the config hash to notify
# tripleo_container_manage that a restart is needed, but we do not update
# the password file in container-puppet if the file already existed
# before and let the service regenerate it instead.
password_files="/root/.my.cnf"
exclude_files=""
for p in $password_files; do
if [ -f "$p" -a -f "${conf_data_path}$p" ]; then
exclude_files+=" --exclude=$p"
fi
done
# Exclude read-only mounted directories/files which we do not want
# to copy or delete.
ro_files="/etc/puppet/ /etc/puppetlabs/ /opt/puppetlabs/ /etc/pki/ca-trust/extracted "
ro_files+="/etc/pki/ca-trust/source/anchors /etc/pki/tls/certs/ca-bundle.crt "
ro_files+="/etc/pki/tls/certs/ca-bundle.trust.crt /etc/pki/tls/cert.pem "
ro_files+="/etc/hosts /etc/localtime /etc/hostname "
# /etc/openldap is bind mounted with "ro" option in keystone containers.
ro_files+="/etc/openldap"
for ro in $ro_files; do
if [ -e "$ro" ]; then
exclude_files+=" --exclude=$ro"
fi
done
echo "Evaluating config files to be removed for the $NAME configuration"
TMPFILE=$(mktemp /tmp/tmp.XXXXXXXXXX)
TMPFILE2=$(mktemp /tmp/tmp.XXXXXXXXXX)
trap 'rm -rf $TMPFILE $TMPFILE2' EXIT INT HUP
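# The dry-run rsync with --delete-after prints "deleting <path>" for files
# present in the destination but no longer in the sources; awk captures
# those paths so they can also be purged from the puppet-generated mirror
# further below.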
rsync -av -R --dry-run --delete-after $exclude_files $rsync_srcs ${conf_data_path} |\
awk '/^deleting/ {print $2}' > $TMPFILE
echo "Rsyncing config files from ${rsync_srcs} into ${conf_data_path}"
rsync -a $verbosity -R --delay-updates --delete-after $exclude_files $rsync_srcs ${conf_data_path}
# Also make a copy of files modified during puppet run
echo "Gathering files modified after $(stat -c '%y' $origin_of_time)"
# Purge obsoleted contents to maintain a fresh and filtered mirror
puppet_generated_path=/var/lib/config-data/puppet-generated/${NAME}
mkdir -p ${puppet_generated_path}
echo "Ensuring the removed config files are also purged in ${puppet_generated_path}:"
cat $TMPFILE | sort
cat $TMPFILE | xargs -n1 -r -I{} \
bash -c "rm -rf ${puppet_generated_path}/{}"
exec 5>&1
exec 1>$TMPFILE2
find $rsync_srcs -newer $origin_of_time -not -path '/etc/puppet*' -print0
exec 1>&5
echo "Files modified during puppet run:"
cat $TMPFILE2 | xargs -0 printf "%s\n" | sort -h
echo "Rsyncing the modified files into ${puppet_generated_path}"
rsync -a $verbosity -R -0 --delay-updates --delete-after $exclude_files \
--files-from=$TMPFILE2 / ${puppet_generated_path}
# Cleanup any special files that might have been copied into place
# previously because fixes for LP#1860607 did not cleanup and required
# manual intervention if a container hit this. We can safely remove these
# files because they should be bind mounted into containers
for ro in $ro_files; do
if [ -e "${puppet_generated_path}/${ro}" ]; then
rm -rf "${puppet_generated_path}/${ro}"
fi
done
# Write a checksum of the config-data dir, this is used as a
# salt to trigger container restart when the config changes
# note: while being excluded from the output, password files
# are still included in checksum computation
additional_checksum_files=""
excluded_original_passwords=""
for p in $password_files; do
if [ -f "$p" ]; then
additional_checksum_files+=" $p"
excluded_original_passwords+=" --exclude=/var/lib/config-data/*${p}"
fi
done
# We need to exclude the swift rings and backups as those change over time
# and containers do not need to restart if they change
EXCLUDE=--exclude='*/etc/swift/backups/*'\ --exclude='*/etc/swift/*.ring.gz'\ --exclude='*/etc/swift/*.builder'\ --exclude='*/etc/libvirt/passwd.db'\ ${excluded_original_passwords}
# We need to repipe the tar command through 'tar xO' to force text
# output because otherwise the sed command cannot work. The sed is
# needed because puppet puts timestamps as comments in cron and
# parsedfile resources, hence triggering a change at every redeploy
tar -c --mtime='1970-01-01' $EXCLUDE -f - ${conf_data_path} $additional_checksum_files | tar xO | \
sed '/^#.*HEADER.*/d; s/\#.HEADER.*//g' | md5sum | awk '{print $1}' > ${conf_data_path}.md5sum
tar -c --mtime='1970-01-01' $EXCLUDE -f - ${puppet_generated_path} $additional_checksum_files | tar xO \
| sed '/^#.*HEADER.*/d; s/\#.HEADER.*//g' | md5sum | awk '{print $1}' > ${puppet_generated_path}.md5sum
fi

View File

@ -1,101 +0,0 @@
---
# If a new play with potential facts gathering is added, make sure we set:
# any_errors_fatal: false
# ignore_unreachable: true
# So we don't break the MaxFailPercentage feature since these plays can't use
# Ansible strategies.
- hosts: localhost
name: Check if required variables are defined
tasks:
- ansible.builtin.fail:
msg: deploy_source_host variable is not defined
when: deploy_source_host is not defined
- ansible.builtin.fail:
msg: deploy_target_host variable is not defined
when: deploy_target_host is not defined
# Keep in mind that you will still need to use defaults in order to be
# able to pass Ansible's own syntax check for blocks like 'hosts'.
- hosts: all
name: Clear cached facts
# We don't want to gather facts, just clear them
gather_facts: false
tasks:
- ansible.builtin.meta: clear_facts
tags:
- facts
- hosts: "{{ deploy_source_host | default([]) }}:{{ deploy_target_host | default([]) }}"
name: Gather facts
strategy: tripleo_free
gather_facts: true
tasks:
- name: Set legacy facts
ansible.builtin.set_fact:
ansible_distribution: "{{ ansible_facts['distribution'] }}"
ansible_distribution_major_version: "{{ ansible_facts['distribution_major_version'] }}"
ansible_distribution_release: "{{ ansible_facts['distribution_release'] }}"
ansible_distribution_version: "{{ ansible_facts['distribution_version'] }}"
ansible_os_family: "{{ ansible_facts['os_family'] }}"
tags:
- facts
- hosts: all
name: Load global variables
gather_facts: "{{ gather_facts | default(false) }}"
any_errors_fatal: false
ignore_unreachable: true
tasks:
- ansible.builtin.include_vars: global_vars.yaml # noqa: missing-import
no_log: true
tags:
- always
- hosts: all
name: Include extra variables from files
gather_facts: "{{ gather_facts | default(false) }}"
any_errors_fatal: false
ignore_unreachable: true
tasks:
- name: Include Service VIP vars
ansible.builtin.include_vars:
file: "{{ playbook_dir }}/service_vip_vars.yaml"
name: service_vip_vars
ignore_errors: true
- name: Include OVN bridge MAC address variables
ansible.builtin.include_vars:
file: "{{ playbook_dir }}/ovn_bridge_mac_address_vars.yaml"
name: ovn_bridge_mac_address_vars
ignore_errors: true
tags:
- always
- hosts: "{{ deploy_target_host | default([]) }}"
name: Render all_nodes data as group_vars for overcloud
gather_facts: "{{ gather_facts | default(false) }}"
# we don't skip errors or ignore unreachable on this one because it's
# delegated to localhost. localhost should never be unreachable (tm)
any_errors_fatal: True
tasks:
- name: Render all_nodes data as group_vars for overcloud
delegate_to: localhost
become: false
run_once: true
tripleo_all_nodes_data:
tags:
- facts
- always
- hosts: "{{ deploy_target_host | default([]) }}"
name: Set all_nodes data as group_vars for overcloud
gather_facts: "{{ gather_facts | default(false) }}"
any_errors_fatal: false
ignore_unreachable: true
tasks:
- name: Set all_nodes data as group_vars for overcloud
ansible.builtin.include_vars: "{{ playbook_dir }}/group_vars/overcloud.json"
no_log: true
tags:
- facts
- always

View File

@ -1,46 +0,0 @@
- name: Deploy step tasks for step 0
delegate_to: localhost
run_once: true
debug:
msg: Use --start-at-task 'Deploy step tasks for step 0' to resume from this task
when: "tripleo_minor_update is not defined or tripleo_minor_update != 'true'"
- name: Ensure /var/log/journal exists
become: true
file: path=/var/log/journal state=directory mode=0750 owner=root group=root setype=var_log_t
- name: Check cloud-init status
shell: systemctl is-active cloud-init.service || systemctl is-enabled cloud-init.service
failed_when: false
become: true
register: cloud_init_enabled
- name: Check if cloud-init is disabled via kernel args
command: grep -q cloud-init=disabled /proc/cmdline
failed_when: false
check_mode: false
register: cloud_init_vendor_disabled
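# cloud-init exposes a 'stage' field in its status; it becomes empty once
# all stages have finished, so the task below polls (up to 50 x 5s) until
# the field is present but no longer set.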
- name: Wait for cloud-init to finish, if enabled
community.general.cloud_init_data_facts:
filter: status
register: res
until: >
res.cloud_init_data_facts.status.v1.stage is defined and
not res.cloud_init_data_facts.status.v1.stage
retries: 50
delay: 5
when:
- cloud_init_enabled.rc is defined
- cloud_init_enabled.rc == 0
- cloud_init_vendor_disabled.rc is not defined or cloud_init_vendor_disabled.rc != 0
- name: Create /var/lib/container-puppet
become: true
no_log: True
file: path=/var/lib/container-puppet state=directory setype=container_file_t selevel=s0 recurse=true
- name: Write container-puppet.sh
become: true
no_log: True
copy: src=container_puppet_script.yaml dest=/var/lib/container-puppet/container-puppet.sh force=yes mode=0755 setype=container_file_t
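# Only include the role-specific step 0 tasks when the rendered file
# actually exists for this role; the vars chain below builds the path and
# checks it so missing files do not fail the play.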
- include_tasks: "{{ _task_file_path }}"
vars:
_task_file_path: "{{ tripleo_role_name }}/deploy_steps_tasks_step0.yaml"
_full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}"
_test_file_exists: "{{ _full_file_path is exists }}"
when: _test_file_exists | bool

View File

@ -1,196 +0,0 @@
- name: Delete existing /var/lib/tripleo-config/check-mode directory for check mode
become: true
file:
path: /var/lib/tripleo-config/check-mode
state: absent
tags:
- host_config
- container_config
- container_config_tasks
- container_config_scripts
- container_startup_configs
when:
- ansible_check_mode|bool
check_mode: false
- name: Create /var/lib/tripleo-config/check-mode directory for check mode
become: true
file:
path: /var/lib/tripleo-config/check-mode
state: directory
setype: container_file_t
selevel: s0
recurse: true
tags:
- host_config
- container_config
- container_config_tasks
- container_config_scripts
- container_startup_configs
when:
- ansible_check_mode|bool
check_mode: false
# Puppet manifest for baremetal host configuration
- name: Write the puppet step_config manifest
become: true
no_log: True
copy:
content: "{{ lookup('file', tripleo_role_name + '/step_config.pp', errors='ignore') | default('', True) }}"
dest: /var/lib/tripleo-config/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}puppet_step_config.pp
force: true
mode: '0600'
tags:
- host_config
check_mode: false
diff: false
- name: Diff puppet step_config manifest changes for check mode
command:
diff -uN /var/lib/tripleo-config/puppet_step_config.pp /var/lib/tripleo-config/check-mode/puppet_step_config.pp
register: diff_results
tags:
- host_config
check_mode: false
when:
- ansible_check_mode|bool
- ansible_diff_mode
failed_when: false
changed_when: diff_results.rc == 1
- name: Diff puppet step_config manifest changes for check mode
debug:
var: diff_results.stdout_lines
changed_when: diff_results.rc == 1
when:
- ansible_check_mode|bool
- ansible_diff_mode
tags:
- host_config
# Puppet Containers Config directory used to generate container configs
- name: Create /var/lib/container-puppet
become: true
file:
path: /var/lib/container-puppet
state: directory
setype: container_file_t
selevel: s0
tags:
- container_config
- container_config_tasks
- name: Delete existing /var/lib/container-puppet/check-mode for check mode
become: true
file:
path: /var/lib/container-puppet/check-mode
state: absent
tags:
- container_config
check_mode: false
when:
- ansible_check_mode|bool
- name: Create /var/lib/container-puppet/check-mode for check mode
become: true
file:
path: /var/lib/container-puppet/check-mode
state: directory
setype: container_file_t
selevel: s0
tags:
- container_config
check_mode: false
when:
- ansible_check_mode|bool
- name: Write container-puppet.json file
become: true
no_log: True
copy:
content: "{{ lookup('template', tripleo_role_name + '/puppet_config.yaml', errors='ignore') | default([], True) | from_yaml | to_nice_json }}"
dest: /var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet.json
force: true
mode: '0600'
tags:
- container_config
check_mode: false
diff: false
- name: Diff container-puppet.json changes for check mode
command:
diff -uN /var/lib/container-puppet/container-puppet.json /var/lib/container-puppet/check-mode/container-puppet.json
register: diff_results
tags:
- container_config
check_mode: false
when:
- ansible_check_mode|bool
- ansible_diff_mode
failed_when: false
changed_when: diff_results.rc == 1
- name: Diff container-puppet.json changes for check mode
debug:
var: diff_results.stdout_lines
changed_when: diff_results.rc == 1
when:
- ansible_check_mode|bool
- ansible_diff_mode
tags:
- container_config
- name: Ensure config hashes are up-to-date for container startup configs
become: true
container_puppet_config:
update_config_hash_only: true
tags:
- container_startup_configs
- name: Set host puppet debugging fact string
set_fact:
host_puppet_config_debug: "--debug --verbose"
when:
- enable_puppet | bool
- enable_debug | bool
tags:
- host_config
- name: Check for /etc/puppet/check-mode directory for check mode
stat:
path: /etc/puppet/check-mode
register: check_mode_dir
when: ansible_check_mode|bool
tags:
- host_config
- container_config
- name: Create /etc/puppet/check-mode/hieradata directory for check mode
become: true
file:
path: /etc/puppet/check-mode/hieradata
state: directory
setype: container_file_t
selevel: s0
recurse: true
check_mode: false
when:
- ansible_check_mode|bool
- not check_mode_dir.stat.exists
tags:
- host_config
- container_config
- name: Create puppet check-mode files if they don't exist for check mode
become: true
shell: |
cp -a /etc/puppet/hiera.yaml /etc/puppet/check-mode/hiera.yaml
cp -a /etc/puppet/hieradata/* /etc/puppet/check-mode/hieradata/
sed -i 's/\/etc\/puppet\/hieradata/\/etc\/puppet\/check-mode\/hieradata/' /etc/puppet/check-mode/hiera.yaml
when:
- ansible_check_mode|bool
- not check_mode_dir.stat.exists
check_mode: false
tags:
- host_config
- container_config

View File

@ -1,140 +0,0 @@
#####################################################
# Per step puppet configuration of the baremetal host
#####################################################
- name: Write the config_step hieradata
become: true
no_log: True
copy:
content: "{{ dict(step=step|int) | to_json }}"
dest: /etc/puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}hieradata/config_step.json
force: true
mode: '0600'
check_mode: false
tags:
- host_config
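# Fire-and-forget pattern: launch puppet with async/poll 0 below, then poll
# the job via async_status every 10s (360 retries, matching the 3600s async
# window) so a long puppet run does not hold an Ansible worker.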
- name: Run puppet host configuration for step {{ step }}
become: true
async: 3600
poll: 0
when: enable_puppet|bool
shell: >-
set -o pipefail;
puppet apply {{ host_puppet_config_debug | default('') }}
--modulepath=/etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
--detailed-exitcodes
--summarize
--color=false
{{ ansible_check_mode | bool | ternary('--noop', '') }}
{{ ansible_check_mode | bool | ternary('--hiera_config /etc/puppet/check-mode/hiera.yaml', '') }}
/var/lib/tripleo-config/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}puppet_step_config.pp
2>&1 | logger -s -t puppet-user
register: puppet_host_async_result
no_log: true
tags:
- host_config
check_mode: false
- name: Wait for puppet host configuration to finish
async_status:
jid: "{{ puppet_host_async_result.ansible_job_id }}"
register: puppet_host_outputs
until: puppet_host_outputs.finished
retries: 360
delay: 10
failed_when:
- (not puppet_host_outputs.finished) or (puppet_host_outputs.rc is defined and puppet_host_outputs.rc not in [0, 2])
when:
- not (ansible_check_mode | bool)
- enable_puppet|bool
tags:
- host_config
- name: "Debug output for task: Run puppet host configuration for step {{ step }}"
debug:
var: puppet_host_outputs.stdout_lines | default([]) | union(puppet_host_outputs.stderr_lines | default([]))
when:
- not ansible_check_mode|bool
- enable_puppet | bool
- puppet_host_outputs.rc is defined
changed_when: puppet_host_outputs.rc == 2
failed_when: puppet_host_outputs.rc not in [0, 2]
tags:
- host_config
#######################################
# Pre-cache facts for puppet containers
#######################################
# We don't want the pre-cache tasks to be skipped in dry-run so we force
# check_mode to "no".
# https://bugzilla.redhat.com/show_bug.cgi?id=1738529
- name: Pre-cache facts for puppet containers
include_role:
name: tripleo_puppet_cache
tags:
- container_config
- container_config_tasks
#######################################
# Generate config via puppet containers
#######################################
- name: Include container-puppet tasks (generate config) during step 1
include_tasks: generate-config-tasks.yaml
when: step|int == 1
tags:
- container_config
#####################################
# Per step starting of the containers
#####################################
- name: Per step starting of the containers using tripleo-ansible
become: true
environment:
TRIPLEO_MINOR_UPDATE: '{{ tripleo_minor_update | default(false) }}'
block:
- name: "Manage containers for step {{ step }} with tripleo-ansible"
include_role:
name: tripleo_container_manage
vars:
tripleo_container_manage_concurrency: 5
tripleo_container_manage_cli: "{{ container_cli }}"
tripleo_container_manage_config: "/var/lib/tripleo-config/container-startup-config/step_{{ step }}"
tripleo_container_manage_config_id: "tripleo_step{{ step }}"
tripleo_container_manage_debug: "{{ enable_debug | bool }}"
tripleo_container_manage_healthcheck_disabled: "{{ container_healthcheck_disabled | bool }}"
tripleo_container_manage_log_path: "{{ container_log_stdout_path }}"
tripleo_container_manage_config_patterns: '*.json'
tripleo_container_manage_check_puppet_config: true
tags:
- container_startup_configs
########################################################
# Bootstrap tasks - run any tasks that have been defined
########################################################
- name: "Clean container_puppet_tasks for {{ansible_facts['hostname'] | lower}} step {{step}}"
become: true
file:
path: /var/lib/container-puppet/container-puppet-tasks{{step}}.json
state: absent
tags:
- container_config_tasks
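# The when clause below elects the alphabetically-first host in each
# service's group as the one to run that service's puppet tasks, which is
# how these bootstrap-style tasks end up executing on a single node.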
- name: Calculate container_puppet_tasks for {{ansible_facts['hostname'] | lower}} step {{step}}
set_fact:
"{{'host_container_puppet_tasks_' ~ step}}": "{{lookup('vars', 'host_container_puppet_tasks_' ~ step, default=[]) | union([item])}}"
loop: "{{container_puppet_tasks.get('step_' ~ step, [])}}"
when: (groups[item.service_name] | default ([]) | map('extract', hostvars, 'inventory_hostname') | sort | first | lower) == ansible_facts['hostname'] | lower
vars:
container_puppet_tasks: "{{ lookup('file', tripleo_role_name + '/container_puppet_tasks.yaml', errors='ignore') | default({}, True) | from_yaml }}"
tags:
- container_config_tasks
- name: Include container-puppet tasks for step {{step}}
include_tasks: host-container-puppet-tasks.yaml
when: ('host_container_puppet_tasks_' ~ step) is defined
tags:
- container_config_tasks

File diff suppressed because it is too large

View File

@ -1,54 +0,0 @@
- name: Block for container-puppet tasks (generate config) during step {{ step }} with tripleo-ansible
become: true
tags:
- container_config
block:
- name: Create base directory puppet configs
file:
path: "/var/lib/tripleo-config/container-puppet-config"
mode: '0700'
recurse: true
setype: container_file_t
- name: "Generate container puppet configs for step {{ step }}"
container_puppet_config:
check_mode: '{{ ansible_check_mode | bool | ternary(1, 0) }}'
config_vol_prefix: "/var/lib/config-data{{ ansible_check_mode | bool | ternary('/check-mode', '') }}"
debug: "{{ docker_puppet_debug | bool }}"
net_host: true
no_archive: false
puppet_config: "/var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet.json"
short_hostname: "{{ ansible_facts['hostname'] | lower }}"
step: "{{ step }}"
- name: "Manage Puppet containers (generate config) for step {{ step }}"
tripleo_container_manage:
config_id: "tripleo_puppet_step{{ step }}"
config_dir: "/var/lib/tripleo-config/container-puppet-config/step_{{ step }}"
config_patterns: "container-puppet-*.json"
concurrency: "{{ docker_puppet_process_count }}"
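# The check-mode diff below runs twice: once with -q for a terse summary
# and once in full; the shell's exit status (1 when the trees differ)
# drives the changed_when that follows.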
- name: Diff puppet-generated changes for check mode
become: true
shell: |
diff -ruN --no-dereference -q /var/lib/config-data/puppet-generated /var/lib/config-data/check-mode/puppet-generated
diff -ruN --no-dereference /var/lib/config-data/puppet-generated /var/lib/config-data/check-mode/puppet-generated
register: diff_results
tags:
- container_config
check_mode: false
when:
- ansible_check_mode|bool
- ansible_diff_mode
failed_when: false
changed_when: diff_results.rc == 1
- name: Diff puppet-generated changes for check mode
debug:
var: diff_results.stdout_lines
changed_when: diff_results.rc == 1
when:
- ansible_check_mode|bool
- ansible_diff_mode
tags:
- container_config

View File

@ -1,31 +0,0 @@
- name: Hiera config
include_role:
name: tripleo_hieradata
- name: Hiera symlink
become: true
file:
src: /etc/puppet/hiera.yaml
dest: /etc/hiera.yaml
state: link
force: true
- name: Hieradata from vars
include_role:
name: tripleo_hieradata
tasks_from: hieradata_vars.yaml
vars:
hieradata_templates_list:
- bootstrap_node
- all_nodes
- vip_data
- net_ip_map
- cloud_domain
- fqdn
- service_names
- service_configs
- extraconfig
- role_extraconfig
- ovn_chassis_mac_map
- name: Ensure ansible_managed hieradata file exists
include_role:
name: tripleo_hieradata
tasks_from: ansible_hieradata.yml

View File

@ -1,33 +0,0 @@
- name: Write container-puppet-tasks json file for {{ansible_facts['hostname'] | lower}} step {{step}}
no_log: True
copy:
content: "{{lookup ('vars', 'host_container_puppet_tasks_' ~ step, default=[]) | to_nice_json}}"
dest: "/var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet-tasks{{step}}.json"
force: true
mode: '0600'
check_mode: false
tags:
- container_config_tasks
- name: Block for container-puppet tasks (bootstrap tasks) for step {{ step }} with tripleo-ansible
tags:
- container_config_tasks
block:
- name: "Generate container puppet configs for step {{ step }}"
container_puppet_config:
check_mode: '{{ ansible_check_mode | bool | ternary(1, 0) }}'
config_vol_prefix: "/var/lib/config-data{{ ansible_check_mode | bool | ternary('/check-mode', '') }}"
debug: "{{ docker_puppet_debug | bool }}"
net_host: true
no_archive: true
puppet_config: "/var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet-tasks{{ step }}.json"
short_hostname: "{{ ansible_facts['hostname'] | lower }}"
step: "{{ step }}"
- name: "Manage Puppet containers (bootstrap tasks) for step {{ step }}"
tripleo_container_manage:
config_id: "tripleo_puppet_step{{ step }}"
config_dir: "/var/lib/tripleo-config/container-puppet-config/step_{{ step }}"
config_patterns: 'container-puppet-*.json'
debug: "{{ docker_puppet_debug | bool }}"
concurrency: "{{ docker_puppet_process_count }}"

View File

@ -1 +0,0 @@
{% include 'deploy-steps.j2' %}

View File

@ -1,443 +0,0 @@
heat_template_version: wallaby
description: >
Utility stack to convert an array of services into a set of combined
role configs.
parameters:
Services:
default: []
description: |
List of nested stack service templates.
type: comma_delimited_list
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
description: Parameters specific to the role
default: {}
type: json
resources:
ServiceChain:
type: OS::Heat::ResourceChain
properties:
resources: {get_param: Services}
concurrent: true
resource_properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
ServiceServerMetadataHook:
type: OS::TripleO::{{role.name}}ServiceServerMetadataHook
properties:
RoleData: {get_attr: [ServiceChain, role_data]}
PuppetStepConfig:
type: OS::Heat::Value
properties:
type: string
value:
yaql:
expression:
# select 'step_config' only from services that do not have a docker_config
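# Worked example with hypothetical data: given
#   service_names: [svcA, svcB], step_config: [cfgA, cfgB],
#   docker_config: [null, {...}]
# zip() yields [[svcA, cfgA, null], [svcB, cfgB, {...}]]; the two where()
# filters keep only [svcA, cfgA, null], and select($[1]).join("\n")
# returns "cfgA".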
coalesce($.data.service_names, []).zip(coalesce($.data.step_config, []), coalesce($.data.docker_config, [])).where($[2] = null).where($[1] != null).select($[1]).join("\n")
data:
service_names: {get_attr: [ServiceChain, role_data, service_name]}
step_config: {get_attr: [ServiceChain, role_data, step_config]}
docker_config: {get_attr: [ServiceChain, role_data, docker_config]}
DockerConfig:
type: OS::Heat::Value
properties:
type: json
value:
yaql:
expression:
# select 'docker_config' only from services that have it
coalesce($.data.service_names, []).zip(coalesce($.data.docker_config, [])).where($[1] != null).select($[1]).reduce($1.mergeWith($2), {})
data:
service_names: {get_attr: [ServiceChain, role_data, service_name]}
docker_config: {get_attr: [ServiceChain, role_data, docker_config]}
DockerConfigScripts:
type: OS::Heat::Value
properties:
type: json
value:
yaql:
expression:
# select 'container_config_scripts' only from services that have it
coalesce($.data.service_names, []).zip(coalesce($.data.container_config_scripts, [])).where($[1] != null).select($[1]).reduce($1.mergeWith($2), {})
data:
service_names: {get_attr: [ServiceChain, role_data, service_name]}
container_config_scripts: {get_attr: [ServiceChain, role_data, container_config_scripts]}
MonitoringSubscriptionsConfig:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('monitoring_subscription')).where($ != null)).flatten()
data: {role_data: {get_attr: [ServiceChain, role_data]}}
ServiceNames:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
filter:
- [null]
- {get_attr: [ServiceChain, role_data, service_name]}
GlobalConfigSettings:
type: OS::Heat::Value
properties:
type: json
value:
map_merge:
yaql:
expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('global_config_settings')).where($ != null)).flatten()
data: {role_data: {get_attr: [ServiceChain, role_data]}}
KeystoneResourcesConfigs:
type: OS::Heat::Value
properties:
type: json
value:
map_merge:
yaql:
expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('keystone_resources')).where($ != null)).flatten()
data: {role_data: {get_attr: [ServiceChain, role_data]}}
CoreServices:
type: OS::Heat::Value
properties:
type: json
value:
map_merge:
yaql:
expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('core_services')).where($ != null)).flatten()
data: {role_data: {get_attr: [ServiceChain, role_data]}}
ServiceConfigSettings:
type: OS::Heat::Value
properties:
type: json
value:
yaql:
expression: coalesce($.data.role_data, []).where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
data: {role_data: {get_attr: [ServiceChain, role_data]}}
DeployStepsTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
expression: coalesce($.data, []).where($ != null).select($.get('deploy_steps_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
ExternalDeployTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
expression: coalesce($.data, []).where($ != null).select($.get('external_deploy_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
ExternalPostDeployTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
expression: coalesce($.data, []).where($ != null).select($.get('external_post_deploy_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
ScaleTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
expression: coalesce($.data, []).where($ != null).select($.get('scale_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
ExternalUpdateTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
expression: coalesce($.data, []).where($ != null).select($.get('external_update_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
ExternalUpgradeTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
expression: coalesce($.data, []).where($ != null).select($.get('external_upgrade_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
PreUpgradeRollingTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
expression: coalesce($.data, []).where($ != null).select($.get('pre_upgrade_rolling_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
UpgradeTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
expression: coalesce($.data, []).where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
PostUpgradeTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
expression: coalesce($.data, []).where($ != null).select($.get('post_upgrade_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
UpdateTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
expression: coalesce($.data, []).where($ != null).select($.get('update_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
PostUpdateTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
expression: coalesce($.data, []).where($ != null).select($.get('post_update_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
UpgradeBatchTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
expression: coalesce($.data, []).where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
PuppetConfig:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
expression: coalesce($.data, []).where($ != null).select($.get('puppet_config')).where($ != null).distinct()
data: {get_attr: [ServiceChain, role_data]}
KollaConfig:
type: OS::Heat::Value
properties:
type: json
value:
yaql:
expression: coalesce($.data.role_data, []).where($ != null).select($.get('kolla_config')).where($ != null).reduce($1.mergeWith($2), {})
data: {role_data: {get_attr: [ServiceChain, role_data]}}
# DEPRECATED in favor of ContainerPuppetTasks.
DockerPuppetTasks:
type: OS::Heat::Value
properties:
type: json
value:
yaql:
# This extracts docker_puppet_tasks for each step,
# and merges {service_name: the_service} for each item, so we
# can later filter based on the bootstrap hostname for the service
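# Worked example with hypothetical data: an item with
#   docker_puppet_tasks: {step_1: {...task...}} and service_name: keystone
# is merged into {step_1: {...task..., service_name: keystone}}; groupBy
# then collects every service's entry under its step key, producing e.g.
#   {step_1: [{...}, {...}], step_2: [{...}]}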
expression: dict(coalesce($.data, []).where($ != null).select([$.get('docker_puppet_tasks'), $.get('service_name')]).where($[0] != null).select([$[0], dict($[0].keys().zip(dict(service_name=>$[1]).repeat(len($[0].keys()))))]).select($[0].mergeWith($[1])).selectMany($.items()).groupBy($[0], $[1]))
data: {get_attr: [ServiceChain, role_data]}
ContainerPuppetTasks:
type: OS::Heat::Value
properties:
type: json
value:
yaql:
# This extracts container_puppet_tasks for each step,
# and merges {service_name: the_service} for each item, so we
# can later filter based on the bootstrap hostname for the service
expression: dict(coalesce($.data, []).where($ != null).select([$.get('container_puppet_tasks'), $.get('service_name')]).where($[0] != null).select([$[0], dict($[0].keys().zip(dict(service_name=>$[1]).repeat(len($[0].keys()))))]).select($[0].mergeWith($[1])).selectMany($.items()).groupBy($[0], $[1]))
data: {get_attr: [ServiceChain, role_data]}
HostFirewallTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter any identical tasks
expression: coalesce($.data, []).where($ != null).select($.get('host_firewall_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
HostPrepTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter any identical tasks
expression: coalesce($.data, []).where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
PreDeployStepTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter any identical tasks
expression: coalesce($.data, []).where($ != null).select($.get('pre_deploy_step_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
AnsibleGroupVars:
type: OS::Heat::Value
properties:
type: json
value:
map_merge:
yaql:
expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('ansible_group_vars')).where($ != null)).flatten()
data: {role_data: {get_attr: [ServiceChain, role_data]}}
FirewallRules:
type: OS::Heat::Value
properties:
type: json
value:
map_merge:
yaql:
expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_rules')).where($ != null)).flatten()
data: {role_data: {get_attr: [ServiceChain, role_data]}}
FirewallFrontendRules:
type: OS::Heat::Value
properties:
type: json
value:
frontend:
map_merge:
yaql:
expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_frontend_rules')).where($ != null)).flatten()
data: {role_data: {get_attr: [ServiceChain, role_data]}}
ssl_frontend:
map_merge:
yaql:
expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_ssl_frontend_rules')).where($ != null)).flatten()
data: {role_data: {get_attr: [ServiceChain, role_data]}}
edge_frontend:
map_merge:
yaql:
expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_edge_frontend_rules')).where($ != null)).flatten()
data: {role_data: {get_attr: [ServiceChain, role_data]}}
edge_ssl_frontend:
map_merge:
yaql:
expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_edge_ssl_frontend_rules')).where($ != null)).flatten()
data: {role_data: {get_attr: [ServiceChain, role_data]}}
outputs:
role_data:
description: Combined Role data for this set of services.
value:
service_names: {get_attr: [ServiceNames, value]}
monitoring_subscriptions: {get_attr: [MonitoringSubscriptionsConfig, value]}
config_settings:
yaql:
expression: $.data.where($ != null).reduce($1.mergeWith($2), {})
data: {get_attr: [ServiceChain, role_data, config_settings]}
global_config_settings: {get_attr: [GlobalConfigSettings, value]}
service_config_settings: {get_attr: [ServiceConfigSettings, value]}
step_config: {get_attr: [PuppetStepConfig, value]}
deploy_steps_tasks: {get_attr: [DeployStepsTasks, value]}
external_deploy_tasks: {get_attr: [ExternalDeployTasks, value]}
external_post_deploy_tasks: {get_attr: [ExternalPostDeployTasks, value]}
external_update_tasks: {get_attr: [ExternalUpdateTasks, value]}
external_upgrade_tasks: {get_attr: [ExternalUpgradeTasks, value]}
scale_tasks: {get_attr: [ScaleTasks, value]}
pre_upgrade_rolling_tasks: {get_attr: [PreUpgradeRollingTasks, value]}
upgrade_tasks: {get_attr: [UpgradeTasks, value]}
post_upgrade_tasks: {get_attr: [PostUpgradeTasks, value]}
update_tasks: {get_attr: [UpdateTasks, value]}
post_update_tasks: {get_attr: [PostUpdateTasks, value]}
upgrade_batch_tasks: {get_attr: [UpgradeBatchTasks, value]}
service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
ansible_group_vars:
map_merge:
- ansible_async_dir: "/tmp/.ansible_async"
- {get_attr: [AnsibleGroupVars, value]}
keystone_resources: {get_attr: [KeystoneResourcesConfigs, value]}
# Firewall rules
firewall_rules: {get_attr: [FirewallRules, value]}
firewall_frontend_rules: {get_attr: [FirewallFrontendRules, value, frontend]}
firewall_ssl_frontend_rules: {get_attr: [FirewallFrontendRules, value, ssl_frontend]}
firewall_edge_frontend_rules: {get_attr: [FirewallFrontendRules, value, edge_frontend]}
firewall_edge_ssl_frontend_rules: {get_attr: [FirewallFrontendRules, value, edge_ssl_frontend]}
# Keys to support docker/services
puppet_config: {get_attr: [PuppetConfig, value]}
kolla_config: {get_attr: [KollaConfig, value]}
docker_config: {get_attr: [DockerConfig, value]}
container_config_scripts: {get_attr: [DockerConfigScripts, value]}
container_puppet_tasks:
map_merge:
- {get_attr: [ContainerPuppetTasks, value]}
- {get_attr: [DockerPuppetTasks, value]}
host_prep_tasks:
list_concat:
- {get_attr: [HostFirewallTasks, value]}
- {get_attr: [HostPrepTasks, value]}
pre_deploy_step_tasks: {get_attr: [PreDeployStepTasks, value]}

View File

@ -1,74 +0,0 @@
heat_template_version: wallaby
parameters:
name:
type: string
default: ""
actions:
type: comma_delimited_list
default: ['CREATE', 'UPDATE']
server:
type: string
config:
type: string
input_values:
type: json
description: input values for the software deployments
default: {}
input_key:
type: string
default: 'get_input'
signal_transport:
type: string
default: 'CFN_SIGNAL'
input_values_validate:
type: string
default: 'LAX'
resources:
TripleODeployment:
type: OS::Heat::Value
properties:
value:
name: {get_param: name}
server: {get_param: server}
config: {get_param: config}
input_values: {get_param: input_values}
deployment: {get_resource: TripleOSoftwareDeployment}
TripleOSoftwareDeployment:
type: OS::Heat::SoftwareDeployment
properties:
name: deployment_resource
config: {get_param: config}
server: 'fake_server_id'
input_values: {get_param: input_values}
signal_transport: NO_SIGNAL
actions: {get_param: actions}
outputs:
deploy_status_code:
value: 0
deploy_stderr:
value: ''
deploy_stdout:
value: ''
show:
value: ''
update_managed_packages:
description: boolean value indicating whether to upgrade managed packages
value: false
hostname:
description: hostname
value: ''

View File

@ -1,74 +0,0 @@
heat_template_version: wallaby
parameters:
name:
type: string
default: ""
actions:
type: comma_delimited_list
default: ['CREATE', 'UPDATE']
server:
type: string
config:
type: string
input_values:
type: json
description: input values for the software deployments
default: {}
input_key:
type: string
default: 'get_input'
signal_transport:
type: string
default: 'CFN_SIGNAL'
input_values_validate:
type: string
default: 'LAX'
resources:
TripleODeployment:
type: OS::Heat::Value
properties:
value:
name: {get_param: name}
server: {get_param: server}
config: {get_param: config}
input_values: {get_param: input_values}
deployment: {get_resource: TripleOSoftwareDeployment}
TripleOSoftwareDeployment:
type: OS::Heat::StructuredDeployment
properties:
name: deployment_resource
config: {get_param: config}
server: 'fake_server_id'
input_values: {get_param: input_values}
signal_transport: NO_SIGNAL
actions: {get_param: actions}
outputs:
deploy_status_code:
value: 0
deploy_stderr:
value: ''
deploy_stdout:
value: ''
show:
value: ''
update_managed_packages:
description: boolean value indicating whether to upgrade managed packages
value: false
hostname:
description: hostname
value: ''

View File

@ -1,95 +0,0 @@
#!/usr/bin/env python3
#
# Copyright 2022 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import shutil
import subprocess
import sys
SOCKET = "unix:/run/podman/podman.sock"
FORMAT = ("{service: .Name, container: .Id, status: .State.Running, "
"healthy: .State.Health.Status}")
SKIP_LIST = ['_bootstrap', 'container-puppet-', '_db_sync',
'_ensure_', '_fix_', '_init_', '_map_', '_wait_',
'mysql_data_ownership', 'configure_cms_options']
def execute(cmd, workdir: str = None,
prev_proc: subprocess.Popen = None) -> subprocess.Popen:
# Note(mmagr): When this script is executed by collectd-sensubility started
#              via collectd, it runs without root permissions but inherits
#              its environment from collectd, which runs as root. We need
#              to keep sensubility from accessing /root when using
#              podman-remote. See
#              https://bugzilla.redhat.com/show_bug.cgi?id=2091076 for more info.
proc_env = os.environ.copy()
proc_env["HOME"] = "/tmp"
if type(cmd[0]) is list: # multiple piped commands
last = prev_proc
for c in cmd:
last = execute(c, workdir, last)
return last
else: # single command
inpipe = prev_proc.stdout if prev_proc is not None else None
proc = subprocess.Popen(cmd, cwd=workdir, env=proc_env, stdin=inpipe,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if prev_proc is not None:
prev_proc.stdout.close()
prev_proc.stderr.close()
return proc
def fetch_container_health(containers):
out = []
for cont in set(containers.split('\n')) - set(SKIP_LIST):
if not cont:
continue
proc = execute([
[shutil.which('podman-remote'),
'--url', SOCKET, 'inspect', cont],
[shutil.which('jq'), '.[] | %s' % FORMAT]
])
o, e = proc.communicate()
if proc.returncode != 0:
msg = "Failed to fetch status of %s: %s" % (cont, e.decode())
return proc.returncode, msg
item = json.loads(o.decode())
if item['healthy']:  # may be None/empty when the container has no healthcheck
item['status'] = item['healthy']
else:
item['status'] = 'running' if item['status'] else 'stopped'
item['healthy'] = int(item['healthy'] == 'healthy')
out.append(item)
return 0, out
if __name__ == "__main__":
proc = execute([shutil.which('podman-remote'), '--url', SOCKET,
'ps', '--all', '--format', '{{.Names}}'])
o, e = proc.communicate()
if proc.returncode != 0:
print("Failed to list containers:\n%s\n%s" % (o.decode(), e.decode()))
sys.exit(1)
rc, status = fetch_container_health(o.decode())
if rc != 0:
print("Failed to inspect containers:\n%s" % status)
sys.exit(rc)
print(json.dumps(status))

View File

@ -1,15 +0,0 @@
#!/bin/bash
set -e
# Wait until we know the mysql server is up and responding
timeout ${DB_MAX_TIMEOUT:-60} /bin/bash -c 'until mysqladmin -uroot ping 2>/dev/null; do sleep 1; done'
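# DB_MAX_TIMEOUT may be overridden from the environment before invoking this
# script, e.g. (illustrative): DB_MAX_TIMEOUT=120 bash <this script>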
# After an upgrade, make sure that the running mysql had a chance to
# update its data table on disk.
mysql_upgrade
# Upgrade to 10.3: the default table row format changed from COMPACT
# to DYNAMIC, so upgrade the existing tables.
compact_tables=$(mysql -se 'SELECT CONCAT("`",TABLE_SCHEMA,"`.`",TABLE_NAME,"`") FROM information_schema.tables WHERE ENGINE = "InnoDB" and ROW_FORMAT = "Compact";');
for i in $compact_tables; do echo converting row format of table $i; mysql -e "ALTER TABLE $i ROW_FORMAT=DYNAMIC;"; done;

View File

@ -1,161 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2022 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from configparser import ConfigParser
import logging
import os
import subprocess
import sys
from urllib import parse as urlparse
config = ConfigParser(strict=False)
debug = os.getenv('__OS_DEBUG', 'false')
if debug.lower() == 'true':
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
logging.basicConfig(stream=sys.stdout, level=loglevel)
LOG = logging.getLogger('nova_api_ensure_default_cells')
NOVA_CFG = '/etc/nova/nova.conf'
CELL0_ID = '00000000-0000-0000-0000-000000000000'
DEFAULT_CELL_NAME = 'default'
def template_netloc_credentials(netloc, index=None):
if '@' in netloc:
userpass, hostport = netloc.split('@', 1)
has_pass = ':' in userpass
if index is None:
cred_template = '{username}'
if has_pass:
cred_template += ':{password}'
else:
cred_template = '{{username{index}}}'.format(index=index)
if has_pass:
cred_template += ':{{password{index}}}'.format(index=index)
return '@'.join((cred_template, hostport))
else:
return netloc
def template_url(url):
parsed = urlparse.urlparse(url)
if ',' in parsed.netloc:
orig_netlocs = parsed.netloc.split(',')
templ_netlocs = []
index = 0
for netloc in orig_netlocs:
index += 1
templ_netlocs.append(template_netloc_credentials(netloc, index))
new_netloc = ','.join(templ_netlocs)
else:
new_netloc = template_netloc_credentials(parsed.netloc)
return parsed._replace(netloc=new_netloc).geturl()
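# Worked example (hypothetical URL): template_url() turns
#   rabbit://user:secret@host1:5672,user:secret@host2:5672/
# into
#   rabbit://{username1}:{password1}@host1:5672,{username2}:{password2}@host2:5672/
# leaving single-host URLs templated as {username}:{password}.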
def parse_list_cells(list_cells_output):
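# Assumed input (abridged, column layout only): the ASCII table printed by
# `nova-manage cell_v2 list_cells --verbose`:
#   +------+------+---------------+---------------------+-----+
#   | Name | UUID | Transport URL | Database Connection | ... |
#   +------+------+---------------+---------------------+-----+
#   | ...  | ...  | ...           | ...                 | ... |
#   +------+------+---------------+---------------------+-----+
# rows [3:-2] are the data rows; each is split on '|'.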
list_cells_lines = list_cells_output.split('\n')
if len(list_cells_lines) < 5:
raise ValueError('Invalid nova-manage cell_v2 list_cells output')
data_rows = list_cells_lines[3:-2]
by_name = {}
by_uuid = {}
for row in data_rows:
parts = row.split('|')
entry = {
'name': parts[1].strip(),
'uuid': parts[2].strip(),
'transport_url': parts[3].strip(),
'database_connection': parts[4].strip(),
}
by_name[entry['name']] = entry
by_uuid[entry['uuid']] = entry
return by_name, by_uuid
def create_or_update_default_cells(cell0_db, default_db, default_transport_url):
list_cells_cmd = ['/usr/bin/nova-manage', 'cell_v2', 'list_cells', '--verbose']
list_cells_output = subprocess.check_output(list_cells_cmd, encoding='utf-8')
cells_by_name, cells_by_uuid = parse_list_cells(list_cells_output)
if CELL0_ID in cells_by_uuid:
LOG.info('Setting cell0 database connection to \'{}\''.format(cell0_db))
cmd = [
'/usr/bin/nova-manage', 'cell_v2', 'update_cell',
'--cell_uuid', CELL0_ID,
'--database_connection', cell0_db,
'--transport-url', 'none:///'
]
else:
LOG.info('Creating cell0 with database connection \'{}\''.format(cell0_db))
cmd = [
'/usr/bin/nova-manage', 'cell_v2', 'map_cell0',
'--database_connection', cell0_db
]
subprocess.check_call(cmd)
if DEFAULT_CELL_NAME in cells_by_name:
LOG.info('Setting default cell database connection to \'{}\' and transport url to \'{}\''.format(
default_db, default_transport_url))
cmd = [
'/usr/bin/nova-manage', 'cell_v2', 'update_cell',
'--cell_uuid', cells_by_name[DEFAULT_CELL_NAME]['uuid'],
'--database_connection', default_db,
'--transport-url', default_transport_url
]
else:
LOG.info('Creating default cell with database connection \'{}\' and transport url \'{}\''.format(
default_db, default_transport_url))
cmd = [
'/usr/bin/nova-manage', 'cell_v2', 'create_cell',
'--name', DEFAULT_CELL_NAME,
'--database_connection', default_db,
'--transport-url', default_transport_url
]
subprocess.check_call(cmd)
def replace_db_name(db_url, db_name):
return urlparse.urlparse(db_url)._replace(path=db_name).geturl()
if __name__ == '__main__':
if os.path.isfile(NOVA_CFG):
try:
config.read(NOVA_CFG)
except Exception:
LOG.exception('Error while reading nova.conf:')
sys.exit(1)
else:
LOG.error('Nova configuration file %s does not exist', NOVA_CFG)
sys.exit(1)
default_database_connection = config.get('database', 'connection')
cell0_database_connection = replace_db_name(default_database_connection, 'nova_cell0')
default_transport_url = config.get('DEFAULT', 'transport_url')
create_or_update_default_cells(
template_url(cell0_database_connection),
template_url(default_database_connection),
template_url(default_transport_url)
)

View File

@ -1,70 +0,0 @@
#!/bin/bash
set -e
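# Example invocation (hypothetical names): nova_libvirt_init_secret.sh ceph:openstack
# Each CLUSTER:CLIENT tuple needs a matching /etc/ceph/<CLUSTER>.conf and
# /etc/ceph/<CLUSTER>.client.<CLIENT>.keyring on the host.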
CEPH_INFO=($*)
if [ -z "$CEPH_INFO" ]; then
echo "error: At least one CLUSTER:CLIENT tuple must be specified"
exit 1
fi
echo "------------------------------------------------"
echo "Initializing virsh secrets for: ${CEPH_INFO[@]}"
for INFO in ${CEPH_INFO[@]}; do
IFS=: read CLUSTER CLIENT <<< $INFO
if [ ! -f /etc/ceph/${CLUSTER}.conf ]; then
echo "Error: /etc/ceph/${CLUSTER}.conf was not found"
echo "Path to nova_libvirt_init_secret was ${CEPH_INFO}"
exit 1
fi
FSID=$(awk '$1 == "fsid" {print $3}' /etc/ceph/${CLUSTER}.conf)
if [ -z "${FSID}" ]; then
echo "Error: /etc/ceph/${CLUSTER}.conf contained an empty fsid definition"
echo "Check your ceph configuration"
exit 1
fi
echo "--------"
echo "Initializing the virsh secret for '$CLUSTER' cluster ($FSID) '$CLIENT' client"
# Ensure the secret XML file exists. Puppet should have created a secret.xml
# file for the first cluster's secret, so detect when to use that file.
if grep -q $FSID /etc/nova/secret.xml; then
SECRET_FILE="/etc/nova/secret.xml"
SECRET_NAME="client.${CLIENT} secret"
else
SECRET_FILE="/etc/nova/${CLUSTER}-secret.xml"
SECRET_NAME="${CLUSTER}.client.${CLIENT} secret"
fi
if [ ! -f $SECRET_FILE ]; then
echo "Creating $SECRET_FILE"
cat <<EOF > $SECRET_FILE
<secret ephemeral='no' private='no'>
<usage type='ceph'>
<name>${SECRET_NAME}</name>
</usage>
<uuid>${FSID}</uuid>
</secret>
EOF
else
echo "The $SECRET_FILE file already exists"
fi
# Ensure the libvirt secret is defined
if /usr/bin/virsh secret-list | grep -q $FSID; then
echo "The virsh secret for $FSID has already been defined"
else
/usr/bin/virsh secret-define --file $SECRET_FILE
fi
# Fetch the key from the keyring and ensure the secret is set
KEY=$(awk '$1 == "key" {print $3}' /etc/ceph/${CLUSTER}.client.${CLIENT}.keyring)
if /usr/bin/virsh secret-get-value $FSID 2>/dev/null | grep -q $KEY; then
echo "The virsh secret for $FSID has already been set"
else
/usr/bin/virsh secret-set-value --secret $FSID --base64 $KEY
fi
done

View File

@ -1,247 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import pwd
import selinux
import stat
import sys
debug = os.getenv('__OS_DEBUG', 'false')
if debug.lower() == 'true':
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
logging.basicConfig(stream=sys.stdout, level=loglevel)
LOG = logging.getLogger('nova_statedir')
class PathManager(object):
"""Helper class to manipulate ownership of a given path"""
def __init__(self, path):
self.path = path
self.uid = None
self.gid = None
self.is_dir = None
self.secontext = None
self._update()
def _update(self):
try:
statinfo = os.stat(self.path)
self.is_dir = stat.S_ISDIR(statinfo.st_mode)
self.uid = statinfo.st_uid
self.gid = statinfo.st_gid
self.secontext = selinux.lgetfilecon(self.path)[1]
except Exception:
LOG.exception('Could not update metadata for %s', self.path)
raise
def __str__(self):
return "uid: {} gid: {} path: {}{}".format(
self.uid,
self.gid,
self.path,
'/' if self.is_dir else ''
)
def has_owner(self, uid, gid):
return self.uid == uid and self.gid == gid
def has_either(self, uid, gid):
return self.uid == uid or self.gid == gid
def chown(self, uid, gid):
target_uid = -1
target_gid = -1
if self.uid != uid:
target_uid = uid
if self.gid != gid:
target_gid = gid
if (target_uid, target_gid) != (-1, -1):
LOG.info('Changing ownership of %s from %d:%d to %d:%d',
self.path,
self.uid,
self.gid,
self.uid if target_uid == -1 else target_uid,
self.gid if target_gid == -1 else target_gid)
try:
os.chown(self.path, target_uid, target_gid)
self._update()
except Exception:
LOG.exception('Could not change ownership of %s: ',
self.path)
raise
else:
LOG.info('Ownership of %s already %d:%d',
self.path,
uid,
gid)
def chcon(self, context):
# If the path is a directory, returns whether to recursively set the context
try:
try:
selinux.lsetfilecon(self.path, context)
LOG.info('Setting selinux context of %s to %s',
self.path, context)
return True
except OSError as e:
if self.is_dir and e.errno == 95:
# Operation not supported, assume NFS mount and skip
LOG.info('Setting selinux context not supported for %s',
self.path)
return False
else:
raise
except Exception:
LOG.exception('Could not set selinux context of %s to %s:',
self.path, context)
raise
class NovaStatedirOwnershipManager(object):
"""Class to manipulate the ownership of the nova statedir (/var/lib/nova).
The nova uid/gid differ on the host and container images. An upgrade
that switches from host systemd services to docker requires a change in
ownership. Previously this was a naive recursive chown; however, this
caused issues when nova instances are shared via an NFS mount: any open
filehandles in qemu/libvirt fail with an I/O error (LP1778465).
Instead the upgrade/FFU ansible tasks now lay down a marker file when
stopping and disabling the host systemd services. We use this file to
determine the host nova uid/gid. We then walk the tree and update any
files that have the host uid/gid to the docker nova uid/gid. As files
owned by root/qemu etc. are ignored, this avoids the issues with open
filehandles. The marker is removed once the tree has been walked.
For subsequent runs, or for a new deployment, we simply ensure that the
docker nova user/group owns all directories. This is required as the
directories are created with root ownership in host_prep_tasks (the
docker nova uid/gid is not known in this context).
"""
def __init__(self, statedir, upgrade_marker='upgrade_marker',
nova_user='nova', secontext_marker='../_nova_secontext',
exclude_paths=None):
self.statedir = statedir
self.nova_user = nova_user
self.upgrade_marker_path = os.path.join(statedir, upgrade_marker)
self.secontext_marker_path = os.path.normpath(os.path.join(statedir, secontext_marker))
self.upgrade = os.path.exists(self.upgrade_marker_path)
self.exclude_paths = [self.upgrade_marker_path]
if exclude_paths is not None:
for p in exclude_paths:
if not p.startswith(os.path.sep):
p = os.path.join(self.statedir, p)
self.exclude_paths.append(p)
self.target_uid, self.target_gid = self._get_nova_ids()
self.previous_uid, self.previous_gid = self._get_previous_nova_ids()
self.id_change = (self.target_uid, self.target_gid) != \
(self.previous_uid, self.previous_gid)
self.target_secontext = self._get_secontext()
def _get_nova_ids(self):
nova_uid, nova_gid = pwd.getpwnam(self.nova_user)[2:4]
return nova_uid, nova_gid
def _get_previous_nova_ids(self):
if self.upgrade:
statinfo = os.stat(self.upgrade_marker_path)
return statinfo.st_uid, statinfo.st_gid
else:
return self._get_nova_ids()
def _get_secontext(self):
if os.path.exists(self.secontext_marker_path):
return selinux.lgetfilecon(self.secontext_marker_path)[1]
else:
return None
def _walk(self, top, chcon=True):
for f in os.listdir(top):
pathname = os.path.join(top, f)
if pathname in self.exclude_paths:
continue
try:
pathinfo = PathManager(pathname)
LOG.info("Checking %s", pathinfo)
if pathinfo.is_dir:
# Always chown the directories
pathinfo.chown(self.target_uid, self.target_gid)
chcon_r = chcon
if chcon:
chcon_r = pathinfo.chcon(self.target_secontext)
self._walk(pathname, chcon_r)
elif self.id_change:
# Only chown files if it's an upgrade and the file is owned by
# the host nova uid/gid
pathinfo.chown(
self.target_uid if pathinfo.uid == self.previous_uid
else pathinfo.uid,
self.target_gid if pathinfo.gid == self.previous_gid
else pathinfo.gid
)
if chcon:
pathinfo.chcon(self.target_secontext)
except Exception:
# Likely to have been caused by external systems
# interacting with this directory tree,
# especially on NFS e.g snapshot dirs.
# Just ignore it and continue on to the next entry
continue
def run(self):
LOG.info('Applying nova statedir ownership')
LOG.info('Target ownership for %s: %d:%d',
self.statedir,
self.target_uid,
self.target_gid)
pathinfo = PathManager(self.statedir)
LOG.info("Checking %s", pathinfo)
pathinfo.chown(self.target_uid, self.target_gid)
chcon = self.target_secontext is not None
if chcon:
pathinfo.chcon(self.target_secontext)
self._walk(self.statedir, chcon)
if self.upgrade:
LOG.info('Removing upgrade_marker %s',
self.upgrade_marker_path)
os.unlink(self.upgrade_marker_path)
LOG.info('Nova statedir ownership complete')
def get_exclude_paths():
exclude_paths = os.environ.get('NOVA_STATEDIR_OWNERSHIP_SKIP')
if exclude_paths is not None:
exclude_paths = exclude_paths.split(os.pathsep)
return exclude_paths
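# Hypothetical example: NOVA_STATEDIR_OWNERSHIP_SKIP="snapshots:/var/lib/nova/keep"
# skips both entries; relative paths are resolved against the statedir
# (/var/lib/nova) by the manager above.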
if __name__ == '__main__':
NovaStatedirOwnershipManager('/var/lib/nova', exclude_paths=get_exclude_paths()).run()

View File

@ -1,109 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
from configparser import ConfigParser
import logging
import logging.handlers
import os
import sys
import time
from keystoneauth1 import loading
from keystoneauth1 import session
from novaclient import client
from novaclient.exceptions import ClientException
config = ConfigParser(strict=False)
debug = os.getenv('__OS_DEBUG', 'false')
if debug.lower() == 'true':
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
LOG = logging.getLogger('nova_wait_for_api_service')
LOG_FORMAT = ('%(asctime)s.%(msecs)03d %(levelname)s '
'%(name)s %(message)s')
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
LOG.handlers.clear()
LOG.setLevel(loglevel)
LOG.propagate = True
formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(formatter)
LOG.addHandler(stream_handler)
iterations = 60
timeout = 10
nova_cfg = '/etc/nova/nova.conf'
if __name__ == '__main__':
parser = argparse.ArgumentParser(usage='%(prog)s [options]')
parser.add_argument('-k', '--insecure',
action="store_false",
dest='insecure',
default=True,
help='Allow insecure connection when using SSL')
args = parser.parse_args()
LOG.debug('Running with parameter insecure = %s',
args.insecure)
if os.path.isfile(nova_cfg):
try:
config.read(nova_cfg)
except Exception:
LOG.exception('Error while reading nova.conf:')
else:
LOG.error('Nova configuration file %s does not exist', nova_cfg)
sys.exit(1)
loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(
auth_url=config.get('neutron',
'auth_url'),
username=config.get('neutron',
'username'),
password=config.get('neutron',
'password'),
project_name=config.get('neutron',
'project_name'),
project_domain_name=config.get('neutron',
'project_domain_name'),
user_domain_name=config.get('neutron',
'user_domain_name'))
sess = session.Session(auth=auth, verify=args.insecure)
# Wait until this host is listed in the service list
for i in range(iterations):
try:
nova = client.Client('2.11', session=sess, endpoint_type='internal')
nova.versions.list()
LOG.info('Nova-api service active')
sys.exit(0)
except ClientException:
LOG.info('Waiting for nova-api service')
except Exception:
LOG.exception(
'Error while waiting for nova-api service')
time.sleep(timeout)
sys.exit(1)
# vim: set et ts=4 sw=4 :

View File

@ -1,118 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
from configparser import ConfigParser
import logging
import logging.handlers
import os
import socket
import sys
import time
from keystoneauth1 import loading
from keystoneauth1 import session
from novaclient import client
config = ConfigParser(strict=False)
debug = os.getenv('__OS_DEBUG', 'false')
if debug.lower() == 'true':
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
LOG = logging.getLogger('nova_wait_for_compute_service')
LOG_FORMAT = ('%(asctime)s.%(msecs)03d %(levelname)s '
'%(name)s %(message)s')
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
LOG.handlers.clear()
LOG.setLevel(loglevel)
LOG.propagate = True
formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(formatter)
LOG.addHandler(stream_handler)
iterations = 60
timeout = 10
nova_cfg = '/etc/nova/nova.conf'
if __name__ == '__main__':
parser = argparse.ArgumentParser(usage='%(prog)s [options]')
parser.add_argument('-k', '--insecure',
action="store_false",
dest='insecure',
default=True,
help='Allow insecure connection when using SSL')
args = parser.parse_args()
LOG.debug('Running with parameter insecure = %s',
args.insecure)
if os.path.isfile(nova_cfg):
try:
config.read(nova_cfg)
except Exception:
LOG.exception('Error while reading nova.conf:')
else:
LOG.error('Nova configuration file %s does not exist', nova_cfg)
sys.exit(1)
my_host = config.get('DEFAULT', 'host')
if not my_host:
# If host isn't set, nova defaults to this
my_host = socket.gethostname()
loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(
auth_url=config.get('neutron',
'auth_url'),
username=config.get('neutron',
'username'),
password=config.get('neutron',
'password'),
project_name=config.get('neutron',
'project_name'),
project_domain_name=config.get('neutron',
'project_domain_name'),
user_domain_name=config.get('neutron',
'user_domain_name'))
sess = session.Session(auth=auth, verify=args.insecure)
nova = client.Client('2.11', session=sess, endpoint_type='internal',
region_name=config.get('neutron', 'region_name'))
# Wait until this host is listed in the service list
for i in range(iterations):
try:
service_list = nova.services.list(binary='nova-compute')
for entry in service_list:
host = getattr(entry, 'host', '')
zone = getattr(entry, 'zone', '')
if host == my_host and zone != 'internal':
LOG.info('Nova-compute service registered')
sys.exit(0)
LOG.info('Waiting for nova-compute service to register')
except Exception:
LOG.exception(
'Error while waiting for nova-compute service to register')
time.sleep(timeout)
sys.exit(1)
# vim: set et ts=4 sw=4 :

View File

@ -1,103 +0,0 @@
#!/bin/bash
# pacemaker_mutex_restart_bundle.sh --lock mysql galera galera-bundle Master _
# pacemaker_mutex_restart_bundle.sh --lock ovn_dbs ovndb_servers ovn-dbs-bundle Slave Master
set -u
usage() {
echo "Restart a clustered resource in a coordinated way across the cluster"
echo "Usage:"
echo " $0 --lock <tripleo-service> <pcmk-resource> <pcmk-bundle> <target-state-local> <target-state-cluster>"
echo
}
log() {
echo "$(date -u): $1"
}
error() {
echo "$(date -u): $1" 1>&2
exit 1
}
pacemaker_supports_promoted() {
# The Promoted token is only matched in recent pacemaker versions
grep -wq "<value>Promoted</value>" /usr/share/pacemaker/resources-*.rng
}
ACTION=$1
case $ACTION in
--help) usage; exit 0;;
--lock) ;;
*) error "Unknown action '$ACTION'";;
esac
TRIPLEO_SERVICE=$2
LOCK_NAME=${TRIPLEO_SERVICE}-restart-lock
LOCK_OWNER=$(crm_node -n 2>/dev/null)
rc=$?
if [ $rc -ne 0 ]; then
if [ $rc -eq 102 ]; then
log "Cluster is not running locally, no need to restart resource $TRIPLEO_SERVICE"
exit 0
else
error "Unexpected error while connecting to the cluster (rc: $rc), bailing out"
fi
fi
RESOURCE_NAME=$3
BUNDLE_NAME=$4
WAIT_TARGET_LOCAL=$5
WAIT_TARGET_ANYWHERE=${6:-_}
if pacemaker_supports_promoted; then
WAIT_TARGET_LOCAL=$(echo "$5" | sed -e 's/Master/Promoted/' -e 's/Slave/Unpromoted/')
WAIT_TARGET_ANYWHERE=$(echo "${6:-_}" | sed -e 's/Master/Promoted/' -e 's/Slave/Unpromoted/')
promoted_role="Promoted"
else
promoted_role="Master"
fi
# The lock TTL should accommodate the resource start/promote timeout
if [ "$RESOURCE_NAME" != "$BUNDLE_NAME" ]; then
if [ "$WAIT_TARGET_LOCAL" = "$promoted_role" ] || [ "$WAIT_TARGET_ANYWHERE" = "$promoted_role" ]; then
rsc_op="promote"
else
rsc_op="start"
fi
# <op id="galera-promote-interval-0s" interval="0s" name="promote" on-fail="block" timeout="300s"/>
PCMK_TTL=$(cibadmin -Q | xmllint -xpath "string(//primitive[@id='${RESOURCE_NAME}']/operations/op[@name='${rsc_op}']/@timeout)" - | sed 's/s$//')
LOCK_TTL=$((PCMK_TTL + 30))
else
# The podman RA's default start timeout
LOCK_TTL=90
fi
log "Acquire a ${LOCK_TTL}s restart lock for service $TRIPLEO_SERVICE before restarting it"
# Loop until we hold the lock. The lock has a TTL, so we're guaranteed to get it eventually
rc=1
while [ $rc -ne 0 ]; do
/var/lib/container-config-scripts/pacemaker_resource_lock.sh --acquire $LOCK_NAME $LOCK_OWNER $LOCK_TTL
rc=$?
if [ $rc != 0 ]; then
if [ $rc -gt 1 ]; then
error "Could not acquire lock due to unrecoverable error (rc: $rc), bailing out"
else
log "Could not acquire lock, retrying"
sleep 10
fi
fi
done
log "Restart the service $TRIPLEO_SERVICE locally"
# Reuse the local restart script in t-h-t (driven by env var TRIPLEO_MINOR_UPDATE)
TRIPLEO_MINOR_UPDATE=true /var/lib/container-config-scripts/pacemaker_restart_bundle.sh $TRIPLEO_SERVICE $RESOURCE_NAME $BUNDLE_NAME $WAIT_TARGET_LOCAL $WAIT_TARGET_ANYWHERE
# If we reached this point, always try to release the lock
log "Release the restart lock for service $TRIPLEO_SERVICE"
/var/lib/container-config-scripts/pacemaker_resource_lock.sh --release $LOCK_NAME $LOCK_OWNER
rc=$?
if [ $rc -ne 0 ] && [ $rc -ne 1 ]; then
error "Could not release held lock (rc: $rc)"
fi

View File

@ -1,120 +0,0 @@
#!/bin/bash
# pacemaker_mutex_shutdown.sh --acquire
# pacemaker_mutex_shutdown.sh --release
set -u
usage() {
echo "Shutdown a cluster node in a coordinated way across the cluster"
echo "Usage:"
echo " $0 --acquire # prevent other node from shutting down until we hold the lock"
echo " $0 --release # release the lock, other node can compete for the shutdown lock"
echo
}
log() {
echo "$(date -u): $1"
}
error() {
echo "$(date -u): $1" 1>&2
exit 1
}
# Loop until we hold the lock. The lock has a TTL, so we're guaranteed to get it eventually
shutdown_lock_acquire() {
local lockname=$1
local requester=$2
local ttl=$3
local rc=1
local current_owner
local owner_stopped
local owner_rc
log "Acquiring the shutdown lock"
while [ $rc -ne 0 ]; do
/var/lib/container-config-scripts/pacemaker_resource_lock.sh --acquire-once $lockname $requester $ttl
rc=$?
if [ $rc -ne 0 ]; then
if [ $rc -eq 2 ]; then
error "Could not acquire the shutdown lock due to unrecoverable error (rc: $rc), bailing out"
else
# The lock is held by another node.
current_owner=$(/var/lib/container-config-scripts/pacemaker_resource_lock.sh --owner $lockname)
owner_rc=$?
if [ $owner_rc -eq 2 ]; then
error "Could not get the shutdown lock owner due to unrecoverable error (rc: $owner_rc), bailing out"
fi
if [ $owner_rc -eq 0 ]; then
# If the owner is marked as offline, that means it has shutdown and
# we can clean the lock preemptively and try to acquire it.
owner_stopped=$(crm_mon -1X | xmllint --xpath 'count(//nodes/node[@name="'${current_owner}'" and @online="false" and @unclean="false"])' -)
if [ "${owner_stopped}" = "1" ]; then
log "Shutdown lock held by stopped node '${current_owner}', lock can be released"
/var/lib/container-config-scripts/pacemaker_resource_lock.sh --release $lockname $current_owner
continue
fi
fi
log "Shutdown lock held by another node (rc: $rc), retrying"
sleep 10
fi
fi
done
log "Shutdown lock acquired"
return 0
}
# Release the lock if we still own it. Not owning it anymore is not fatal
shutdown_lock_release() {
local lockname=$1
local requester=$2
local rc
log "Releasing the shutdown lock"
/var/lib/container-config-scripts/pacemaker_resource_lock.sh --release $lockname $requester
rc=$?
if [ $rc -ne 0 ]; then
if [ $rc -gt 1 ]; then
error "Could not release the shutdown lock due to unrecoverable error (rc: $rc), bailing out"
else
log "Shutdown lock no longer held, nothing to do"
fi
else
log "Shutdown lock released"
fi
return 0
}
ACTION=$1
if [ -z "$ACTION" ]; then
error "Action must be specified"
fi
LOCK_NAME=tripleo-shutdown-lock
LOCK_OWNER=$(crm_node -n 2>/dev/null)
rc=$?
if [ $rc -ne 0 ]; then
if [ $rc -eq 102 ]; then
log "Cluster is not running locally, no need to acquire the shutdown lock"
exit 0
else
error "Unexpected error while connecting to the cluster (rc: $rc), bailing out"
fi
fi
# We start with a very high TTL, long enough to accommodate a cluster stop.
# As soon as the node goes offline, the other competing nodes are entitled
# to steal the lock, so they should never wait that long in practice.
LOCK_TTL=600
case $ACTION in
--help) usage; exit 0;;
--acquire|-a) shutdown_lock_acquire ${LOCK_NAME} ${LOCK_OWNER} ${LOCK_TTL};;
--release|-r) shutdown_lock_release ${LOCK_NAME} ${LOCK_OWNER};;
*) error "Invalid action";;
esac
exit $?

View File

@ -1,267 +0,0 @@
#!/bin/bash
MAX_RETRIES=10
CIB_ENOTFOUND=105
usage() {
echo "Set a global property in the cluster with a validity timestamp."
echo "Usage:"
echo " $0 --acquire <lock_name> <lock_owner> <lock_ttl_in_seconds>"
echo " $0 --release <lock_name> <lock_owner>"
echo
}
log() {
echo "$(date -u): $1" 1>&2
}
error() {
echo "$(date -u): $1" 1>&2
exit 1
}
lock_create() {
local name=$1
local data=$2
# cibadmin won't overwrite a key if someone else managed to create it concurrently
cibadmin --sync-call --scope crm_config --create --xml-text "<cluster_property_set id='${name}'><nvpair id='${name}-pair' name='${name}' value='${data}'/></cluster_property_set>" &>/dev/null
return $?
}
lock_update() {
local name=$1
local expected_data=$2
local new_data=$3
# we only update the lock we expect to see, so we can't update someone else's lock
cibadmin --sync-call --scope crm_config --modify --xpath "//cluster_property_set/nvpair[@name='${name}' and @value='${expected_data}']/.." --xml-text "<nvpair id='${name}-pair' name='${name}' value='${new_data}'/>" &>/dev/null
return $?
}
lock_delete() {
local name=$1
local expected_data=$2
# we only delete the lock we expect to see, so we can't delete someone else's lock
cibadmin --sync-call --scope crm_config --delete --xpath "//cluster_property_set/nvpair[@name='${name}' and @value='${expected_data}']/.." &>/dev/null
return $?
}
lock_get() {
local lockname=$1
local res
local rc
res=$(cibadmin --query --scope crm_config --xpath "//cluster_property_set/nvpair[@name='$lockname']" 2>/dev/null)
rc=$?
if [ $rc -eq 0 ]; then
echo "$res" | sed -n 's/.*value="\([^"]*\)".*/\1/p'
fi
return $rc
}
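# Lock values are encoded as "<owner>:<expiry-epoch>", e.g. a lock held by
# controller-0 until epoch 1700000000 is stored as "controller-0:1700000000"
# (example values only).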
lock_owner() {
local lock=$1
echo "$lock" | cut -d':' -f1
}
lock_has_expired() {
local lock=$1
local expiry=$(echo "$lock" | cut -d':' -f2)
local now=$(date +%s)
test $now -ge $expiry
}
# Perform a lock action and restart if the CIB has been modified before
# committing the lock action
try_action() {
local fun=$1
local lock=$2
local requester=$3
local args=${4:-}
local tries=$MAX_RETRIES
local rc=1
if [ "$fun" = "lock_acquire" ] || [ "$fun" = "lock_release" ]; then
log "Try running $fun"
else
return 2
fi
while [ $rc -ne 0 ]; do
$fun $lock $requester $args
rc=$?
if [ $rc -eq 0 ]; then
log "Operation $1 succeeded"
return 0
elif [ $rc -eq 3 ]; then
# rc == 3 -> CIB changed before push
if [ $tries -eq 0 ]; then
log "Failed to commit after $MAX_RETRIES retries. Bailing out."
return 2
else
log "Failed to commit. Retrying operation."
tries=$(($tries - 1))
fi
elif [ $rc -eq 2 ]; then
# rc == 2 -> unrecoverable cib error (e.g. pacemaker down)
log "Unexpected failure. Bailing out"
return $rc
else
# rc == 1 -> lock error (not owner, lock doesn't exists)
return $rc
fi
done
}
# The lock mechanism uses cibadmin's atomic creation so cluster-wide
# state coherency is guaranteed by pacemaker
lock_acquire() {
local lockname=$1
local requester=$2
local ttl=$3
local rc
local lock
local expiry
local owner
log "Check whether the lock is already held in the CIB"
lock=$(lock_get $lockname)
rc=$?
if [ $rc -ne 0 ] && [ $rc -ne $CIB_ENOTFOUND ]; then
log "Could not retrieve info from the CIB"
return 2
fi
if [ -n "$lock" ]; then
lock_has_expired $lock
rc=$?
if [ $rc -eq 0 ]; then
log "Lock has expired, now available for being held"
else
# lock is still held. check whether we're the owner
owner=$(lock_owner $lock)
if [ "$owner" = "$requester" ];then
log "Requester already owns the lock, acquiring attempt will just reconfigure the TTL"
else
log "Lock is held by someone else ($owner)"
return 1
fi
fi
else
log "Lock is not held yet"
fi
# prepare the lock info
expiry=$(($(date +%s) + $ttl))
if [ -n "$lock" ]; then
log "Attempting to update the lock"
lock_update $lockname "$lock" "$requester:$expiry"
rc=$?
else
log "Attempting to acquire the lock"
lock_create $lockname "$requester:$expiry"
rc=$?
fi
if [ $rc -eq 0 ]; then
log "Lock '$lockname' acquired by '$requester', valid until $(date -d @$expiry)"
return 0
else
log "CIB changed, lock cannot be acquired"
return 3
fi
}
# The lock mechanism uses the CIB's num_updates tag to implement
# a conditional store. Cluster-wide locking is guaranteed by pacemaker
lock_release() {
local lockname=$1
local requester=$2
local rc
local lock
local owner
log "Check whether the lock is already held in the CIB"
lock=$(lock_get $lockname)
rc=$?
if [ $rc -ne 0 ] && [ $rc -ne $CIB_ENOTFOUND ]; then
log "Could not retrieve info from the CIB"
return 2
fi
if [ -z "$lock" ]; then
log "Lock doesn't exist. Nothing to release"
return 0
else
log "Lock exists, check whether we're the owner"
owner=$(lock_owner $lock)
if [ "$owner" != "$requester" ];then
log "Lock is held by someone else ($owner), will not unlock"
return 1
fi
fi
lock_delete $lockname "$lock"
rc=$?
if [ $rc -eq 0 ]; then
log "Lock '$lockname' released by '$requester'"
return 0
else
log "CIB deletion error, lock cannot be released"
return 3
fi
}
# Retrieve the owner of a lock from the CIB
# this is a read-only operation, so no need to log debug info
lock_get_owner() {
local lockname=$1
local rc
local lock
local owner
lock=$(lock_get $lockname)
rc=$?
if [ $rc -ne 0 ] && [ $rc -ne $CIB_ENOTFOUND ]; then
return 2
fi
if [ -z "$lock" ]; then
return 1
else
lock_owner $lock
return 0
fi
}
ACTION=$1
LOCKNAME=$2
REQUESTER=$3
TTL=${4:-60}
if [ -z "$ACTION" ]; then
error "Action must be specified"
fi
if [ $ACTION != "--help" ]; then
if [ -z "$LOCKNAME" ]; then
error "You must specific a lock name"
fi
if [ $ACTION != "--owner" ] && [ $ACTION != "-o" ]; then
if [ -z "$REQUESTER" ]; then
error "You must specific a lock requester"
fi
fi
fi
case $ACTION in
--help) usage; exit 0;;
--acquire|-a) try_action lock_acquire $LOCKNAME $REQUESTER $TTL;;
--release|-r) try_action lock_release $LOCKNAME $REQUESTER;;
--acquire-once|-A) lock_acquire $LOCKNAME $REQUESTER $TTL;;
--owner|-o) lock_get_owner $LOCKNAME;;
*) error "Invalid action";;
esac
exit $?

View File

@ -1,104 +0,0 @@
#!/bin/bash
set -u
# ./pacemaker_restart_bundle.sh mysql galera galera-bundle Master _
# ./pacemaker_restart_bundle.sh redis redis redis-bundle Slave Master
# ./pacemaker_restart_bundle.sh ovn_dbs ovndb_servers ovn-dbs-bundle Slave Master
RESTART_SCRIPTS_DIR=$(dirname $0)
TRIPLEO_SERVICE=$1
RESOURCE_NAME=$2
BUNDLE_NAME=$3
WAIT_TARGET_LOCAL=$4
WAIT_TARGET_ANYWHERE=${5:-_}
TRIPLEO_MINOR_UPDATE="${TRIPLEO_MINOR_UPDATE:-false}"
TRIPLEO_HA_WRAPPER_RESOURCE_EXISTS="${TRIPLEO_HA_WRAPPER_RESOURCE_EXISTS:-false}"
bundle_can_be_restarted() {
local bundle=$1
# As long as the resource bundle is managed by pacemaker and is
# not meant to stay stopped, no matter the state of any inner
# pcmk_remote or ocf resource, we should restart it to give it a
# chance to read the new config.
[ "$(crm_resource --meta -r $1 -g is-managed 2>/dev/null)" != "false" ] && \
[ "$(crm_resource --meta -r $1 -g target-role 2>/dev/null)" != "Stopped" ]
}
log() {
local msg=$1
logger -t pcmkrestart "$1"
}
HOSTNAME=$(/bin/hostname -s)
if [ x"${TRIPLEO_MINOR_UPDATE,,}" != x"true" ]; then
if [ x"${TRIPLEO_HA_WRAPPER_RESOURCE_EXISTS,,}" = x"false" ]; then
# Do not restart during initial deployment, as the resource
# has just been created.
SERVICE_NODEID=$(/bin/hiera -c /etc/puppet/hiera.yaml "${TRIPLEO_SERVICE}_short_bootstrap_node_name")
if [[ "${HOSTNAME,,}" == "${SERVICE_NODEID,,}" ]]; then
log "Initial deployment, skipping the restart of ${BUNDLE_NAME}"
fi
exit 0
else
# During a stack update, this script is called in parallel on
# every node the resource runs on, after the service's configs
# have been updated on all nodes. So we need to run pcs only
# once (e.g. on the service's bootstrap node).
if bundle_can_be_restarted ${BUNDLE_NAME}; then
SERVICE_NODEID=$(/bin/hiera -c /etc/puppet/hiera.yaml "${TRIPLEO_SERVICE}_short_bootstrap_node_name")
if [[ "${HOSTNAME,,}" == "${SERVICE_NODEID,,}" ]]; then
replicas_running=$(crm_resource -Q -r $BUNDLE_NAME --locate 2>&1 | wc -l)
if [ "$replicas_running" != "0" ]; then
log "Restarting ${BUNDLE_NAME} globally. Stopping:"
/sbin/pcs resource disable --wait=__PCMKTIMEOUT__ $BUNDLE_NAME
log "Restarting ${BUNDLE_NAME} globally. Starting:"
/sbin/pcs resource enable --wait=__PCMKTIMEOUT__ $BUNDLE_NAME
else
log "${BUNDLE_NAME} is not running anywhere," \
"cleaning up to restart it globally if necessary"
/sbin/pcs resource cleanup $BUNDLE_NAME
fi
else
log "Skipping global restart of ${BUNDLE_NAME} on ${HOSTNAME} it will be restarted by node ${SERVICE_NODEID}"
fi
else
log "No global restart needed for ${BUNDLE_NAME}."
fi
fi
else
# During a minor update workflow, however, a host gets fully
# updated before updating the next one. So unlike stack
# update, at the time this script is called, the service's
# configs aren't updated on all nodes yet. So only restart the
# resource locally, where it's guaranteed that the config is
# up to date.
HOST=$(facter hostname)
if bundle_can_be_restarted ${BUNDLE_NAME}; then
# if the resource is running locally, restart it
if crm_resource -r $BUNDLE_NAME --locate 2>&1 | grep -w -q "${HOST}"; then
log "Restarting ${BUNDLE_NAME} locally on '${HOST}'"
/sbin/pcs resource restart $BUNDLE_NAME "${HOST}"
else
# At this point, if no resource is running locally, it's
# either because a) it has failed previously, or b) because
# it's an A/P resource running elsewhere.
# By cleaning up resource, we ensure that a) it will try to
# restart, or b) it won't do anything if the resource is
# already running elsewhere.
log "${BUNDLE_NAME} is currently not running on '${HOST}'," \
"cleaning up its state to restart it if necessary"
/sbin/pcs resource cleanup $BUNDLE_NAME node="${HOST}"
fi
# Wait until the resource is in the expected target state
$RESTART_SCRIPTS_DIR/pacemaker_wait_bundle.sh \
$RESOURCE_NAME $BUNDLE_NAME \
"$WAIT_TARGET_LOCAL" "$WAIT_TARGET_ANYWHERE" \
"${HOST}" __PCMKTIMEOUT__
else
log "No restart needed for ${BUNDLE_NAME}."
fi
fi

View File

@ -1,333 +0,0 @@
#!/bin/bash
# ----
# Wait for an OCF resource or a bundle to be restarted
# ----
# e.g.:
# M/S OCF: $0 galera galera-bundle Master
# clone OCF: $0 rabbitmq rabbitmq-bundle Started
# A/P M/S OCF: $0 redis redis-bundle Slave Master
# A/P bundle: $0 openstack-cinder-volume openstack-cinder-volume _ Started
# clone bundle: $0 haproxy-bundle haproxy-bundle Started
# design note 1:
# - this script is called during a minor update; it is called
# once per node that hosts a service replica.
# - the purpose of this script is to ensure that restarting the
# service replica locally won't disrupt the service availability
# for the end user. To reach that goal, the script waits until the
# service is restarted locally or globally and reaches a given
# target state (i.e. Started, Slave or Master).
# design note 2:
# - we don't want to track restart error: our only job is to ensure
# service restart synchronization, not service health.
# - In particular, we don't want to error out in case the resource
# cannot be restarted locally, because that would make the minor
# update fail, even if potentially other replicas still provide
# the service.
# design note 3:
# - we can bail out early if we determine that the resource can't
# be restarted automatically by pacemaker (e.g. it's "blocked",
# unmanaged or disabled).
log() {
local msg="$*"
echo "$(date -u): $msg"
}
usage() {
echo 2>&1 "Usage: $0 NAME BUNDLE_NAME ROLE_LOCAL [ROLE_ANYWHERE] [HOST] [TIMEOUT]"
exit 1
}
pacemaker_supports_promoted() {
# The Promoted token is only matched in recent pacemaker versions
grep -wq "<value>Promoted</value>" /usr/share/pacemaker/resources-*.rng
}
#
# Utility functions to detect stuck resources
#
bundle_failures_locally() {
local engine=$BUNDLE_CONTAINER_ENGINE
local replicas=$BUNDLE_REPLICAS
local last=$(($replicas - 1))
local replica_name
for i in $(seq 0 $last); do
replica_name=${BUNDLE_NAME}-${engine}-${i}
crm_failcount -q -G -r $replica_name -N $HOST
done
}
bundle_failures_globally() {
local engine=$BUNDLE_CONTAINER_ENGINE
local replicas=$BUNDLE_REPLICAS
local last=$(($replicas - 1))
for i in $(seq 0 $last); do
crm_failcount -q -G -r ${BUNDLE_NAME}-${engine}-${i}
done
}
bundle_running_globally() {
local engine=$BUNDLE_CONTAINER_ENGINE
# return the number of running bundle replicas, i.e. the number of
# docker/podman resource replicas currently running in the cluster
crm_mon --as-xml | xmllint --xpath "count(//resources/bundle[@id='${BUNDLE_NAME}']/replica/resource[@resource_agent='${OCF}:heartbeat:${engine}']/node)" -
}
ocf_failures_globally() {
local replicas=$BUNDLE_REPLICAS
local last=$(($replicas - 1))
local bundle_node
for i in $(seq 0 $last); do
bundle_node=${BUNDLE_NAME}-${i}
crm_failcount -q -G -r $NAME -N $bundle_node
done
}
did_resource_failed_locally() {
local failures
local running
local remotehost
if [ "${NAME}" != "${BUNDLE_NAME}" ]; then
# if we're dealing with an ocf resource, it is running on a
# pacemaker_remote rather than on the real host, and the
# failcounts are thus associated with the pcmk remote. Replace
# the host's name with the pcmk remote's name.
remotehost=$(crm_mon --as-xml | xmllint --xpath "string(//resources/bundle[@id='${BUNDLE_NAME}']/replica/resource/node[@name='${HOST}']/../../resource[@resource_agent='${OCF}:pacemaker:remote']/@id)" -)
if [ -n "${remotehost}" ]; then
crm_failcount -q -G -r $NAME -N $remotehost | grep -q -w INFINITY
return $?
fi
# If no pcmk remote is currently running, the failcount from
# the ocf resource is useless; compute the failcount from the
# bundle instead (as done below).
fi
# for bundles, pacemaker can run any bundle replica locally
# (e.g. galera-bundle-docker-{0,1,2}), and a failure happens when
# there are no more replicas to try.
# That is, when _at least_ one replica failed locally, and all the
# others either failed or are currently running elsewhere.
failures=$(bundle_failures_locally $HOST | grep -c -w INFINITY)
running=$(bundle_running_globally)
test $failures -gt 0 && \
test $(( $failures + $running )) -ge $BUNDLE_REPLICAS
}
did_resource_failed_globally() {
local remotecount
local failures
if [ "${NAME}" != "${BUNDLE_NAME}" ]; then
# we check the state of an ocf resource only if the
# pcmkremotes are started
remotecount=$(crm_mon --as-xml | xmllint --xpath "count(//resources/bundle[@id='${BUNDLE_NAME}']/replica/resource[@resource_agent='${OCF}:pacemaker:remote']/node)" -)
if [ "${remotecount}" = "0" ]; then
# no pcmkremote is running, so check the bundle state
# instead of checking the ocf resource
# bundle failed if all ${BUNDLE_REPLICAS} replicas failed
failures=$(bundle_failures_globally | grep -c -w INFINITY)
test $failures -eq $BUNDLE_REPLICAS
else
# ocf resource failed if it failed to start on
# all $BUNDLE_REPLICAS bundle nodes
failures=$(ocf_failures_globally | grep -c -w INFINITY)
test $failures -eq $BUNDLE_REPLICAS
fi
else
# bundle failed if all ${BUNDLE_REPLICAS} replicas failed
failures=$(bundle_failures_globally | grep -c -w INFINITY)
test $failures -eq $BUNDLE_REPLICAS
fi
}
# Input validation
#
NAME=$1
if [ -z "${NAME}" ]; then
echo 2>&1 "Error: argument NAME must not be empty"
exit 1
fi
BUNDLE_NAME=$2
if [ -z "${BUNDLE_NAME}" ]; then
echo 2>&1 "Error: argument BUNDLE_NAME must not be empty"
exit 1
fi
ROLE_LOCAL=$3
if [ "${ROLE_LOCAL}" = "_" ]; then
ROLE_LOCAL=""
fi
ROLE_ANYWHERE=$4
if [ "${ROLE_ANYWHERE}" = "_" ]; then
ROLE_ANYWHERE=""
fi
if [ -z "${ROLE_LOCAL}" ]; then
if [ -z "${ROLE_ANYWHERE}" ]; then
echo 2>&1 "Error: either ROLE_LOCAL or ROLE_ANYWHERE must be non empty"
exit 1
fi
else
if !(echo "${ROLE_LOCAL}" | grep -q -x -E "(Started|Slave|Master|Unpromoted|Promoted)"); then
echo 2>&1 "Error: argument ROLE_LOCAL must be either 'Started' 'Slave' 'Master' 'Unpromoted' or 'Promoted'"
exit 1
fi
fi
if [ -n "${ROLE_ANYWHERE}" ] && !(echo "${ROLE_ANYWHERE}" | grep -q -x -E "(Started|Slave|Master|Unpromoted|Promoted)"); then
echo 2>&1 "Error: argument ROLE_ANYWHERE must be either 'Started' 'Slave' 'Master' 'Unpromoted' or 'Promoted'"
exit 1
fi
# Ensure compatibility with pacemaker 2.1
if pacemaker_supports_promoted; then
ROLE_LOCAL=$(echo "$ROLE_LOCAL" | sed -e 's/Master/Promoted/' -e 's/Slave/Unpromoted/')
ROLE_ANYWHERE=$(echo "$ROLE_ANYWHERE" | sed -e 's/Master/Promoted/' -e 's/Slave/Unpromoted/')
OCF="ocf"
else
OCF="ocf:"
fi
HOST=${5:-$(facter hostname)}
TIMEOUT=${6:-__PCMKTIMEOUT__}
# Configure the search
# ----
# Note: we can't use crm_resource in all searches because we can't
# easily extract the host the OCF resources run on (crm_resource
# returns the pcmk-remote nodes rather than the hosts)
# So instead, we implement various searches with XPath directly.
if [ "${BUNDLE_NAME}" != "${NAME}" ]; then
# ocf resource
local_resource_xpath="//bundle/replica/resource[@resource_agent='${OCF}:pacemaker:remote']/node[@name='${HOST}']/../../resource[@id='${NAME}']"
any_resource_xpath="//bundle//resource[@id='${NAME}']"
replicas_xpath="//bundle/primitive[@id='${BUNDLE_NAME}']/../*[boolean(@image) and boolean(@replicas)]"
else
# bundle resource
local_resource_xpath="//bundle[@id='${NAME}']/replica/resource/node[@name='${HOST}']/../../resource"
any_resource_xpath="//bundle[@id='${NAME}']//resource"
replicas_xpath="//bundle[@id='${BUNDLE_NAME}']/*[boolean(@image) and boolean(@replicas)]"
fi
bundle_def_xpath="//bundle[@id='${BUNDLE_NAME}']/*[boolean(@image) and boolean(@replicas)]"
BUNDLE_CONTAINER_ENGINE=$(cibadmin -Q | xmllint --xpath "name(${bundle_def_xpath})" -)
BUNDLE_REPLICAS=$(cibadmin -Q | xmllint --xpath "string(${bundle_def_xpath}/@replicas)" -)
# The wait algorithm follows a two-stage approach
# 1. Depending on how the script is called, we first check whether
# the resource is restarted locally. An A/P resource may be
# restarted elsewhere in the cluster.
# 2. If needed, check whether the A/P resource has restarted
# elsewhere. For A/P M/S resources, in case the resource is
# restarted as Slave locally, ensure a Master is available.
success=1
bailout=1
timeout=$TIMEOUT
role=""
# Stage 1: local check
if [ -n "$ROLE_LOCAL" ]; then
log "Waiting until ${NAME} has restarted on ${HOST} and is in state ${ROLE_LOCAL}"
log "Will probe resource state with the following XPath pattern: ${local_resource_xpath}"
while [ $timeout -gt 0 ] && [ $bailout -ne 0 ] && [ $success -ne 0 ]; do
resource=$(crm_mon -r --as-xml | xmllint --xpath "${local_resource_xpath}" - 2>/dev/null)
role=$(echo "${resource}" | sed -ne 's/.*\Wrole="\([^"]*\)".*/\1/p')
if [ "$(crm_resource --meta -r ${NAME} -g is-managed 2>/dev/null)" = "false" ]; then
log "${NAME} is unmanaged, will never reach target role. Bailing out"
bailout=0
continue
elif [ "$(crm_resource --meta -r ${NAME} -g target-role 2>/dev/null)" = "Stopped" ]; then
log "${NAME} is disabled, will never reach target role. Bailing out"
bailout=0
continue
elif echo "${resource}" | grep -q -w "\Wblocked=\"true\""; then
log "${NAME} is blocked, will never reach target role. Bailing out"
bailout=0
continue
elif did_resource_failed_locally; then
log "${NAME} is in failed state, will never reach target role. Bailing out"
bailout=0
continue
elif [ "$role" = "$ROLE_LOCAL" ]; then
success=0
continue
elif [ -n "$ROLE_ANYWHERE" ] && [ "$role" = "$ROLE_ANYWHERE" ]; then
# A/P: we are restarted in the expected state
success=0
continue
else
log "Waiting for ${NAME} to transition to role ${ROLE_LOCAL} on ${HOST}"
fi
if [ $bailout -ne 0 ] && [ $success -ne 0 ]; then
sleep 4
timeout=$((timeout-4))
fi
done
fi
# Stage 2: global check
if [ $timeout -gt 0 ] && [ -n "$ROLE_ANYWHERE" ] && [ "$role" != "$ROLE_ANYWHERE" ]; then
log "Wait until ${NAME} is restarted anywhere in the cluster in state ${ROLE_ANYWHERE}"
log "Will probe resource state with the following XPath pattern: ${any_resource_xpath}"
success=1
bailout=1
while [ $timeout -gt 0 ] && [ $bailout -ne 0 ] && [ $success -ne 0 ]; do
resources=$(crm_mon -r --as-xml | xmllint --xpath "${any_resource_xpath}" - 2>/dev/null)
if [ "$(crm_resource --meta -r ${NAME} -g is-managed 2>/dev/null)" = "false" ]; then
log "${NAME} is unmanaged, will never reach target role. Bailing out"
bailout=0
continue
elif [ "$(crm_resource --meta -r ${NAME} -g target-role 2>/dev/null)" = "Stopped" ]; then
log "${NAME} is disabled, will never reach target role. Bailing out"
bailout=0
continue
elif ! (echo "${resources}" | grep -q -w "\Wblocked=\"false\""); then
log "${NAME} blocked, will never reach target role. Bailing out"
bailout=0
continue
elif did_resource_failed_globally; then
log "${NAME} is in failed state, will never reach target role. Bailing out"
bailout=0
continue
elif echo "${resources}" | grep -q -w "\Wrole=\"${ROLE_ANYWHERE}\""; then
success=0
continue
else
log "Waiting for ${NAME} to transition to role ${ROLE_ANYWHERE} anywhere in the cluster"
fi
if [ $bailout -ne 0 ] && [ $success -ne 0 ]; then
sleep 4
timeout=$((timeout-4))
fi
done
fi
if [ $timeout -le 0 ]; then
log "Timeout reached after ${TIMEOUT}s while waiting for ${NAME} to be restarted"
elif [ $bailout -le 0 ]; then
log "Restart monitoring for ${NAME} cancelled"
fi
if [ $success -eq 0 ]; then
log "${NAME} successfully restarted"
else
log "${NAME} was not restarted properly"
fi
# Don't block minor update or stack update if the wait was unsuccessful
exit 0

View File

@ -1,134 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Script to check if the placement API is up after X attempts.
# Default max is 60 iterations with a 10s (default) sleep in between.
from configparser import ConfigParser
import logging
import logging.handlers
import os
import re
import sys
import time
from keystoneauth1.identity import v3
from keystoneauth1 import session
from keystoneclient.v3 import client
import requests
config = ConfigParser(strict=False)
debug = os.getenv('__OS_DEBUG', 'false')
if debug.lower() == 'true':
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
LOG = logging.getLogger('placement_wait_for_service')
LOG_FORMAT = ('%(asctime)s.%(msecs)03d %(levelname)s '
'%(name)s %(message)s')
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
LOG.handlers.clear()
LOG.setLevel(loglevel)
LOG.propagate = True
formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(formatter)
LOG.addHandler(stream_handler)
iterations = 60
timeout = 10
placement_cfg = '/etc/placement/placement.conf'
if __name__ == '__main__':
if os.path.isfile(placement_cfg):
try:
config.read(placement_cfg)
except Exception:
LOG.exception('Error while reading placement.conf:')
else:
LOG.error('Placement configuration file %s does not exist',
placement_cfg)
sys.exit(1)
# get keystone client with details from [keystone_authtoken] section
auth = v3.Password(
user_domain_name=config.get('keystone_authtoken', 'user_domain_name'),
username=config.get('keystone_authtoken', 'username'),
password=config.get('keystone_authtoken', 'password'),
project_name=config.get('keystone_authtoken', 'project_name'),
project_domain_name=config.get('keystone_authtoken',
'project_domain_name'),
auth_url=config.get('keystone_authtoken', 'auth_url') + '/v3')
sess = session.Session(auth=auth, verify=False)
keystone = client.Client(session=sess, interface='internal')
iterations_endpoint = iterations
placement_endpoint_url = None
while iterations_endpoint > 1:
iterations_endpoint -= 1
try:
# get placement service id
placement_service_id = keystone.services.list(
name='placement')[0].id
# get placement endpoint
# Note: puppet-placement does not support setting the interface
# until we have https://review.opendev.org/688862.
# Let's hard-code 'internal' for now.
placement_endpoint_url = keystone.endpoints.list(
service=placement_service_id,
region=config.get('keystone_authtoken', 'region_name'),
interface='internal')[0].url
if not placement_endpoint_url:
LOG.error('Failed to get placement service endpoint!')
else:
break
except Exception:
LOG.exception('Retry - Failed to get placement service endpoint:')
time.sleep(timeout)
if not placement_endpoint_url:
LOG.error('Failed to get placement service endpoint!')
sys.exit(1)
# we should have CURRENT in the request response from placement:
# {"versions": [{"status": "CURRENT", "min_version": "1.0", "max_version":
# "1.29", "id": "v1.0", "links": [{"href": "", "rel": "self"}]}]}
response_reg = re.compile('.*CURRENT,*')
while iterations > 1:
iterations -= 1
try:
r = requests.get(placement_endpoint_url + '/', verify=False)
if r.status_code == 200 and response_reg.match(r.text):
LOG.info('Placement service up! - %s', r.text)
sys.exit(0)
else:
LOG.info('response - %r', r)
LOG.info('Placement service not up - %s, %s',
r.status_code,
r.text)
except Exception:
LOG.exception('Error querying the placement endpoint:')
time.sleep(timeout)
sys.exit(1)
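# Example invocation (illustrative; the filename is an assumption based
# on the logger name above):
#   __OS_DEBUG=true python3 placement_wait_for_service.py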
# vim: set et ts=4 sw=4 :

View File

@ -1,45 +0,0 @@
#!/bin/bash
# Copyright 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Usage: pyshim.sh <script and/or arguments>
#
# Unfortunately THT doesn't know which version of python might be in a
# container, so we need this script to be able to try python3 or python2
# depending on availability. Since this is a temporary shim until we've
# fully cut over to python3, we check for the existence of python3 first
# before falling back to python2. This will help in the transition from
# python2 based containers to python3.
show_usage() {
echo "Usage: pyshim.sh <script and/or arguments>"
}
if [ $# -lt 1 ]
then
show_usage
exit 1
fi
set -x
if command -v python3 >/dev/null; then
python3 "$@"
elif command -v python2 >/dev/null; then
python2 "$@"
elif command -v python >/dev/null; then
python "$@"
else
echo "ERROR: python is not available!"
exit 1
fi
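# Example usage (illustrative only):
#   ./pyshim.sh -c 'import sys; print(sys.version)'
# The arguments are handed to whichever interpreter is found first
# (python3, then python2, then python).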

View File

@ -1,168 +0,0 @@
#
# Copyright 2022 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from container_config_scripts.nova_api_ensure_default_cells import parse_list_cells
from container_config_scripts.nova_api_ensure_default_cells import replace_db_name
from container_config_scripts.nova_api_ensure_default_cells import template_netloc_credentials
from container_config_scripts.nova_api_ensure_default_cells import template_url
from oslotest import base
class TemplateNetlocCredentialsCase(base.BaseTestCase):
def test_host(self):
test_netloc = 'example.com'
expected_netloc = test_netloc
templated_netloc = template_netloc_credentials(test_netloc)
self.assertEqual(expected_netloc, templated_netloc)
def test_host_port(self):
test_netloc = 'example.com:1234'
expected_netloc = test_netloc
templated_netloc = template_netloc_credentials(test_netloc)
self.assertEqual(expected_netloc, templated_netloc)
def test_host_port_ipv6(self):
test_netloc = '[dead:beef::1]:1234'
expected_netloc = test_netloc
templated_netloc = template_netloc_credentials(test_netloc)
self.assertEqual(expected_netloc, templated_netloc)
def test_username(self):
test_netloc = 'foo@example.com'
expected_netloc = '{username}@example.com'
templated_netloc = template_netloc_credentials(test_netloc)
self.assertEqual(expected_netloc, templated_netloc)
def test_userpass(self):
test_netloc = 'foo:bar@example.com'
expected_netloc = '{username}:{password}@example.com'
templated_netloc = template_netloc_credentials(test_netloc)
self.assertEqual(expected_netloc, templated_netloc)
def test_username_index(self):
test_netloc = 'foo@example.com'
expected_netloc = '{username5}@example.com'
templated_netloc = template_netloc_credentials(test_netloc, index=5)
self.assertEqual(expected_netloc, templated_netloc)
def test_userpass_index(self):
test_netloc = 'foo:bar@example.com'
expected_netloc = '{username5}:{password5}@example.com'
templated_netloc = template_netloc_credentials(test_netloc, index=5)
self.assertEqual(expected_netloc, templated_netloc)
class TemplateUrlCase(base.BaseTestCase):
def test_simple_url(self):
test_url = 'scheme://foo:bar@example.com:12345/?param=foo&param=bar#blah'
expected_url = 'scheme://{username}:{password}@example.com:12345/?param=foo&param=bar#blah'
templated_url = template_url(test_url)
self.assertEqual(expected_url, templated_url)
def test_ha_url(self):
test_url = 'scheme://foo:bar@example.com:12345,foo2:bar2@example2.com:6789,foo3:bar3@example3.com:4321/?param=foo&param=bar#blah'
expected_url = 'scheme://{username1}:{password1}@example.com:12345,{username2}:{password2}@example2.com:6789,{username3}:{password3}@example3.com:4321/?param=foo&param=bar#blah'
templated_url = template_url(test_url)
self.assertEqual(expected_url, templated_url)
def test_ha_ipv6_url(self):
test_url = 'scheme://foo:bar@[dead:beef::1]:12345,foo2:bar2@[dead:beef::2]:6789,foo3:bar3@[dead:beef::3]:4321/?param=foo&param=bar#blah'
expected_url = 'scheme://{username1}:{password1}@[dead:beef::1]:12345,{username2}:{password2}@[dead:beef::2]:6789,{username3}:{password3}@[dead:beef::3]:4321/?param=foo&param=bar#blah'
templated_url = template_url(test_url)
self.assertEqual(expected_url, templated_url)
class ParseListCellsCase(base.BaseTestCase):
def test_no_output(self):
test_output = ''
self.assertRaises(ValueError, parse_list_cells, test_output)
def test_no_cells(self):
test_output = '''\
+------+------+---------------+---------------------+----------+
| Name | UUID | Transport URL | Database Connection | Disabled |
+------+------+---------------+---------------------+----------+
+------+------+---------------+---------------------+----------+
'''
expected_cell_dicts = ({}, {})
cell_dicts = parse_list_cells(test_output)
self.assertEqual(expected_cell_dicts, cell_dicts)
def test_cell0(self):
test_output = '''\
+-------+--------------------------------------+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+
| Name | UUID | Transport URL | Database Connection | Disabled |
+-------+--------------------------------------+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/// | mysql+pymysql://nova:GsrvXnnW6Oam6Uz1CraPS46PV@overcloud.internalapi.redhat.local/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |
+-------+--------------------------------------+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+
'''
expected_cell0_dict = {
'name': 'cell0',
'uuid': '00000000-0000-0000-0000-000000000000',
'transport_url': 'none:///',
'database_connection': 'mysql+pymysql://nova:GsrvXnnW6Oam6Uz1CraPS46PV@overcloud.internalapi.redhat.local/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
}
expected_cell_dicts = (
{
'cell0': expected_cell0_dict
},
{
'00000000-0000-0000-0000-000000000000': expected_cell0_dict
}
)
cell_dicts = parse_list_cells(test_output)
self.assertEqual(expected_cell_dicts, cell_dicts)
def test_default_cells(self):
test_output = '''\
+---------+--------------------------------------+--------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+
| Name | UUID | Transport URL | Database Connection | Disabled |
+---------+--------------------------------------+--------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/// | mysql+pymysql://nova:GsrvXnnW6Oam6Uz1CraPS46PV@overcloud.internalapi.redhat.local/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |
| default | 541ca4e9-15f7-4178-95de-8af9e3659daf | rabbit://guest:oLniT3uE12BLP4VsyoFt29k3U@controller-0.internalapi.redhat.local:5672/?ssl=1 | mysql+pymysql://nova:GsrvXnnW6Oam6Uz1CraPS46PV@overcloud.internalapi.redhat.local/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |
+---------+--------------------------------------+--------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+
'''
expected_cell0_dict = {
'name': 'cell0',
'uuid': '00000000-0000-0000-0000-000000000000',
'transport_url': 'none:///',
'database_connection': 'mysql+pymysql://nova:GsrvXnnW6Oam6Uz1CraPS46PV@overcloud.internalapi.redhat.local/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
}
expected_default_dict = {
'name': 'default',
'uuid': '541ca4e9-15f7-4178-95de-8af9e3659daf',
'transport_url': 'rabbit://guest:oLniT3uE12BLP4VsyoFt29k3U@controller-0.internalapi.redhat.local:5672/?ssl=1',
'database_connection': 'mysql+pymysql://nova:GsrvXnnW6Oam6Uz1CraPS46PV@overcloud.internalapi.redhat.local/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
}
expected_cell_dicts = (
{
'cell0': expected_cell0_dict,
'default': expected_default_dict
},
{
'00000000-0000-0000-0000-000000000000': expected_cell0_dict,
'541ca4e9-15f7-4178-95de-8af9e3659daf': expected_default_dict
}
)
cell_dicts = parse_list_cells(test_output)
self.assertEqual(expected_cell_dicts, cell_dicts)
class ReplaceDbNameCase(base.BaseTestCase):
def test_replace_db_name(self):
test_db_url = 'mysql+pymysql://nova:GsrvXnnW6Oam6Uz1CraPS46PV@overcloud.internalapi.redhat.local/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
expected_db_url = 'mysql+pymysql://nova:GsrvXnnW6Oam6Uz1CraPS46PV@overcloud.internalapi.redhat.local/foobar?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
db_url = replace_db_name(test_db_url, 'foobar')
self.assertEqual(expected_db_url, db_url)

View File

@ -1,426 +0,0 @@
#
# Copyright 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import contextlib
import os
from os import stat as orig_stat
import stat
import sys
from oslotest import base
class FakeSelinux(object):
@staticmethod
def lgetfilecon(path):
pass
@staticmethod
def lsetfilecon(path, context):
pass
sys.modules["selinux"] = FakeSelinux
from container_config_scripts.nova_statedir_ownership import get_exclude_paths # noqa: E402
from container_config_scripts.nova_statedir_ownership import \
NovaStatedirOwnershipManager # noqa: E402
from container_config_scripts.nova_statedir_ownership import PathManager # noqa: E402
# Real chown would require root, so in order to test this we need to fake
# all of the methods that interact with the filesystem
current_uid = 100
current_gid = 100
class FakeStatInfo(object):
def __init__(self, st_mode, st_uid, st_gid):
self.st_mode = st_mode
self.st_uid = st_uid
self.st_gid = st_gid
def get_ids(self):
return (self.st_uid, self.st_gid)
def generate_testtree1(nova_uid, nova_gid):
return {
'/var/lib/nova': {
'stat': FakeStatInfo(st_mode=stat.S_IFDIR,
st_uid=nova_uid,
st_gid=nova_gid),
'nfs': False,
},
'/var/lib/_nova_secontext': {
'stat': FakeStatInfo(st_mode=stat.S_IFDIR,
st_uid=nova_uid,
st_gid=nova_gid),
'nfs': False,
},
'/var/lib/nova/instances': {
'stat': FakeStatInfo(st_mode=stat.S_IFDIR,
st_uid=nova_uid,
st_gid=nova_gid),
'nfs': False,
},
'/var/lib/nova/instances/removeddir': {
'stat': FakeStatInfo(st_mode=stat.S_IFDIR,
st_uid=nova_uid,
st_gid=nova_gid),
'nfs': False,
'removed_when': 'listdir'
},
'/var/lib/nova/instances/removedfile': {
'stat': FakeStatInfo(st_mode=stat.S_IFREG,
st_uid=0,
st_gid=nova_gid),
'nfs': False,
'removed_when': 'lgetfilecon'
},
'/var/lib/nova/instances/removedfile2': {
'stat': FakeStatInfo(st_mode=stat.S_IFREG,
st_uid=0,
st_gid=nova_gid),
'nfs': False,
'removed_when': 'lsetfilecon'
},
'/var/lib/nova/instances/removedfile3': {
'nfs': False,
'removed_when': 'stat'
},
'/var/lib/nova/instances/removeddir2': {
'stat': FakeStatInfo(st_mode=stat.S_IFDIR,
st_uid=nova_uid,
st_gid=nova_gid),
'nfs': False,
'removed_when': 'lsetfilecon'
},
'/var/lib/nova/instances/foo': {
'stat': FakeStatInfo(st_mode=stat.S_IFDIR,
st_uid=nova_uid,
st_gid=nova_gid),
'nfs': True,
},
'/var/lib/nova/instances/foo/bar': {
'stat': FakeStatInfo(st_mode=stat.S_IFREG,
st_uid=0,
st_gid=0),
'nfs': True,
},
'/var/lib/nova/instances/foo/baz': {
'stat': FakeStatInfo(st_mode=stat.S_IFREG,
st_uid=nova_uid,
st_gid=nova_gid),
'nfs': True,
},
'/var/lib/nova/instances/foo/removeddir': {
'stat': FakeStatInfo(st_mode=stat.S_IFDIR,
st_uid=nova_uid,
st_gid=nova_gid),
'nfs': True,
'removed_when': 'listdir'
},
'/var/lib/nova/instances/foo/removeddir2': {
'stat': FakeStatInfo(st_mode=stat.S_IFDIR,
st_uid=0,
st_gid=nova_gid),
'nfs': True,
'removed_when': 'chown'
},
'/var/lib/nova/instances/foo/abc': {
'stat': FakeStatInfo(st_mode=stat.S_IFREG,
st_uid=0,
st_gid=nova_gid),
'nfs': True,
},
'/var/lib/nova/instances/foo/def': {
'stat': FakeStatInfo(st_mode=stat.S_IFREG,
st_uid=nova_uid,
st_gid=0),
'nfs': True,
},
}
def generate_testtree2(marker_uid, marker_gid, *args, **kwargs):
tree = generate_testtree1(*args, **kwargs)
tree.update({
'/var/lib/nova/upgrade_marker': {
'stat': FakeStatInfo(st_mode=stat.S_IFREG,
st_uid=marker_uid,
st_gid=marker_gid),
'nfs': False,
}
})
return tree
def check_removed(path, op, testtree):
if op == testtree.get(path, {}).get('removed_when', ''):
raise OSError(2, 'No such file or directory: ' + path)
def generate_fake_stat(testtree):
def fake_stat(path):
check_removed(path, 'stat', testtree)
if path.startswith('/var'):
return testtree.get(path, {}).get('stat')
else:
# Tracebacks need to use the real stat
return orig_stat(path)
return fake_stat
def generate_fake_chown(testtree):
def fake_chown(path, uid, gid):
check_removed(path, 'chown', testtree)
if uid != -1:
testtree[path]['stat'].st_uid = uid
if gid != -1:
testtree[path]['stat'].st_gid = gid
return fake_chown
def generate_fake_exists(testtree):
def fake_exists(path):
check_removed(path, 'exists', testtree)
return path in testtree
return fake_exists
def generate_fake_listdir(testtree):
def fake_listdir(path):
check_removed(path, 'listdir', testtree)
path_parts = path.split('/')
for entry in testtree:
entry_parts = entry.split('/')
if (entry_parts[:len(path_parts)] == path_parts and
len(entry_parts) == len(path_parts) + 1):
yield entry
return fake_listdir
def generate_fake_unlink(testtree):
def fake_unlink(path):
check_removed(path, 'unlink', testtree)
del testtree[path]
return fake_unlink
def generate_fake_lgetfilecon(testtree):
def fake_lgetfilecon(path):
check_removed(path, 'lgetfilecon', testtree)
return fake_lgetfilecon
def generate_fake_lsetfilecon(testtree):
def fake_lsetfilecon(path, context):
check_removed(path, 'lsetfilecon', testtree)
if testtree[path]['nfs']:
raise OSError(95, 'Operation not supported')
return fake_lsetfilecon
@contextlib.contextmanager
def fake_testtree(testtree):
fake_stat = generate_fake_stat(testtree)
fake_chown = generate_fake_chown(testtree)
fake_exists = generate_fake_exists(testtree)
fake_listdir = generate_fake_listdir(testtree)
fake_unlink = generate_fake_unlink(testtree)
fake_lsetfilecon = generate_fake_lsetfilecon(testtree)
fake_lgetfilecon = generate_fake_lgetfilecon(testtree)
with mock.patch('os.chown',
side_effect=fake_chown) as fake_chown:
with mock.patch('os.path.exists',
side_effect=fake_exists) as fake_exists:
with mock.patch('os.listdir',
side_effect=fake_listdir) as fake_listdir:
with mock.patch('pwd.getpwnam',
return_value=(0, 0, current_uid, current_gid)):
with mock.patch('os.stat',
side_effect=fake_stat) as fake_stat:
with mock.patch(
'os.unlink',
side_effect=fake_unlink
) as fake_unlink:
with mock.patch(
'selinux.lgetfilecon',
side_effect=fake_lgetfilecon,
return_value=[10, 'newcontext']
) as fake_lgetfilecon:
with mock.patch(
'selinux.lsetfilecon',
side_effect=fake_lsetfilecon,
) as fake_lsetfilecon:
yield (fake_chown,
fake_exists,
fake_listdir,
fake_stat,
fake_unlink,
fake_lgetfilecon,
fake_lsetfilecon)
def assert_ids(testtree, path, uid, gid):
statinfo = testtree[path]['stat']
assert (uid, gid) == (statinfo.st_uid, statinfo.st_gid), \
"{}: expected ownership {}:{} actual {}:{}".format(
path, uid, gid, statinfo.st_uid, statinfo.st_gid
)
class PathManagerCase(base.BaseTestCase):
def test_file(self):
testtree = generate_testtree1(current_uid, current_gid)
with fake_testtree(testtree):
pathinfo = PathManager('/var/lib/nova/instances/foo/baz')
self.assertTrue(pathinfo.has_owner(current_uid, current_gid))
self.assertTrue(pathinfo.has_either(current_uid, 0))
self.assertTrue(pathinfo.has_either(0, current_gid))
self.assertFalse(pathinfo.is_dir)
self.assertEqual(str(pathinfo), 'uid: {} gid: {} path: {}'.format(
current_uid, current_gid, '/var/lib/nova/instances/foo/baz'
))
def test_dir(self):
testtree = generate_testtree1(current_uid, current_gid)
with fake_testtree(testtree):
pathinfo = PathManager('/var/lib/nova')
self.assertTrue(pathinfo.has_owner(current_uid, current_gid))
self.assertTrue(pathinfo.has_either(current_uid, 0))
self.assertTrue(pathinfo.has_either(0, current_gid))
self.assertTrue(pathinfo.is_dir)
self.assertEqual(str(pathinfo), 'uid: {} gid: {} path: {}'.format(
current_uid, current_gid, '/var/lib/nova/'
))
def test_chown(self):
testtree = generate_testtree1(current_uid, current_gid)
with fake_testtree(testtree):
pathinfo = PathManager('/var/lib/nova/instances/foo/baz')
self.assertTrue(pathinfo.has_owner(current_uid, current_gid))
pathinfo.chown(current_uid + 1, current_gid)
assert_ids(testtree, pathinfo.path, current_uid + 1, current_gid)
def test_chgrp(self):
testtree = generate_testtree1(current_uid, current_gid)
with fake_testtree(testtree):
pathinfo = PathManager('/var/lib/nova/instances/foo/baz')
self.assertTrue(pathinfo.has_owner(current_uid, current_gid))
pathinfo.chown(current_uid, current_gid + 1)
assert_ids(testtree, pathinfo.path, current_uid, current_gid + 1)
def test_chown_chgrp(self):
testtree = generate_testtree1(current_uid, current_gid)
with fake_testtree(testtree):
pathinfo = PathManager('/var/lib/nova/instances/foo/baz')
self.assertTrue(pathinfo.has_owner(current_uid, current_gid))
pathinfo.chown(current_uid + 1, current_gid + 1)
assert_ids(testtree, pathinfo.path,
current_uid + 1, current_gid + 1)
class NovaStatedirOwnershipManagerTestCase(base.BaseTestCase):
def test_no_upgrade_marker(self):
testtree = generate_testtree1(current_uid, current_gid)
with fake_testtree(testtree) as (fake_chown, _, _, _, _, _, fake_lsetfilecon):
NovaStatedirOwnershipManager('/var/lib/nova').run()
fake_chown.assert_called_once_with('/var/lib/nova/instances/foo/removeddir2', 100, -1)
fake_lsetfilecon.assert_any_call('/var/lib/nova', 'newcontext')
fake_lsetfilecon.assert_any_call('/var/lib/nova/instances/foo', 'newcontext')
chcon_paths = [x[0][0] for x in fake_lsetfilecon.call_args_list]
self.assertNotIn('/var/lib/nova/instances/foo/bar', chcon_paths)
def test_upgrade_marker_no_id_change(self):
testtree = generate_testtree2(current_uid,
current_gid,
current_uid,
current_gid)
with fake_testtree(testtree) as (fake_chown, _, _, _, fake_unlink, _, _):
NovaStatedirOwnershipManager('/var/lib/nova').run()
fake_chown.assert_called_once_with('/var/lib/nova/instances/foo/removeddir2', 100, -1)
fake_unlink.assert_called_with('/var/lib/nova/upgrade_marker')
def test_upgrade_marker_id_change(self):
other_uid = current_uid + 1
other_gid = current_gid + 1
testtree = generate_testtree2(other_uid,
other_gid,
other_uid,
other_gid)
# Determine which paths should change uid/gid
expected_changes = {}
for k, v in testtree.items():
if k == '/var/lib/nova/upgrade_marker':
# Ignore the marker, it should be deleted
continue
if k == '/var/lib/_nova_secontext':
# Ignore, outside tree
continue
if testtree[k].get('removed_when', False):
# Ignore, deleted
continue
v = v['stat']
if v.st_uid == other_uid or v.st_gid == other_gid:
expected_changes[k] = (
current_uid if v.st_uid == other_uid else v.st_uid,
current_gid if v.st_gid == other_gid else v.st_gid
)
with fake_testtree(testtree) as (_, _, _, _, fake_unlink, _, _):
NovaStatedirOwnershipManager('/var/lib/nova').run()
for fn, expected in expected_changes.items():
assert_ids(testtree, fn, expected[0], expected[1])
fake_unlink.assert_called_with('/var/lib/nova/upgrade_marker')
def test_exclude_path(self):
testtree = generate_testtree1(current_uid, current_gid)
with fake_testtree(testtree) as (
fake_chown, _, fake_listdir, fake_stat, _, _, _):
manager = NovaStatedirOwnershipManager(
'/var/lib/nova',
exclude_paths=['instances/foo/bar', '/var/lib/nova/instances/foo/removeddir']
)
manager.run()
self.assertIn('/var/lib/nova/instances/foo/bar', manager.exclude_paths)
self.assertIn('/var/lib/nova/instances/foo/removeddir', manager.exclude_paths)
self.assertNotIn(mock.call('/var/lib/nova/instances/foo/bar'), fake_stat.call_args_list)
self.assertNotIn(mock.call('/var/lib/nova/instances/foo/bar'), fake_chown.call_args_list)
self.assertNotIn(mock.call('/var/lib/nova/instances/foo/removeddir'), fake_stat.call_args_list)
self.assertNotIn(mock.call('/var/lib/nova/instances/foo/removeddir'), fake_chown.call_args_list)
self.assertNotIn(mock.call('/var/lib/nova/instances/foo/removeddir'), fake_listdir.call_args_list)
@mock.patch.dict(os.environ, {'NOVA_STATEDIR_OWNERSHIP_SKIP': 'foo:bar:foo/bar/baz'})
def test_get_exclude_paths(self):
expected = [
'foo',
'bar',
'foo/bar/baz'
]
exclude_paths = get_exclude_paths()
self.assertEqual(exclude_paths, expected)

View File

@ -1,18 +0,0 @@
#!/bin/bash
set -eu
HOST=$1
PORT=$2
echo "$(date -u): Checking whether we can bind to ${HOST}:${PORT}"
while (ss -Htnl src "${HOST}" "sport = :${PORT}" | grep -wq "${PORT}"); do
echo "$(date -u): ${HOST}:${PORT} still in use, waiting...";
sleep 10;
done
shift 2
COMMAND="$*"
if [ -z "${COMMAND}" ]; then
COMMAND="true"
fi
exec $COMMAND
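# Example usage (illustrative; the script name, address, port and command
# are assumptions, not part of the original file):
#   ./verify-port-free.sh 172.16.2.10 3306 /usr/sbin/mysqld
# This waits until nothing listens on 172.16.2.10:3306, then execs mysqld.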

View File

@ -1,73 +0,0 @@
TripleO with Deployed Servers
=============================
The deployed-server set of templates can be used to deploy TripleO via
tripleo-heat-templates to servers that are already installed with a base
operating system.
When OS::TripleO::Server is mapped to the deployed-server.yaml template via the
provided deployed-server-environment.yaml resource registry, Nova and Ironic
are not used to create any server instances. Heat continues to create the
SoftwareDeployment resources, and they are made available to the already
deployed and running servers.
Template Usage
--------------
To use these templates, pass the included environment file to the deployment
command::
-e environments/deployed-server-environment.yaml
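For example, a minimal deploy command might look like the following sketch
(combine with any other environment files your deployment needs)::
  openstack overcloud deploy --templates \
    -e environments/deployed-server-environment.yaml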
Deployed Server configuration
-----------------------------
It is currently assumed that the deployed servers being used have the required
set of software and packages already installed on them. These exact
requirements must match how such a server would look if it were deployed the
standard way via Ironic using the TripleO overcloud-full image.
An easy way to help get this setup for development is to use an overcloud-full
image from an already existing TripleO setup. Create the VMs for the already
deployed servers, and use the overcloud-full image as their disk.
Each server must have an FQDN set that resolves to an IP address on a routable
network (e.g., the hostname should not resolve to 127.0.0.1). The hostname
will be detected on each server via the hostnamectl --static command.
Each server also must have a route to the configured IP address on the
undercloud where the OpenStack services are listening. This is the value for
local_ip in the undercloud.conf.
It's recommended that each server have at least 2 NICs: one used for external
management such as SSH, and one used for the OpenStack deployment itself. Since
the overcloud deployment will reconfigure networking on the configured NIC to
be used by OpenStack, the external management NIC is needed as a fallback so
that all connectivity is not lost in case of a configuration error. Be sure to
use the correct NIC config templates as needed, since the nodes will not receive
DHCP from the undercloud neutron-dhcp-agent service.
For example, the net_config_static_bridge.j2 template could be used for
controllers, and the net_config_static.j2 template could be used for computes
by specifying:
parameter_defaults:
ControllerNetworkConfigTemplate: 'templates/net_config_static_bridge.j2'
ComputeNetworkConfigTemplate: 'templates/net_config_static.j2'
In a setup where the first NIC on the servers is used for external management,
set the NICs to be used for OpenStack to nic2:
parameter_defaults:
NeutronPublicInterface: nic2
HypervisorNeutronPublicInterface: nic2
The above NIC config templates also require a route to the ctlplane network to
be defined. Define the needed parameters as necessary for your environment, for
example:
parameter_defaults:
ControlPlaneDefaultRoute: 192.168.122.130
ControlPlaneSubnetCidr: "24"
In this example, 192.168.122.130 is the external management IP of an
undercloud, thus it is the default route for the configured local_ip value of
192.168.24.1.

View File

@ -1,105 +0,0 @@
# ******************************************************************************
# DEPRECATED: The environment
# environments/deployed-server-deployed-neutron-ports.yaml, the
# deployed-neutron-port.yaml template, and DeployedServerPortMap parameter are
# deprecated in favor of NodePortMap, ControlPlaneVipData, and VipPortMap,
# which can be used with the generated environments/deployed-ports.yaml
# ******************************************************************************
heat_template_version: wallaby
description: "
A fake OS::Neutron::Port stack which outputs fixed_ips and subnets based on
the input from the DeployedServerPortMap (set via parameter_defaults). This
lookup requires the use of port naming conventions. In order for this to work
with deployed-server, the keys should be <hostname>-<network>.
Example:
parameter_defaults:
DeployedServerPortMap:
gatsby-ctlplane:
fixed_ips:
- ip_address: 127.0.0.1
subnets:
- cidr: 127.0.0.0/24
network:
tags:
- 127.0.0.0/24"
parameters:
name:
default: ''
type: string
dns_name:
default: ''
type: string
network:
default: ''
type: string
fixed_ips:
default: ''
type: comma_delimited_list
replacement_policy:
default: ''
type: string
tags:
default: []
type: comma_delimited_list
DeployedServerPortMap:
default: {}
type: json
IsVirtualIP: # Here for compatibility
default: false
type: boolean
Role: # Here for compatibility
description: >
The TripleO Role Name
type: string
default: ''
parameter_groups:
- label: deprecated
description: |
The following parameters are deprecated and will be removed. They should not
be relied on for new deployments. If you have concerns regarding deprecated
parameters, please contact the TripleO development team on IRC or the
OpenStack mailing list.
parameters:
- DeployedServerPortMap
outputs:
fixed_ips:
value:
{get_param: [DeployedServerPortMap, {get_param: name}, fixed_ips]}
subnets:
value:
{get_param: [DeployedServerPortMap, {get_param: name}, subnets]}
network:
value:
{get_param: [DeployedServerPortMap, {get_param: name}, network]}
name:
value: {get_param: name}
status:
value: DOWN
allowed_address_pairs:
value: {}
device_id:
value: ''
device_owner:
value: {get_param: network}
dns_assignment:
value: ''
port_security_enabled:
value: False
admin_state_up:
value: False
security_groups:
value: {}
network_id:
value: ''
tenant_id:
value: ''
qos_policy_id:
value: ''
mac_address:
value: ''

View File

@ -1,79 +0,0 @@
heat_template_version: wallaby
parameters:
RootStackName:
description: The name of the stack/plan.
type: string
name:
type: string
default: 'deployed-server'
metadata:
type: json
default: {}
software_config_transport:
default: POLL_SERVER_CFN
type: string
ControlPlaneSubnet:
default: ctlplane-subnet
description: The name of the undercloud Neutron control plane subnet
type: string
NodePortMap:
description: >
A map of deployed port data
default: {}
type: json
resources:
deployed-server:
type: OS::Heat::DeployedServer
properties:
name: {get_param: name}
software_config_transport: {get_param: software_config_transport}
ControlPlanePortDeployedServerPortMap:
type: OS::TripleO::DeployedServer::ControlPlanePort
properties:
network: ctlplane
fixed_ips: [{subnet: {get_param: ControlPlaneSubnet}}]
name:
list_join:
- '-'
- - {get_param: name}
- ctlplane
replacement_policy: AUTO
tags:
- str_replace:
template: tripleo_stack_name=$STACK_NAME
params:
$STACK_NAME: {get_param: RootStackName}
ControlPlanePortNodePortMap:
type: OS::Heat::Value
properties:
value: {get_param: [NodePortMap, {get_param: name}, ctlplane]}
outputs:
OS::stack_id:
value: {get_resource: deployed-server}
networks:
value:
if:
- equals:
- {get_param: [NodePortMap]}
- {}
- ctlplane:
- {get_attr: [ControlPlanePortDeployedServerPortMap, fixed_ips, 0, ip_address]}
- ctlplane:
- {get_attr: [ControlPlanePortNodePortMap, value, ip_address]}
addresses:
value:
if:
- equals:
- {get_param: [NodePortMap]}
- {}
- ctlplane:
- {get_attr: [ControlPlanePortDeployedServerPortMap, fixed_ips, 0, ip_address]}
- ctlplane:
- subnets:
- cidr: {get_attr: [ControlPlanePortNodePortMap, value, ip_subnet]}
name:
value: {get_param: name}

View File

@ -1,26 +0,0 @@
#!/bin/bash
set -eu
OVERCLOUD_PLAN=${OVERCLOUD_PLAN:-"overcloud"}
# whitespace (space or newline) separated list
OVERCLOUD_HOSTS=${OVERCLOUD_HOSTS:-""}
OVERCLOUD_SSH_USER=${OVERCLOUD_SSH_USER:-"$USER"}
function get_python() {
command -v python3 || command -v python2 || command -v python || exit 1
}
function overcloud_ssh_hosts_json {
echo "$OVERCLOUD_HOSTS" | $(get_python) -c '
import json, re, sys
print(json.dumps(re.split("\s+", sys.stdin.read().strip())))'
}
echo "Running playbook to create ssh admin on deployed servers."
echo "SSH user: $OVERCLOUD_SSH_USER"
echo "Hosts: $OVERCLOUD_HOSTS"
extra_vars="{\"ssh_user\": \"$OVERCLOUD_SSH_USER\", \"ssh_servers\": $(overcloud_ssh_hosts_json), \"tripleo_cloud_name\": \"$OVERCLOUD_PLAN\"}"
ansible-playbook /usr/share/ansible/tripleo-playbooks/cli-enable-ssh-admin.yaml -e "$extra_vars"
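# Example usage (illustrative; the script name and addresses are
# assumptions, not part of the original file):
#   OVERCLOUD_HOSTS="192.0.2.11 192.0.2.12" \
#   OVERCLOUD_SSH_USER=stack \
#   ./enable-ssh-admin.sh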

View File

@ -1,305 +0,0 @@
===================
TripleO Deployments
===================
This directory contains files that represent individual service deployments,
orchestration tools, and the configuration tools used to deploy them.
Directory Structure
-------------------
Each logical grouping of services will have a directory. Example: 'timesync'.
Within this directory, the related timesync services would exist to, for
example, configure timesync services on baremetal or via containers.
Filenaming conventions
----------------------
As a convention, each deployment service filename reflects both
the deployment engine (baremetal or containers) and the
config tool used to deploy that service.
The convention is <service-name>-<engine>-<config management tool>.
Examples:
deployment/aodh/aodh-api-container-puppet.yaml (containerized Aodh service configured with Puppet)
deployment/aodh/aodh-api-container-ansible.yaml (containerized Aodh service configured with Ansible)
deployment/timesync/chrony-baremetal-ansible.yaml (baremetal Chrony service configured with Ansible)
deployment/timesync/chrony-baremetal-puppet.yaml (baremetal Chrony service configured with Puppet)
Building Kolla Images
---------------------
TripleO currently relies on Kolla (Dockerfile) containers. Kolla supports
container customization and we are making use of this feature within TripleO
to inject puppet (our configuration tool of choice) into the Kolla base images.
A variety of other customizations are being made via the
tripleo-common/container-images/tripleo_kolla_template_overrides.j2 file.
To build Kolla images for TripleO adjust your kolla config [*]_ to build your
centos base image with puppet using the example below:
.. code-block::
$ cat template-overrides.j2
{% extends parent_template %}
{% set base_centos_binary_packages_append = ['puppet'] %}
{% set nova_scheduler_packages_append = ['openstack-tripleo-common'] %}
kolla-build --base centos --template-override template-overrides.j2
..
.. [*] See the
`override file <https://github.com/openstack/tripleo-common/blob/master/container-images/tripleo_kolla_template_overrides.j2>`_
which can be used to build Kolla packages that work with TripleO.
Containerized Deployment Template Structure
-------------------------------------------
Each deployment template may define a set of output values that control
the underlying service deployment in a variety of ways. These output sections
are specific to the TripleO deployment architecture. The following sections
are available for containerized services.
* config_settings: This section contains service-specific hiera data
that can be used to generate config files for each service. This data
is ultimately processed via the container_puppet_config module in
tripleo-ansible which generates config files for each service according to
the settings here.
* kolla_config: Contains YAML that represents how to map config files
into the kolla container. This config file is typically mapped into
the container itself at the /var/lib/kolla/config_files/config.json
location and drives how kolla's external config mechanisms work.
* docker_config: Data that is passed to the tripleo_container_manage role to configure
a container, or step of containers at each step. See the available steps
documented below which are implemented by TripleO's cluster deployment
architecture. If you want the tasks executed only once for the bootstrap
node per role in the cluster, use the `/usr/bin/bootstrap_host_exec`
wrapper.
* puppet_config: This section is a nested set of key value pairs
that drive the creation of config files using puppet.
Required parameters include:
* puppet_tags: Puppet resource tag names that are used to generate config
files with puppet. Only the named config resources are used to generate
a config file. Any service that specifies tags will have the default
tags of 'file,concat,file_line,augeas,cron' appended to the setting.
Example: keystone_config
* config_volume: The name of the volume (directory) where config files
will be generated for this service. Use this as the location to
bind mount into the running Kolla container for configuration.
* config_image: The name of the container image that will be used for
generating configuration files. This is often the same container
that the runtime service uses. Some services share a common set of
config files which are generated in a common base container.
* step_config: This setting controls the manifest that is used to
create container config files via puppet. The puppet tags below are
used along with this manifest to generate a config directory for
this container.
* container_puppet_tasks: This section provides data to drive the
puppet containers tooling directly. The task is executed for the
defined steps before the corresponding docker_config's step. Puppet
always sees the step number overridden as step 6. This can be useful
for one-time initialization.
Note that the tasks are executed only once for the bootstrap node per
role in the cluster. Make sure the puppet manifest ensures the wanted
"at most once" semantics. That may be achieved via the
`<service_name>_short_bootstrap_node_name` hiera parameters automatically
evaluated for each service.
* global_config_settings: the hiera keys will be distributed to all roles
* service_config_settings: Takes an extra key to wire in values that are
defined for a service that need to be consumed by some other service.
For example:
service_config_settings:
haproxy:
foo: bar
This will set the hiera key 'foo' on all roles where haproxy is included.
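As a sketch only (the service name, hiera keys and image parameters below
are invented for illustration, not taken from a real service), these output
sections typically fit together as follows::
  outputs:
    role_data:
      value:
        service_name: example_api
        config_settings:
          example::api::bind_host: {get_param: [ServiceNetMap, ExampleApiNetwork]}
        kolla_config:
          /var/lib/kolla/config_files/example_api.json:
            command: /usr/bin/example-api
        docker_config:
          step_4:
            example_api:
              image: {get_param: ContainerExampleApiImage}
              restart: always
        puppet_config:
          config_volume: example_api
          puppet_tags: example_config
          step_config: include tripleo::profile::base::example_api
          config_image: {get_param: ContainerExampleApiConfigImage}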
Deployment steps
----------------
Similar to baremetal, containers are brought up in a stepwise manner.
The current architecture supports bringing up baremetal services alongside
containers. For each step, the baremetal puppet manifests are executed
first and then any containers are brought up afterwards.
Steps correlate to the following:
Pre) Container config files are generated per hiera settings.
1) Load Balancer configuration baremetal
a) step 1 baremetal
b) step 1 containers
2) Core Services (Database/Rabbit/NTP/etc.)
a) step 2 baremetal
b) step 2 containers
3) Early OpenStack service setup (Ringbuilder, etc.)
a) step 3 baremetal
b) step 3 containers
4) General OpenStack Services
a) step 4 baremetal
b) step 4 containers
c) Keystone containers post-initialization (tenant, service, endpoint creation)
5) Service activation (Pacemaker), online data migration
a) step 5 baremetal
b) step 5 containers
Update steps:
-------------
All services have an associated update_tasks output, an ansible
snippet that is run during a minor update in a rolling fashion
(one node at a time).
For the Controller role (where pacemaker is running) we have the following steps:
1. Step=1: Stop the cluster on the updated node.
2. Step=2: Pull the latest image and retag it as pcmklatest.
3. Step=3: yum upgrade happens on the host.
4. Step=4: Restart the cluster on the node.
5. Step=5: Verification:
Currently we test that the pacemaker services are running.
Then the usual deploy steps are run, which pull in the latest image for
all containerized services and the updated configuration, if any.
Note: as pacemaker is not containerized, steps 1 and 4 happen in
deployment/pacemaker/pacemaker-baremetal-puppet.yaml.
Input Parameters
----------------
Each service may define its own input parameters and defaults.
Operators will use the parameter_defaults section of any Heat
environment to set per service parameters.
Apart from service-specific inputs, there are a few default parameters for all
services. The following is the list of default parameters:
* ServiceData: Mapping of service specific data. It is used to encapsulate
all the service specific data. As of now, it contains net_cidr_map, which
contains the CIDR map for all the networks. Additional data will be added
as and when required.
* ServiceNetMap: Mapping of service_name -> network name. Default mappings
for service to network names are defined in
./overcloud-resource-registry-puppet.j2.yaml, which may be overridden via
ServiceNetMap values added to a user environment file via parameter_defaults.
Use parameter_merge_strategies to merge it with the defaults.
* EndpointMap: Mapping of service endpoint -> protocol. Contains a mapping of
endpoint data generated for all services, based on the data included in
../network/endpoints/endpoint_data.yaml.
* RoleName: Name of the role on which this service is deployed. A service can
be deployed in multiple roles. This is an internal parameter (should not be
set via environment file), which is fetched from the name attribute of the
roles_data.yaml template.
* RoleParameters: Parameters specific to the role on which the service is
applied. Using the format "<RoleName>Parameters" in the parameter_defaults
of user environment file, parameters can be provided for a specific role.
For example, in order to provide a parameter specific to "Compute" role,
below is the format::
parameter_defaults:
ComputeParameters:
Param1: value
Update Steps
------------
Each service template may optionally define an `update_tasks` key,
which is a list of ansible tasks to be performed during the minor
update process. These are executed in a rolling manner node-by-node.
We allow a series of steps for the per-service update sequence via
conditionals referencing a step variable e.g `when: step|int == 2`.
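A minimal sketch of such a snippet (the service name is hypothetical)::

    update_tasks:
      - name: Stop example_service before its packages are updated
        when: step|int == 2
        service:
          name: example_service
          state: stopped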
Pre-upgrade Rolling Steps
-------------------------
Each service template may optionally define a
`pre_upgrade_rolling_tasks` key, which is a list of ansible tasks to
be performed before the main upgrade phase. These tasks are executed
node by node, in a rolling manner on the overcloud, similarly to
`update_tasks`.
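For illustration only (both task bodies are hypothetical), such tasks could
drain a node before it is upgraded::

    pre_upgrade_rolling_tasks:
      - name: Drain connections from this node before the main upgrade
        command: /usr/local/bin/example-drain-node
      - name: Give in-flight requests time to finish
        pause:
          seconds: 30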
Upgrade Steps
-------------
Each service template may optionally define an `upgrade_tasks` key, which is a
list of ansible tasks to be performed during the upgrade process.
Similar to the `update_tasks`, we allow a series of steps for the
per-service upgrade sequence, defined as ansible tasks with a `when:
step|int == 1` for the first step, `== 2` for the second, etc.
Steps correlate to the following:
1) Perform any pre-upgrade validations.
2) Stop the control-plane services, e.g. disable the LoadBalancer, stop the
pacemaker cluster and stop any managed resources.
The exact order is controlled by the cluster constraints.
3) Perform a package update and install new packages: a general
upgrade is done, and only new packages should be installed via the
service ansible tasks.
4) Start services needed for migration tasks (e.g. the DB).
5) Perform any migration tasks, e.g. DB sync commands.
Note that the services are not started in the upgrade tasks - we instead re-run
puppet, which does any reconfiguration required for the new version, then starts
the services.
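A minimal sketch of an `upgrade_tasks` list following these steps (the
service, script and package names are hypothetical)::

    upgrade_tasks:
      - name: Validate that example_service can be upgraded
        when: step|int == 1
        command: /usr/local/bin/example-preflight-check
      - name: Stop example_service
        when: step|int == 2
        service:
          name: example_service
          state: stopped
      - name: Install the package added in the new release
        when: step|int == 3
        package:
          name: example-service-extras
          state: present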
When running an OS upgrade via the tags `system_upgrade_prepare` and
`system_upgrade_run`, or the combined tag `system_upgrade`, the steps
correlate to the following:
1) Any pre-service-stop actions. (`system_upgrade_prepare`)
2) Stop all services. (`system_upgrade_prepare`)
3) Post-service-stop actions like removing packages before the
upgrade. (`system_upgrade_prepare`)
4) Step reserved for the `tripleo-packages` service. Only the package
download for the upgrade (under the `system_upgrade_prepare` tag) and
the reboot performing the offline upgrade (under the
`system_upgrade_run` tag) happen here.
5) Any post-upgrade tasks (`system_upgrade_run`).
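As a sketch, a task bound to one of these steps follows the same tagging
pattern used by the external_upgrade_tasks in the templates below (the
package name is hypothetical)::

    upgrade_tasks:
      - name: Remove an obsolete package before the OS upgrade
        when: step|int == 3
        tags:
          - never
          - system_upgrade
          - system_upgrade_prepare
        package:
          name: example-obsolete-package
          state: absent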
Nova Server Metadata Settings
-----------------------------
One can use a hook of type `OS::TripleO::ServiceServerMetadataHook` to pass
entries to the nova instances' metadata. It is, however, disabled by default.
In order to enable it, one needs to define it in the resource registry. An
implementation of this hook needs to conform to the following:
* It needs to define an input called `RoleData` of json type. This gets as
input the contents of the `role_data` for each role's ServiceChain.
* It needs to define an output called `metadata`, which will be given to the
Nova Server resource as the instance's metadata.
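A minimal sketch of a conforming template (the metadata entry is
illustrative)::

    heat_template_version: wallaby
    description: >
      Illustrative implementation of OS::TripleO::ServiceServerMetadataHook
    parameters:
      RoleData:
        type: json
        default: {}
    outputs:
      metadata:
        description: Entries to merge into the Nova server's metadata
        value:
          example_metadata_key: example_value

It would then be enabled by mapping `OS::TripleO::ServiceServerMetadataHook`
to this template in the resource_registry section of an environment file.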
Keystone resources management
-----------------------------
Keystone resources, such as users, roles, domains, endpoints, services and
role assignments, are now managed by the `tripleo-keystone-resources`_
Ansible role.
.. _tripleo-keystone-resources: https://docs.openstack.org/tripleo-ansible/latest/roles/role-tripleo-keystone-resources.html
@ -1,83 +0,0 @@
heat_template_version: wallaby
description: >
Aide service configured with Ansible
parameters:
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
ServiceData:
default: {}
description: Dictionary packing service data
type: json
AideConfPath:
description: Aide configuration file
type: string
default: '/etc/aide.conf'
AideDBPath:
description: Aide integrity database location
type: string
default: '/var/lib/aide/aide.db'
AideDBTempPath:
description: Aide integrity database temp location
type: string
default: '/var/lib/aide/aide.db.new'
AideHour:
description: Hour value for Cron Job
type: number
default: 11
AideCronUser:
description: User which creates and runs the cron job for aide
type: string
default: 'root'
AideMinute:
description: Minute value for Cron Job
type: number
default: 30
AideEmail:
description: Email address to send reports on Cron Job
type: string
default: ''
AideMuaPath:
description: Full POSIX path to mail binary
type: string
default: '/bin/mail'
AideRules:
description: A hash of Aide rules
type: json
default: {}
outputs:
role_data:
description: Role data for the aide service
value:
service_name: aide
host_prep_tasks:
- include_role:
name: aide
ansible_group_vars:
aide_rules: {get_param: AideRules}
aide_conf_path: {get_param: AideConfPath}
aide_db_path: {get_param: AideDBPath}
aide_db_temp_path: {get_param: AideDBTempPath}
aide_cron_user: {get_param: AideCronUser}
aide_hour: {get_param: AideHour}
aide_minute: {get_param: AideMinute}
aide_email: {get_param: AideEmail}
aide_mua_path: {get_param: AideMuaPath}
@ -1,392 +0,0 @@
heat_template_version: wallaby
description: >
OpenStack containerized aodh service
parameters:
ContainerAodhApiImage:
description: image
type: string
tags:
- role_specific
ContainerAodhConfigImage:
description: The container image to use for the aodh config_volume
type: string
tags:
- role_specific
AodhApiLoggingSource:
type: json
default:
tag: openstack.aodh.api
file: /var/log/containers/aodh/app.log
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
DeployIdentifier:
default: ''
type: string
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
EnableInternalTLS:
type: boolean
default: false
GnocchiExternalProject:
default: 'service'
description: Project name of resources creator in Gnocchi.
type: string
MonitoringSubscriptionAodhApi:
default: 'overcloud-ceilometer-aodh-api'
type: string
AodhApiPolicies:
description: |
A hash of policies to configure for Aodh API.
e.g. { aodh-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
default: {}
type: json
KeystoneRegion:
type: string
default: 'regionOne'
description: Keystone region for endpoint
AodhPassword:
description: The password for the aodh services.
type: string
hidden: true
AodhAlarmHistoryTTL:
description: Number of seconds that alarm histories are kept in the database
default: '86400'
type: string
EnableAodhExpirer:
type: boolean
default: true
description: Enable aodh expirer to periodically delete alarms from db
AodhExpirerMinute:
type: string
description: >
Cron to delete alarms data from db - Minute
default: '1'
AodhExpirerHour:
type: string
description: >
Cron to delete alarms data from db - Hour
default: '0'
AodhExpirerMonthday:
type: string
description: >
Cron to delete alarms data from db - Month Day
default: '*'
AodhExpirerMonth:
type: string
description: >
Cron to delete alarms data from db - Month
default: '*'
AodhExpirerWeekday:
type: string
description: >
Cron to delete alarms from db - Week Day
default: '*'
AodhExpirerMaxDelay:
type: string
description: >
Cron to delete alarms from db - Max Delay
default: '3600'
MemcacheUseAdvancedPool:
type: boolean
description: |
Use the advanced (eventlet safe) memcached client pool.
default: true
resources:
ContainersCommon:
type: ../containers-common.yaml
MySQLClient:
type: ../database/mysql-client.yaml
AodhBase:
type: ./aodh-base.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
ApacheServiceBase:
type: ../../deployment/apache/apache-baremetal-puppet.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
RoleParametersValue:
type: OS::Heat::Value
properties:
type: json
value:
map_replace:
- map_replace:
- ContainerAodhApiImage: ContainerAodhApiImage
ContainerAodhConfigImage: ContainerAodhConfigImage
- values: {get_param: [RoleParameters]}
- values:
ContainerAodhApiImage: {get_param: ContainerAodhApiImage}
ContainerAodhConfigImage: {get_param: ContainerAodhConfigImage}
outputs:
role_data:
description: Role data for the aodh API role.
value:
service_name: aodh_api
firewall_rules:
'128 aodh-api':
dport:
- 8042
firewall_frontend_rules:
'100 aodh_haproxy_frontend':
dport:
- 8042
firewall_ssl_frontend_rules:
'100 aodh_haproxy_frontend_ssl':
dport:
- 13042
keystone_resources:
aodh:
endpoints:
public: {get_param: [EndpointMap, AodhPublic, uri]}
internal: {get_param: [EndpointMap, AodhInternal, uri]}
admin: {get_param: [EndpointMap, AodhAdmin, uri]}
users:
aodh:
password: {get_param: AodhPassword}
roles:
- admin
- service
region: {get_param: KeystoneRegion}
service: 'alarming'
monitoring_subscription: {get_param: MonitoringSubscriptionAodhApi}
config_settings:
map_merge:
- get_attr: [AodhBase, role_data, config_settings]
- get_attr: [ApacheServiceBase, role_data, config_settings]
- apache::default_vhost: false
aodh::wsgi::apache::access_log_format: 'forwarded'
aodh::wsgi::apache::ssl: {get_param: EnableInternalTLS}
aodh::wsgi::apache::servername:
str_replace:
template:
"%{lookup('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, AodhApiNetwork]}
aodh::wsgi::apache::wsgi_process_display_name: 'aodh_wsgi'
aodh::api::service_name: 'httpd'
aodh::api::enable_proxy_headers_parsing: true
aodh::api::gnocchi_external_project_owner: {get_param: GnocchiExternalProject}
aodh::api::sync_db: false
aodh::keystone::authtoken::project_name: 'service'
aodh::keystone::authtoken::user_domain_name: 'Default'
aodh::keystone::authtoken::project_domain_name: 'Default'
aodh::keystone::authtoken::password: {get_param: AodhPassword}
aodh::keystone::authtoken::www_authenticate_uri: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix] }
aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::keystone::authtoken::region_name: {get_param: KeystoneRegion}
aodh::keystone::authtoken::interface: 'internal'
aodh::keystone::authtoken::memcache_use_advanced_pool: {get_param: MemcacheUseAdvancedPool}
aodh::policy::policies: {get_param: AodhApiPolicies}
# NOTE: bind IP is found in hiera replacing the network name with the
# local node IP for the given network; replacement examples
# (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet -> IP/CIDR
aodh::wsgi::apache::bind_host:
str_replace:
template:
"%{lookup('$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, AodhApiNetwork]}
aodh::alarm_history_time_to_live: {get_param: AodhAlarmHistoryTTL}
tripleo::profile::base::aodh::api::enable_aodh_expirer: {get_param: EnableAodhExpirer}
aodh::expirer::minute: {get_param: AodhExpirerMinute}
aodh::expirer::hour: {get_param: AodhExpirerHour}
aodh::expirer::monthday: {get_param: AodhExpirerMonthday}
aodh::expirer::month: {get_param: AodhExpirerMonth}
aodh::expirer::weekday: {get_param: AodhExpirerWeekday}
aodh::expirer::maxdelay: {get_param: AodhExpirerMaxDelay}
service_config_settings:
map_merge:
- {get_attr: [AodhBase, role_data, service_config_settings]}
- rsyslog:
tripleo_logging_sources_aodh_api:
- {get_param: AodhApiLoggingSource}
# BEGIN DOCKER SETTINGS
puppet_config:
config_volume: aodh
puppet_tags: aodh_api_paste_ini,aodh_config
step_config:
list_join:
- "\n"
- - "include tripleo::profile::base::aodh::api"
- {get_attr: [MySQLClient, role_data, step_config]}
config_image: {get_attr: [RoleParametersValue, value, ContainerAodhConfigImage]}
kolla_config:
/var/lib/kolla/config_files/aodh_api.json:
command: /usr/sbin/httpd -DFOREGROUND
config_files: &aodh_api_config_files
- source: "/var/lib/kolla/config_files/src/etc/httpd/conf.d"
dest: "/etc/httpd/conf.d"
merge: false
preserve_properties: true
- source: "/var/lib/kolla/config_files/src/etc/httpd/conf.modules.d"
dest: "/etc/httpd/conf.modules.d"
merge: false
preserve_properties: true
- source: "/var/lib/kolla/config_files/src/*"
dest: "/"
merge: true
preserve_properties: true
permissions: &aodh_api_permissions
- path: /var/log/aodh
owner: aodh:aodh
recurse: true
/var/lib/kolla/config_files/aodh_api_db_sync.json:
command: "/usr/bin/bootstrap_host_exec aodh_api su aodh -s /bin/bash -c /usr/bin/aodh-dbsync"
config_files: *aodh_api_config_files
permissions: *aodh_api_permissions
/var/lib/kolla/config_files/aodh_api_cron.json:
command: /usr/sbin/crond -n
config_files:
- source: "/var/lib/kolla/config_files/src/*"
dest: "/"
merge: true
preserve_properties: true
permissions:
- path: /var/log/aodh
owner: aodh:aodh
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
step_2:
aodh_init_log:
image: &aodh_api_image {get_attr: [RoleParametersValue, value, ContainerAodhApiImage]}
net: none
user: root
volumes:
- /var/log/containers/aodh:/var/log/aodh:z
- /var/log/containers/httpd/aodh-api:/var/log/httpd:z
command: ['/bin/bash', '-c', 'chown -R aodh:aodh /var/log/aodh']
step_3:
aodh_db_sync:
image: *aodh_api_image
cap_add:
- AUDIT_WRITE
net: host
privileged: false
detach: false
user: root
volumes:
list_concat:
- {get_attr: [ContainersCommon, volumes]}
- - /var/lib/kolla/config_files/aodh_api_db_sync.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/aodh:/var/lib/kolla/config_files/src:ro
- /var/log/containers/aodh:/var/log/aodh
- /var/log/containers/httpd/aodh-api:/var/log/httpd
- if:
- {get_param: EnableInternalTLS}
- - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
- /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
environment:
KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
TRIPLEO_DEPLOY_IDENTIFIER: {get_param: DeployIdentifier}
step_4:
aodh_api:
image: *aodh_api_image
net: host
privileged: false
restart: always
healthcheck:
test: /openstack/healthcheck
volumes:
list_concat:
- {get_attr: [ContainersCommon, volumes]}
- - /var/lib/kolla/config_files/aodh_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/aodh:/var/lib/kolla/config_files/src:ro
- /var/log/containers/aodh:/var/log/aodh
- /var/log/containers/httpd/aodh-api:/var/log/httpd
- if:
- {get_param: EnableInternalTLS}
- - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
- /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
environment:
KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
aodh_api_cron:
image: *aodh_api_image
net: host
user: root
privileged: false
restart: always
healthcheck:
test: '/usr/share/openstack-tripleo-common/healthcheck/cron aodh'
volumes:
list_concat:
- {get_attr: [ContainersCommon, volumes]}
- - /var/lib/kolla/config_files/aodh_api_cron.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/aodh:/var/lib/kolla/config_files/src:ro
- /var/log/containers/aodh:/var/log/aodh
- /var/log/containers/httpd/aodh-api:/var/log/httpd
environment:
KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
host_prep_tasks:
- name: create persistent logs directory
file:
path: "{{ item.path }}"
setype: "{{ item.setype }}"
state: directory
mode: "{{ item.mode|default(omit) }}"
with_items:
- { 'path': /var/log/containers/aodh, 'setype': container_file_t, 'mode': '0750' }
- { 'path': /var/log/containers/httpd/aodh-api, setype: container_file_t, 'mode': '0750' }
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
deploy_steps_tasks:
get_attr: [ApacheServiceBase, role_data, deploy_steps_tasks]
external_upgrade_tasks:
- when:
- step|int == 1
tags:
- never
- system_upgrade_transfer_data
- system_upgrade_stop_services
block:
- name: Stop aodh api container
import_role:
name: tripleo_container_stop
vars:
tripleo_containers_to_stop:
- aodh_api
- aodh_api_cron
tripleo_delegate_to: "{{ groups['aodh_api'] | difference(groups['excluded_overcloud']) }}"
@ -1,107 +0,0 @@
heat_template_version: wallaby
description: >
OpenStack Aodh service configured with Puppet
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
AodhPassword:
description: The password for the aodh services.
type: string
hidden: true
RedisPassword:
description: The password for the redis service account.
type: string
hidden: true
Debug:
default: false
description: Set to True to enable debugging on all services.
type: boolean
AodhDebug:
default: false
description: Set to True to enable debugging Aodh services.
type: boolean
EnableSQLAlchemyCollectd:
type: boolean
description: >
Set to true to enable the SQLAlchemy-collectd server plugin
default: false
KeystoneRegion:
type: string
default: 'regionOne'
description: Keystone region for endpoint
NotificationDriver:
type: comma_delimited_list
default: 'noop'
description: Driver or drivers to handle sending notifications.
AodhRpcResponseTimeout:
default: 60
description: Aodh's RPC response timeout, in seconds.
type: number
outputs:
role_data:
description: Role data for the Aodh role.
value:
service_name: aodh_base
config_settings:
aodh_redis_password: {get_param: RedisPassword}
aodh::db::database_connection:
make_url:
scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
username: aodh
password: {get_param: AodhPassword}
host: {get_param: [EndpointMap, MysqlInternal, host]}
path: /aodh
query:
if:
- {get_param: EnableSQLAlchemyCollectd}
- read_default_file: /etc/my.cnf.d/tripleo.cnf
read_default_group: tripleo
plugin: collectd
collectd_program_name: aodh
collectd_host: localhost
- read_default_file: /etc/my.cnf.d/tripleo.cnf
read_default_group: tripleo
aodh::logging::debug:
if:
- {get_param: AodhDebug}
- true
- {get_param: Debug}
aodh::notification_driver: {get_param: NotificationDriver}
aodh::rpc_response_timeout: {get_param: AodhRpcResponseTimeout}
aodh::service_credentials::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::service_credentials::password: {get_param: AodhPassword}
aodh::service_credentials::region_name: {get_param: KeystoneRegion}
aodh::service_credentials::project_name: 'service'
aodh::service_credentials::user_domain_name: 'Default'
aodh::service_credentials::project_domain_name: 'Default'
service_config_settings:
mysql:
aodh::db::mysql::user: aodh
aodh::db::mysql::password: {get_param: AodhPassword}
aodh::db::mysql::host: '%'
aodh::db::mysql::dbname: aodh
@ -1,156 +0,0 @@
heat_template_version: wallaby
description: >
OpenStack containerized Aodh Evaluator service
parameters:
ContainerAodhEvaluatorImage:
description: image
type: string
tags:
- role_specific
ContainerAodhConfigImage:
description: The container image to use for the aodh config_volume
type: string
tags:
- role_specific
AodhEvaluatorLoggingSource:
type: json
default:
tag: openstack.aodh.evaluator
file: /var/log/containers/aodh/aodh-evaluator.log
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
MonitoringSubscriptionAodhEvaluator:
default: 'overcloud-ceilometer-aodh-evaluator'
type: string
resources:
ContainersCommon:
type: ../containers-common.yaml
MySQLClient:
type: ../database/mysql-client.yaml
AodhBase:
type: ./aodh-base.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
RoleParametersValue:
type: OS::Heat::Value
properties:
type: json
value:
map_replace:
- map_replace:
- ContainerAodhEvaluatorImage: ContainerAodhEvaluatorImage
ContainerAodhConfigImage: ContainerAodhConfigImage
- values: {get_param: [RoleParameters]}
- values:
ContainerAodhEvaluatorImage: {get_param: ContainerAodhEvaluatorImage}
ContainerAodhConfigImage: {get_param: ContainerAodhConfigImage}
outputs:
role_data:
description: Role data for the Aodh API role.
value:
service_name: aodh_evaluator
monitoring_subscription: {get_param: MonitoringSubscriptionAodhEvaluator}
config_settings:
get_attr: [AodhBase, role_data, config_settings]
service_config_settings:
map_merge:
- {get_attr: [AodhBase, role_data, service_config_settings]}
- rsyslog:
tripleo_logging_sources_aodh_evaluator:
- {get_param: AodhEvaluatorLoggingSource}
# BEGIN DOCKER SETTINGS
puppet_config:
config_volume: aodh
puppet_tags: aodh_config
step_config:
list_join:
- "\n"
- - "include tripleo::profile::base::aodh::evaluator"
- {get_attr: [MySQLClient, role_data, step_config]}
config_image: {get_attr: [RoleParametersValue, value, ContainerAodhConfigImage]}
kolla_config:
/var/lib/kolla/config_files/aodh_evaluator.json:
command: /usr/bin/aodh-evaluator
config_files:
- source: "/var/lib/kolla/config_files/src/*"
dest: "/"
merge: true
preserve_properties: true
permissions:
- path: /var/log/aodh
owner: aodh:aodh
recurse: true
docker_config:
step_4:
aodh_evaluator:
image: {get_attr: [RoleParametersValue, value, ContainerAodhEvaluatorImage]}
net: host
privileged: false
restart: always
healthcheck:
test: /openstack/healthcheck
volumes:
list_concat:
- {get_attr: [ContainersCommon, volumes]}
- - /var/lib/kolla/config_files/aodh_evaluator.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/aodh:/var/lib/kolla/config_files/src:ro
- /var/log/containers/aodh:/var/log/aodh:z
environment:
KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
host_prep_tasks:
- name: create persistent directories
file:
path: "{{ item.path }}"
state: directory
setype: "{{ item.setype }}"
mode: "{{ item.mode }}"
with_items:
- { 'path': /var/log/containers/aodh, 'setype': container_file_t, 'mode': '0750' }
external_upgrade_tasks:
- when:
- step|int == 1
tags:
- never
- system_upgrade_transfer_data
- system_upgrade_stop_services
block:
- name: Stop aodh evaluator container
import_role:
name: tripleo_container_stop
vars:
tripleo_containers_to_stop:
- aodh_evaluator
tripleo_delegate_to: "{{ groups['aodh_evaluator'] | difference(groups['excluded_overcloud']) }}"
Some files were not shown because too many files have changed in this diff.