diff --git a/.ansible-lint b/.ansible-lint
deleted file mode 100644
index cabc7ce6f9..0000000000
--- a/.ansible-lint
+++ /dev/null
@@ -1,25 +0,0 @@
-kinds:
-  - j2: "**/*.j2.yaml"
-  - playbook: "**/common/*.yml"
-  - yaml: "roles/*.yaml"
-mock_modules:
-  - tripleo_all_nodes_data
-skip_list:
-  # TODO(zbr): Address in follow-ups
-  - role-name
-  - unnamed-task
-  # yaml ones
-  - yaml[braces]
-  - yaml[colons]
-  - yaml[commas]
-  - yaml[comments]
-  - yaml[comments-indentation]
-  - yaml[empty-lines]
-  - yaml[indentation]
-  - yaml[line-length]
-  - yaml[new-line-at-end-of-file]
-  - yaml[trailing-spaces]
-  - yaml[truthy]
-  - yaml[brackets]
-  - yaml[hyphens]
-  - experimental
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index fb10b0cc60..0000000000
--- a/.gitignore
+++ /dev/null
@@ -1,191 +0,0 @@
-# Add patterns in here to exclude files created by tools integrated with this
-# repository, such as test frameworks from the project's recommended workflow,
-# rendered documentation and package builds.
-#
-# Don't add patterns to exclude files created by preferred personal tools
-# (editors, IDEs, your operating system itself even). These should instead be
-# maintained outside the repository, for example in a ~/.gitignore file added
-# with:
-#
-# git config --global core.excludesfile '~/.gitignore'
-
-# Bytecompiled Python
-*.py[cod]
-
-# C extensions
-*.so
-
-# Packages
-*.egg
-*.egg-info
-dist
-build
-eggs
-parts
-bin
-var
-sdist
-develop-eggs
-.installed.cfg
-lib
-lib64
-
-# Installer logs
-pip-log.txt
-
-# Unit test / coverage reports
-.coverage
-.tox
-nosetests.xml
-.testrepository
-.stestr/*
-cover/*
-MagicMock/*
-
-# Translations
-*.mo
-
-*.bundle
-Gemfile.lock
-
-# Built by pbr (python setup.py sdist):
-AUTHORS
-ChangeLog
-
-# Sphinx
-doc/build
-
-# Editors
-*~
-.*.swp
-.*sw?
- -# Leftovers from other commants like tox -e pep8,tht -.cache -common/post.yaml -common/services/blockstorage-role.yaml -common/services/cephstorage-role.yaml -common/services/compute-role.yaml -common/services/controller-role.yaml -common/services/objectstorage-role.yaml -deployment/apache/apache-baremetal-puppet.yaml -deployment/apache/apache-baremetal-ansible.yaml -deployment/container-image-prepare/container-image-prepare-baremetal-ansible.yaml -deployment/haproxy/haproxy-internal-tls-certmonger.yaml -deployment/octavia/octavia-deployment-config.yaml -environments/deployed-ports.yaml -environments/deployed-network-environment.yaml -environments/deployed-server-environment.yaml -environments/net-2-linux-bonds-with-vlans.yaml -environments/net-bond-with-vlans-no-external.yaml -environments/net-bond-with-vlans.yaml -environments/net-dpdkbond-with-vlans.yaml -environments/net-multiple-nics-vlans.yaml -environments/net-multiple-nics.yaml -environments/net-noop.yaml -environments/net-single-nic-linux-bridge-with-vlans.yaml -environments/net-single-nic-with-vlans-no-external.yaml -environments/net-single-nic-with-vlans.yaml -environments/network-environment-v6-all.yaml -environments/network-environment-v6.yaml -environments/network-environment.yaml -environments/network-isolation-no-tunneling.yaml -environments/network-isolation-v6-all.yaml -environments/network-isolation-v6.yaml -environments/network-isolation.yaml -environments/networks-disable.yaml -environments/overcloud-baremetal.yaml -environments/split-stack-consistent-hostname-format.yaml -environments/ssl/enable-internal-tls.yaml -extraconfig/all_nodes/swap-partition.yaml -extraconfig/all_nodes/swap.yaml -extraconfig/nova_metadata/krb-service-principals/blockstorage-role.yaml -extraconfig/nova_metadata/krb-service-principals/cephstorage-role.yaml -extraconfig/nova_metadata/krb-service-principals/compute-role.yaml -extraconfig/nova_metadata/krb-service-principals/controller-role.yaml -extraconfig/nova_metadata/krb-service-principals/objectstorage-role.yaml -extraconfig/tasks/major_upgrade_pacemaker_init.yaml -network/external.yaml -network/external_v6.yaml -network/internal_api.yaml -network/internal_api_v6.yaml -network/management.yaml -network/management_v6.yaml -network/networks.yaml -network/ports/deployed_external.yaml -network/ports/deployed_internal_api.yaml -network/ports/deployed_management.yaml -network/ports/deployed_storage.yaml -network/ports/deployed_storage_mgmt.yaml -network/ports/deployed_tenant.yaml -network/ports/deployed_vip_external.yaml -network/ports/deployed_vip_internal_api.yaml -network/ports/deployed_vip_management.yaml -network/ports/deployed_vip_storage.yaml -network/ports/deployed_vip_storage_mgmt.yaml -network/ports/deployed_vip_tenant.yaml -network/ports/external.yaml -network/ports/external_from_pool.yaml -network/ports/external_from_pool_v6.yaml -network/ports/external_resource_external.yaml -network/ports/external_resource_external_v6.yaml -network/ports/external_resource_internal_api.yaml -network/ports/external_resource_internal_api_v6.yaml -network/ports/external_resource_management.yaml -network/ports/external_resource_management_v6.yaml -network/ports/external_resource_storage.yaml -network/ports/external_resource_storage_mgmt.yaml -network/ports/external_resource_storage_mgmt_v6.yaml -network/ports/external_resource_storage_v6.yaml -network/ports/external_resource_tenant.yaml -network/ports/external_resource_tenant_v6.yaml -network/ports/external_v6.yaml -network/ports/internal_api.yaml 
-network/ports/internal_api_from_pool.yaml -network/ports/internal_api_from_pool_v6.yaml -network/ports/internal_api_v6.yaml -network/ports/management.yaml -network/ports/management_from_pool.yaml -network/ports/management_from_pool_v6.yaml -network/ports/management_v6.yaml -network/ports/net_ip_list_map.yaml -network/ports/net_ip_map.yaml -network/ports/net_vip_map_external.yaml -network/ports/net_vip_map_external_v6.yaml -network/ports/storage.yaml -network/ports/storage_from_pool.yaml -network/ports/storage_from_pool_v6.yaml -network/ports/storage_mgmt.yaml -network/ports/storage_mgmt_from_pool.yaml -network/ports/storage_mgmt_from_pool_v6.yaml -network/ports/storage_mgmt_v6.yaml -network/ports/storage_v6.yaml -network/ports/tenant.yaml -network/ports/tenant_from_pool.yaml -network/ports/tenant_from_pool_v6.yaml -network/ports/tenant_v6.yaml -network/service_net_map.yaml -network/storage.yaml -network/storage_mgmt.yaml -network/storage_mgmt_v6.yaml -network/storage_v6.yaml -network/tenant.yaml -network/tenant_v6.yaml -overcloud-resource-registry-puppet.yaml -overcloud.yaml -puppet/blockstorage-config.yaml -puppet/blockstorage-role.yaml -puppet/cephstorage-config.yaml -puppet/cephstorage-role.yaml -puppet/compute-config.yaml -puppet/compute-role.yaml -puppet/controller-config.yaml -puppet/controller-role.yaml -puppet/objectstorage-config.yaml -puppet/objectstorage-role.yaml -puppet/post.yaml -tripleo_heat_templates/tests/ansible-pacemaker -tripleo_heat_templates/tests/tripleo-ansible - -# Files created by releasenotes build -releasenotes/build diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index a089b409c4..0000000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=${TEST_PATH:-./} -top_dir=./ diff --git a/LICENSE b/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/README.rst b/README.rst index bb3d5d2a50..4ee2c5f138 100644 --- a/README.rst +++ b/README.rst @@ -1,134 +1,10 @@ -======================== -Team and repository tags -======================== +This project is no longer maintained. -.. image:: https://governance.openstack.org/tc/badges/tripleo-heat-templates.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -.. Change things from this point on - -====================== -tripleo-heat-templates -====================== - -Heat templates to deploy OpenStack using OpenStack. 
-
-* Free software: Apache License (2.0)
-* Documentation: https://docs.openstack.org/tripleo-docs/latest/
-* Source: https://opendev.org/openstack/tripleo-heat-templates
-* Bugs: https://bugs.launchpad.net/tripleo
-* Release notes: https://docs.openstack.org/releasenotes/tripleo-heat-templates/
-
-Features
---------
-
-The ability to deploy a multi-node, role based OpenStack deployment using
-OpenStack Heat. Notable features include:
-
- * Choice of deployment/configuration tooling: puppet, (soon) docker
-
- * Role based deployment: roles for the controller, compute, ceph, swift,
-   and cinder storage
-
- * physical network configuration: support for isolated networks, bonding,
-   and standard ctlplane networking
-
-Directories
------------
-
-A description of the directory layout in TripleO Heat Templates.
-
- * environments: contains heat environment files that can be used with -e
-   on the command line to enable features, etc.
-
- * extraconfig: templates used to enable 'extra' functionality. Includes
-   functionality for distro specific registration and upgrades.
-
- * firstboot: example first_boot scripts that can be used when initially
-   creating instances.
-
- * network: heat templates to help create isolated networks and ports
-
- * puppet: templates mostly driven by configuration with puppet. To use these
-   templates you can use the overcloud-resource-registry-puppet.yaml.
-
- * validation-scripts: validation scripts useful to all deployment
-   configurations
-
- * roles: example roles that can be used with the tripleoclient to generate
-   a roles_data.yaml for a deployment. See the
-   `roles/README.rst `_ for additional details.
-
-Service testing matrix
-----------------------
-
-The configuration for the CI scenarios will be defined in `tripleo-heat-templates/ci/`
-and should be executed according to the following table:
-
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| -              | scn000 | scn001 | scn002 | scn003 | scn004 | scn006 | scn007 | scn009 | scn010 | scn013 | non-ha | ovh-ha |
-+================+========+========+========+========+========+========+========+========+========+========+========+========+
-| keystone       | X      | X      | X      | X      | X      | X      | X      |        | X      | X      | X      | X      |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| glance         |        | rbd    | swift  | file   | rgw    | file   | file   |        | rbd    | file   | file   | file   |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| cinder         |        | rbd    | iscsi  |        |        |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| heat           |        | X      | X      |        |        |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| ironic         |        |        |        |        |        | X      |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| mysql          | X      | X      | X      | X      | X      | X      | X      |        | X      | X      | X      | X      |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| neutron        |        | ovn    | ovn    | ovn    | ovn    | ovn    | ovs    |        | ovn    | ovn    | ovn    | ovn    |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| neutron-bgpvpn |        |        |        |        | wip    |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| neutron-l2gw   |        |        |        |        | wip    |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| om-rpc         |        | rabbit | rabbit | amqp1  | rabbit | rabbit | rabbit |        | rabbit | rabbit | rabbit | rabbit |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| om-notify      |        | rabbit | rabbit | rabbit | rabbit | rabbit | rabbit |        | rabbit | rabbit | rabbit | rabbit |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| redis          |        | X      | X      |        |        |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| haproxy        |        | X      | X      | X      | X      | X      | X      |        | X      | X      | X      | X      |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| memcached      |        | X      | X      | X      | X      | X      | X      |        | X      | X      | X      | X      |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| pacemaker      |        | X      | X      | X      | X      | X      | X      |        | X      | X      | X      | X      |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| nova           |        | qemu   | qemu   | qemu   | qemu   | ironic | qemu   |        | qemu   | qemu   | qemu   | qemu   |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| placement      |        | X      | X      | X      | X      | X      | X      |        | X      | X      | X      | X      |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| ntp            | X      | X      | X      | X      | X      | X      | X      | X      | X      | X      | X      | X      |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| snmp           | X      | X      | X      | X      | X      | X      | X      | X      | X      | X      | X      | X      |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| timezone       | X      | X      | X      | X      | X      | X      | X      | X      | X      | X      | X      | X      |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| swift          |        |        | X      |        |        |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| aodh           |        | X      | X      |        |        |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| ceilometer     |        | X      | X      |        |        |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| gnocchi        |        | rbd    | swift  |        |        |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| barbican       |        |        | X      |        |        |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| cephrgw        |        |        |        |        | X      |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| cephmds        |        |        |        |        | X      |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| manila         |        |        |        |        | X      |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| collectd       |        | X      |        |        |        |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| designate      |        |        |        | X      |        |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| octavia        |        |        |        |        |        |        |        |        | X      | X      |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
-| Extra Firewall |        |        |        | X      |        |        |        |        |        |        |        |        |
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
+For any further questions, please email
+openstack-discuss@lists.openstack.org or join #openstack-dev on
+OFTC.
diff --git a/babel.cfg b/babel.cfg
deleted file mode 100644
index efceab818b..0000000000
--- a/babel.cfg
+++ /dev/null
@@ -1 +0,0 @@
-[python: **.py]
diff --git a/baremetal-samples/baremetal_deployment.yaml b/baremetal-samples/baremetal_deployment.yaml
deleted file mode 100644
index 9f10c90359..0000000000
--- a/baremetal-samples/baremetal_deployment.yaml
+++ /dev/null
@@ -1,122 +0,0 @@
-- name: Controller
-  count: 3
-  defaults:
-    network_config:
-      template: templates/net_config_bridge.j2
-      default_route_network:
-        - external
-  instances:
-    - hostname: overcloud-controller-0
-      managed: true
-      networks:
-        - network: ctlplane
-          fixed_ip: 192.168.24.10
-        - network: external
-          subnet: external_subnet
-          fixed_ip: 172.20.12.80
-        - network: internal_api
-          subnet: internal_api_subnet
-          fixed_ip: 172.16.2.85
-        - network: storage
-          subnet: storage_subnet
-          fixed_ip: 172.16.1.148
-        - network: storage_mgmt
-          subnet: storage_mgmt_subnet
-        - network: tenant
-          subnet: tenant_subnet
-          fixed_ip: 172.16.0.244
-    - hostname: overcloud-controller-1
-      managed: true
-      networks:
-        - network: ctlplane
-          fixed_ip: 192.168.24.11
-        - network: external
-          subnet: external_subnet
-          fixed_ip: 172.20.12.81
-        - network: internal_api
-          subnet: internal_api_subnet
-          fixed_ip: 172.16.2.86
-        - network: storage
-          subnet: storage_subnet
-          fixed_ip: 172.16.1.149
-        - network: storage_mgmt
-          subnet: storage_mgmt_subnet
-        - network: tenant
-          subnet: tenant_subnet
-          fixed_ip: 172.16.0.245
-    - hostname: overcloud-controller-2
-      managed: true
-      networks:
-        - network: ctlplane
-          fixed_ip: 192.168.24.12
-        - network: external
-          subnet: external_subnet
-          fixed_ip: 172.20.12.82
-        - network: internal_api
-          subnet: internal_api_subnet
-          fixed_ip: 172.16.2.87
-        - network: storage
-          subnet: storage_subnet
-          fixed_ip: 172.16.1.150
-        - network: storage_mgmt
-          subnet: storage_mgmt_subnet
-        - network: tenant
-          subnet: tenant_subnet
-          fixed_ip: 172.16.0.246
-- name: Compute
-  count: 3
-  defaults:
-    network_config:
-      template: ""
-  instances:
-    - hostname: overcloud-compute-0
-      managed: true
-      networks:
-        - network: ctlplane
-          fixed_ip: 192.168.24.20
-        - network: external
-          subnet: external_subnet
-          fixed_ip: 172.20.12.90
-        - network: internal_api
-          subnet: internal_api_subnet
-          fixed_ip: 172.16.2.95
-        - network: storage
-          subnet: storage_subnet
-          fixed_ip: 172.16.1.158
-        - network: tenant
-          subnet: tenant_subnet
-          fixed_ip: 172.16.0.144
-    - hostname: overcloud-compute-1
-      managed:
true - networks: - - network: ctlplane - fixed_ip: 192.168.24.21 - - network: external - subnet: external_subnet - fixed_ip: 172.20.12.91 - - network: internal_api - subnet: internal_api_subnet - fixed_ip: 172.16.2.96 - - network: storage - subnet: storage_subnet - fixed_ip: 172.16.1.159 - - network: tenant - subnet: tenant_subnet - fixed_ip: 172.16.0.145 - - hostname: overcloud-compute-2 - managed: true - networks: - - network: ctlplane - fixed_ip: 192.168.24.22 - - network: external - subnet: external_subnet - fixed_ip: 172.20.12.92 - - network: internal_api - subnet: internal_api_subnet - fixed_ip: 172.16.2.98 - - network: storage - subnet: storage_subnet - fixed_ip: 172.16.1.160 - - network: tenant - subnet: tenant_subnet - fixed_ip: 172.16.0.146 \ No newline at end of file diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 19bddede03..0000000000 --- a/bindep.txt +++ /dev/null @@ -1,11 +0,0 @@ -# This is a cross-platform list tracking distribution packages needed by tests; -# see https://docs.openstack.org/infra/bindep/ for additional information. -libssl-dev [platform:dpkg] -openssl-devel [platform:rpm] -gcc [platform:rpm] -python36-devel [platform:rhel8 platform:centos-8] -python3-devel [platform:rpm !platform:rhel8 !platform:centos-8] - -# For SELinux -python3-libselinux [platform:rpm] -python3-libsemanage [platform:rpm] diff --git a/ci/README.rst b/ci/README.rst deleted file mode 100644 index 666e711026..0000000000 --- a/ci/README.rst +++ /dev/null @@ -1,11 +0,0 @@ -======================= -TripleO CI environments -======================= - -TripleO CI environments are exclusively used for Continuous Integration -purpose or for development usage. -They should not be used in production and we don't guarantee they work outside -TripleO CI. - -For more information about TripleO CI, please look: -https://github.com/openstack-infra/tripleo-ci diff --git a/ci/common/ironic_standalone_post.yaml b/ci/common/ironic_standalone_post.yaml deleted file mode 100644 index ddf44ce8d8..0000000000 --- a/ci/common/ironic_standalone_post.yaml +++ /dev/null @@ -1,17 +0,0 @@ -heat_template_version: wallaby - -parameters: - servers: - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - IronicVBMCSetup: - type: vbmc_setup.yaml - properties: - servers: {get_param: servers} - diff --git a/ci/common/vbmc_setup.yaml b/ci/common/vbmc_setup.yaml deleted file mode 100644 index 6fbf1bb531..0000000000 --- a/ci/common/vbmc_setup.yaml +++ /dev/null @@ -1,64 +0,0 @@ -heat_template_version: wallaby - -parameters: - servers: - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - ExtraConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: | - #!/bin/bash - set -e - dnf install -y python3-virtualbmc libvirt-client libvirt-daemon libvirt qemu-kvm - systemctl start libvirtd virtualbmc - git clone https://opendev.org/openstack/ironic /tmp/ironic - mkdir -p /var/log/bmlogs - chmod 777 /var/log/bmlogs - - # Create a ssh keypair and place the private key somewhere ansible inside the - # neutron_api container can read it. 
- ssh-keygen -P "" -f /etc/puppet/ci-key - chmod 644 /etc/puppet/ci-key - cat /etc/puppet/ci-key.pub >> /root/.ssh/authorized_keys - - LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} - LIBVIRT_STORAGE_POOL_PATH=${LIBVIRT_STORAGE_POOL_PATH:-/var/lib/libvirt/images} - - mkdir -p $LIBVIRT_STORAGE_POOL_PATH - - if ! virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then - virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir --target $LIBVIRT_STORAGE_POOL_PATH - virsh pool-autostart $LIBVIRT_STORAGE_POOL - virsh pool-start $LIBVIRT_STORAGE_POOL - fi - - pool_state=$(virsh pool-info $LIBVIRT_STORAGE_POOL | grep State | awk '{ print $2 }') - if [ "$pool_state" != "running" ] ; then - virsh pool-start $LIBVIRT_STORAGE_POOL - fi - - /tmp/ironic/devstack/tools/ironic/scripts/create-node.sh -n node1 -c 1 -m 3072 -d 10 -b br-ex -p 1161 -M 1350 -f qcow2 -a x86_64 -E qemu -l /var/log/bmlogs -A 66:0d:1d:d8:0b:11 > /var/log/bmlogs/create-node-1.log 2>&1 < /dev/null - /tmp/ironic/devstack/tools/ironic/scripts/create-node.sh -n node2 -c 1 -m 3072 -d 10 -b br-ex -p 1162 -M 1350 -f qcow2 -a x86_64 -E qemu -l /var/log/bmlogs -A 66:0d:1d:d8:0b:22 > /var/log/bmlogs/create-node-2.log 2>&1 < /dev/null - - vbmc --no-daemon add node1 --port 1161 - vbmc --no-daemon start node1 - vbmc --no-daemon add node2 --port 1162 - vbmc --no-daemon start node2 - - disown -a - ExtraDeployments: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: servers} - config: {get_resource: ExtraConfig} - actions: ['CREATE'] # Only do this on CREATE - name: VirtNodeExtraConfig diff --git a/ci/custom_ci_roles_data.yaml b/ci/custom_ci_roles_data.yaml deleted file mode 100644 index 34caa2b91c..0000000000 --- a/ci/custom_ci_roles_data.yaml +++ /dev/null @@ -1,254 +0,0 @@ -############################################################################### -# File generated by TripleO -############################################################################### -############################################################################### -# Role: Controller # -############################################################################### -- name: Controller - description: | - Controller role that has all the controller services loaded and handles - Database, Messaging and Network functions. - CountDefault: 1 - tags: - - primary - - controller - # Create external Neutron bridge for SNAT (and floating IPs when using - # ML2/OVS without DVR) - - external_bridge - networks: - ExternalCloud1: - subnet: external_cloud_1_subnet - InternalApiCloud1: - subnet: internal_api_cloud_1_subnet - StorageCloud1: - subnet: storage_cloud_1_subnet - StorageMgmtCloud1: - subnet: storage_mgmt_cloud_1_subnet - TenantCloud1: - subnet: tenant_cloud_1_subnet - # For systems with both IPv4 and IPv6, you may specify a gateway network for - # each, such as ['ControlPlane', 'External'] - default_route_networks: ['ExternalCloud1'] - HostnameFormatDefault: '%stackname%-controller-%index%' - RoleParametersDefault: - OVNCMSOptions: "enable-chassis-as-gw" - # Deprecated & backward-compatible values (FIXME: Make parameters consistent) - # Set uses_deprecated_params to True if any deprecated params are used. 
- uses_deprecated_params: True - deprecated_param_extraconfig: 'controllerExtraConfig' - update_serial: 1 - ServicesDefault: - - OS::TripleO::Services::Aide - - OS::TripleO::Services::AodhApi - - OS::TripleO::Services::AodhEvaluator - - OS::TripleO::Services::AodhListener - - OS::TripleO::Services::AodhNotifier - - OS::TripleO::Services::AuditD - - OS::TripleO::Services::BarbicanApi - - OS::TripleO::Services::BarbicanBackendSimpleCrypto - - OS::TripleO::Services::BarbicanBackendDogtag - - OS::TripleO::Services::BarbicanBackendKmip - - OS::TripleO::Services::BarbicanBackendPkcs11Crypto - - OS::TripleO::Services::BootParams - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::CeilometerAgentCentral - - OS::TripleO::Services::CeilometerAgentNotification - - OS::TripleO::Services::CephClient - - OS::TripleO::Services::CephExternal - - OS::TripleO::Services::CephGrafana - - OS::TripleO::Services::CephMds - - OS::TripleO::Services::CephMgr - - OS::TripleO::Services::CephMon - - OS::TripleO::Services::CephRbdMirror - - OS::TripleO::Services::CephRgw - - OS::TripleO::Services::CinderApi - - OS::TripleO::Services::CinderBackendDellSc - - OS::TripleO::Services::CinderBackendDellEMCPowerFlex - - OS::TripleO::Services::CinderBackendDellEMCPowermax - - OS::TripleO::Services::CinderBackendDellEMCPowerStore - - OS::TripleO::Services::CinderBackendDellEMCSc - - OS::TripleO::Services::CinderBackendDellEMCUnity - - OS::TripleO::Services::CinderBackendDellEMCVNX - - OS::TripleO::Services::CinderBackendDellEMCXtremio - - OS::TripleO::Services::CinderBackendIBMSvf - - OS::TripleO::Services::CinderBackendNetApp - - OS::TripleO::Services::CinderBackendNfs - - OS::TripleO::Services::CinderBackendNVMeOF - - OS::TripleO::Services::CinderBackendPure - - OS::TripleO::Services::CinderBackendRbd - - OS::TripleO::Services::CinderBackup - - OS::TripleO::Services::CinderScheduler - - OS::TripleO::Services::CinderVolume - - OS::TripleO::Services::Clustercheck - - OS::TripleO::Services::Collectd - - OS::TripleO::Services::ContainerImagePrepare - - OS::TripleO::Services::DesignateApi - - OS::TripleO::Services::DesignateCentral - - OS::TripleO::Services::DesignateProducer - - OS::TripleO::Services::DesignateWorker - - OS::TripleO::Services::DesignateMDNS - - OS::TripleO::Services::DesignateSink - - OS::TripleO::Services::DesignateBind - - OS::TripleO::Services::Etcd - - OS::TripleO::Services::ExternalSwiftProxy - - OS::TripleO::Services::Frr - - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceApiInternal - - OS::TripleO::Services::GnocchiApi - - OS::TripleO::Services::GnocchiMetricd - - OS::TripleO::Services::GnocchiStatsd - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::HeatApi - - OS::TripleO::Services::HeatApiCfn - - OS::TripleO::Services::HeatEngine - - OS::TripleO::Services::Horizon - - OS::TripleO::Services::IpaClient - - OS::TripleO::Services::Ipsec - - OS::TripleO::Services::IronicApi - - OS::TripleO::Services::IronicConductor - - OS::TripleO::Services::IronicInspector - - OS::TripleO::Services::IronicPxe - - OS::TripleO::Services::IronicNeutronAgent - - OS::TripleO::Services::Iscsid - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::Keystone - - OS::TripleO::Services::LoginDefs - - OS::TripleO::Services::ManilaApi - - OS::TripleO::Services::ManilaBackendCephFs - - OS::TripleO::Services::ManilaBackendFlashBlade - - OS::TripleO::Services::ManilaBackendIsilon - - OS::TripleO::Services::ManilaBackendNetapp - - OS::TripleO::Services::ManilaBackendPowerMax - - 
OS::TripleO::Services::ManilaBackendUnity - - OS::TripleO::Services::ManilaBackendVNX - - OS::TripleO::Services::ManilaScheduler - - OS::TripleO::Services::ManilaShare - - OS::TripleO::Services::Memcached - - OS::TripleO::Services::MetricsQdr - - OS::TripleO::Services::Multipathd - - OS::TripleO::Services::MySQL - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::NeutronApi - - OS::TripleO::Services::NeutronBgpVpnApi - - OS::TripleO::Services::NeutronSfcApi - - OS::TripleO::Services::NeutronCorePlugin - - OS::TripleO::Services::NeutronDhcpAgent - - OS::TripleO::Services::NeutronL2gwAgent - - OS::TripleO::Services::NeutronL2gwApi - - OS::TripleO::Services::NeutronL3Agent - - OS::TripleO::Services::NeutronLinuxbridgeAgent - - OS::TripleO::Services::NeutronMetadataAgent - - OS::TripleO::Services::NeutronOvsAgent - - OS::TripleO::Services::NeutronAgentsIBConfig - - OS::TripleO::Services::NovaApi - - OS::TripleO::Services::NovaConductor - - OS::TripleO::Services::NovaIronic - - OS::TripleO::Services::NovaMetadata - - OS::TripleO::Services::NovaScheduler - - OS::TripleO::Services::NovaVncProxy - - OS::TripleO::Services::ContainersLogrotateCrond - - OS::TripleO::Services::OctaviaApi - - OS::TripleO::Services::OctaviaDeploymentConfig - - OS::TripleO::Services::OctaviaHealthManager - - OS::TripleO::Services::OctaviaHousekeeping - - OS::TripleO::Services::OctaviaWorker - - OS::TripleO::Services::OpenStackClients - - OS::TripleO::Services::OVNDBs - - OS::TripleO::Services::OVNController - - OS::TripleO::Services::Pacemaker - - OS::TripleO::Services::PlacementApi - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::OsloMessagingNotify - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Redis - - OS::TripleO::Services::Rhsm - - OS::TripleO::Services::Rsyslog - - OS::TripleO::Services::RsyslogSidecar - - OS::TripleO::Services::Securetty - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::SwiftProxy - - OS::TripleO::Services::SwiftDispersion - - OS::TripleO::Services::SwiftRingBuilder - - OS::TripleO::Services::SwiftStorage - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::Tuned - - OS::TripleO::Services::Unbound -############################################################################### -# Role: Compute # -############################################################################### -- name: Compute - description: | - Basic Compute Node role - CountDefault: 1 - # Create external Neutron bridge (unset if using ML2/OVS without DVR) - tags: - - compute - - external_bridge - networks: - InternalApiCloud1: - subnet: internal_api_cloud_1_subnet - TenantCloud1: - subnet: tenant_cloud_1_subnet - StorageCloud1: - subnet: storage_cloud_1_subnet - HostnameFormatDefault: '%stackname%-novacompute-%index%' - RoleParametersDefault: - TunedProfileName: "virtual-host" - # Deprecated & backward-compatible values (FIXME: Make parameters consistent) - # Set uses_deprecated_params to True if any deprecated params are used. - # These deprecated_params only need to be used for existing roles and not for - # composable roles. 
- uses_deprecated_params: True - deprecated_param_extraconfig: 'NovaComputeExtraConfig' - deprecated_param_metadata: 'NovaComputeServerMetadata' - deprecated_param_ips: 'NovaComputeIPs' - deprecated_server_resource_name: 'NovaCompute' - update_serial: 25 - ServicesDefault: - - OS::TripleO::Services::Aide - - OS::TripleO::Services::AuditD - - OS::TripleO::Services::BootParams - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::CephClient - - OS::TripleO::Services::CephExternal - - OS::TripleO::Services::Collectd - - OS::TripleO::Services::ComputeCeilometerAgent - - OS::TripleO::Services::ComputeNeutronCorePlugin - - OS::TripleO::Services::ComputeNeutronL3Agent - - OS::TripleO::Services::ComputeNeutronMetadataAgent - - OS::TripleO::Services::ComputeNeutronOvsAgent - - OS::TripleO::Services::Frr - - OS::TripleO::Services::IpaClient - - OS::TripleO::Services::Ipsec - - OS::TripleO::Services::Iscsid - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::LoginDefs - - OS::TripleO::Services::MetricsQdr - - OS::TripleO::Services::Multipathd - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::NeutronBgpVpnBagpipe - - OS::TripleO::Services::NeutronLinuxbridgeAgent - - OS::TripleO::Services::NovaAZConfig - - OS::TripleO::Services::NovaCompute - - OS::TripleO::Services::NovaLibvirt - - OS::TripleO::Services::NovaLibvirtGuests - - OS::TripleO::Services::NovaMigrationTarget - - OS::TripleO::Services::ContainersLogrotateCrond - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Rhsm - - OS::TripleO::Services::Rsyslog - - OS::TripleO::Services::RsyslogSidecar - - OS::TripleO::Services::Securetty - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::Tuned - - OS::TripleO::Services::OVNController - - OS::TripleO::Services::OVNMetadataAgent diff --git a/ci/custom_names_network_data.yaml b/ci/custom_names_network_data.yaml deleted file mode 100644 index 10c085b2ab..0000000000 --- a/ci/custom_names_network_data.yaml +++ /dev/null @@ -1,60 +0,0 @@ -- name: ExternalCloud1 - name_lower: external_cloud_1 - mtu: 1350 - vip: true - dns_domain: externalcloud1.ooo.test. - service_net_map_replace: external - subnets: - external_cloud_1_subnet: - ip_subnet: 10.0.0.0/24 - gateway_ip: 10.0.0.1 - allocation_pools: - - start: 10.0.0.10 - end: 10.0.0.50 -- name: InternalApiCloud1 - name_lower: internal_api_cloud_1 - mtu: 1350 - vip: true - dns_domain: internalapicloud1.ooo.test. - service_net_map_replace: internal_api - subnets: - internal_api_cloud_1_subnet: - ip_subnet: 172.17.0.0/24 - allocation_pools: - - start: 172.17.0.10 - end: 172.17.0.250 -- name: StorageCloud1 - name_lower: storage_cloud_1 - mtu: 1350 - vip: true - dns_domain: storagecloud1.ooo.test. - service_net_map_replace: storage - subnets: - storage_cloud_1_subnet: - ip_subnet: 172.18.0.0/24 - allocation_pools: - - start: 172.18.0.10 - end: 172.18.0.250 -- name: StorageMgmtCloud1 - name_lower: storage_mgmt_cloud_1 - mtu: 1350 - vip: true - dns_domain: storagemgmtcloud1.ooo.test. - service_net_map_replace: storage_mgmt - subnets: - storage_mgmt_cloud_1_subnet: - ip_subnet: 172.19.0.0/24 - allocation_pools: - - start: 172.19.0.10 - end: 172.19.0.250 -- name: TenantCloud1 - name_lower: tenant_cloud_1 - mtu: 1350 - dns_domain: tenantcloud1.ooo.test. 
- service_net_map_replace: tenant - subnets: - tenant_cloud_1_subnet: - ip_subnet: 172.16.0.0/24 - allocation_pools: - - start: 172.16.0.10 - end: 172.16.0.250 diff --git a/ci/custom_names_vip_data.yaml b/ci/custom_names_vip_data.yaml deleted file mode 100644 index 402c314e0c..0000000000 --- a/ci/custom_names_vip_data.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- network: storage_mgmt_cloud_1 -- network: internal_api_cloud_1 -- network: storage_cloud_1 -- ip_address: 10.0.0.5 # CI jobs rely on this fixed ip for the external VIP - network: external_cloud_1 -- network: ctlplane diff --git a/ci/environments/README.rst b/ci/environments/README.rst deleted file mode 100644 index 4a3cb9d97f..0000000000 --- a/ci/environments/README.rst +++ /dev/null @@ -1,4 +0,0 @@ -This directory contains environments that are used in tripleo-ci. They may change from -release to release or within a release, and should not be relied upon in a production -environment. The top-level ``environments`` directory in tripleo-heat-templates -contains the production-ready environment files. diff --git a/ci/environments/ceph-min-osds.yaml b/ci/environments/ceph-min-osds.yaml deleted file mode 100644 index 4e72d31358..0000000000 --- a/ci/environments/ceph-min-osds.yaml +++ /dev/null @@ -1,2 +0,0 @@ -parameter_defaults: - CephPoolDefaultSize: 1 diff --git a/ci/environments/multinode-containers.yaml b/ci/environments/multinode-containers.yaml deleted file mode 100644 index 5ce898501f..0000000000 --- a/ci/environments/multinode-containers.yaml +++ /dev/null @@ -1,52 +0,0 @@ -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2' - ControllerServices: - - OS::TripleO::Services::AuditD - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::ContainerImagePrepare - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::Keystone - - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceApiInternal - - OS::TripleO::Services::MySQL - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::NeutronApi - - OS::TripleO::Services::NeutronCorePlugin - - OS::TripleO::Services::OVNDBs - - OS::TripleO::Services::OVNController - - OS::TripleO::Services::OVNMetadataAgent - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::OsloMessagingNotify - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::Memcached - - OS::TripleO::Services::NovaConductor - - OS::TripleO::Services::NovaApi - - OS::TripleO::Services::PlacementApi - - OS::TripleO::Services::NovaMetadata - - OS::TripleO::Services::NovaScheduler - - OS::TripleO::Services::ContainersLogrotateCrond - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::NovaCompute - - OS::TripleO::Services::NovaLibvirt - - OS::TripleO::Services::Horizon - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::NovaMigrationTarget - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::Iscsid - - OS::TripleO::Services::Multipathd - - OS::TripleO::Services::Pacemaker - - OS::TripleO::Services::Clustercheck - - OS::TripleO::Services::NovaManager - ControllerExtraConfig: - nova::compute::libvirt::services::libvirt_virt_type: qemu - nova::compute::libvirt::virt_type: qemu - Debug: True - DockerPuppetDebug: True - NotificationDriver: 'noop' - GlanceBackend: 'file' - ContainerCli: podman diff --git 
a/ci/environments/multinode-core.yaml b/ci/environments/multinode-core.yaml deleted file mode 100644 index 038ebe755c..0000000000 --- a/ci/environments/multinode-core.yaml +++ /dev/null @@ -1,34 +0,0 @@ -heat_template_version: wallaby - -description: > - OpenStack Core Service - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. Use - parameter_merge_strategies to merge it with the defaults. - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - Debug: - type: boolean - default: false - description: Set to True to enable debugging on all services. - -resources: - -outputs: - role_data: - description: Role data for the multinode firewall configuration - value: - service_name: multinode_core - firewall_rules: - '999 core': - proto: 'udp' - dport: - - 4789 diff --git a/ci/environments/network/multiple-nics-ipv6/README b/ci/environments/network/multiple-nics-ipv6/README deleted file mode 100644 index 189bfb58ff..0000000000 --- a/ci/environments/network/multiple-nics-ipv6/README +++ /dev/null @@ -1,23 +0,0 @@ -Generated Network Isolation Templates -------------------------------------- -These templates were generated by the UI tool at -https://github.com/cybertron/tripleo-scripts#net-iso-genpy - -ui-settings.pickle is specific to the tool. TripleO will not use it when -doing deployments with these templates, but it is needed to be able to -load the templates into the UI again. Note that the UI only reads this file, -so any changes made by hand to the templates will not be reflected in the UI. - -The network-isolation.yaml file needs to reference the port files shipped with -tripleo-heat-templates, so by default the tool generates the paths assuming -network-isolation.yaml will be copied into the environments/ directory of -tripleo-heat-templates. - -If the standard tripleo-heat-templates are in use, then the -network-isolation-absolute.yaml file can be used instead. It has hard-coded -references to the port files in /usr/share/openstack-tripleo-heat-templates. 
- -If the generated network isolation templates are at ~/generated-templates, an -example deployment command would look like: - -openstack overcloud deploy --templates -e ~/generated-templates/network-isolation-absolute.yaml -e ~/generated-templates/network-environment.yaml diff --git a/ci/environments/network/multiple-nics-ipv6/network-environment.yaml b/ci/environments/network/multiple-nics-ipv6/network-environment.yaml deleted file mode 100644 index 818a1ddbc4..0000000000 --- a/ci/environments/network/multiple-nics-ipv6/network-environment.yaml +++ /dev/null @@ -1,18 +0,0 @@ -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2' - BlockStorageNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2' - CellControllerNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2' - ObjectStorageNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2' - CephStorageNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2' - ExternalNetCidr: 2001:db8:fd00:1000::/64 - ExternalAllocationPools: [{"start": "2001:db8:fd00:1000::10", "end": "2001:db8:fd00:1000:ffff:ffff:ffff:fffe"}] - ExternalInterfaceDefaultRoute: 2001:db8:fd00:1000::1 - InternalApiNetCidr: fd00:fd00:fd00:2000::/64 - InternalApiAllocationPools: [{"start": "fd00:fd00:fd00:2000::10", "end": "fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe"}] - StorageNetCidr: fd00:fd00:fd00:3000::/64 - StorageAllocationPools: [{"start": "fd00:fd00:fd00:3000::10", "end": "fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe"}] - StorageMgmtNetCidr: fd00:fd00:fd00:4000::/64 - StorageMgmtAllocationPools: [{"start": "fd00:fd00:fd00:4000::10", "end": "fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe"}] - TenantNetCidr: 172.16.0.0/24 - TenantAllocationPools: [{"start": "172.16.0.10", "end": "172.16.0.250"}] diff --git a/ci/environments/network/multiple-nics-ipv6/network-isolation-absolute.yaml b/ci/environments/network/multiple-nics-ipv6/network-isolation-absolute.yaml deleted file mode 100644 index 216e73789d..0000000000 --- a/ci/environments/network/multiple-nics-ipv6/network-isolation-absolute.yaml +++ /dev/null @@ -1,29 +0,0 @@ -resource_registry: - # External - OS::TripleO::Network::External: /usr/share/openstack-tripleo-heat-templates/network/external_v6.yaml - OS::TripleO::Network::Ports::ExternalVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external_v6.yaml - OS::TripleO::Controller::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external_v6.yaml - # InternalApi - OS::TripleO::Network::InternalApi: /usr/share/openstack-tripleo-heat-templates/network/internal_api_v6.yaml - OS::TripleO::Network::Ports::InternalApiVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api_v6.yaml - OS::TripleO::Controller::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api_v6.yaml - OS::TripleO::Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api_v6.yaml - # Storage - OS::TripleO::Network::Storage: /usr/share/openstack-tripleo-heat-templates/network/storage_v6.yaml - OS::TripleO::Network::Ports::StorageVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_v6.yaml - OS::TripleO::Controller::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_v6.yaml - OS::TripleO::Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_v6.yaml - 
OS::TripleO::CephStorage::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_v6.yaml - # StorageMgmt - OS::TripleO::Network::StorageMgmt: /usr/share/openstack-tripleo-heat-templates/network/storage_mgmt_v6.yaml - OS::TripleO::Network::Ports::StorageMgmtVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_v6.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_v6.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_v6.yaml - # Tenant - OS::TripleO::Network::Tenant: /usr/share/openstack-tripleo-heat-templates/network/tenant.yaml - OS::TripleO::Network::Ports::TenantVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::Controller::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml -parameter_defaults: - ManilaIPv6: True - diff --git a/ci/environments/network/multiple-nics-ipv6/network-isolation.yaml b/ci/environments/network/multiple-nics-ipv6/network-isolation.yaml deleted file mode 100644 index 6232f3ef4b..0000000000 --- a/ci/environments/network/multiple-nics-ipv6/network-isolation.yaml +++ /dev/null @@ -1,28 +0,0 @@ -resource_registry: - # External - OS::TripleO::Network::External: ../../../../network/external_v6.yaml - OS::TripleO::Network::Ports::ExternalVipPort: ../../../../network/ports/external_v6.yaml - OS::TripleO::Controller::Ports::ExternalPort: ../../../../network/ports/external_v6.yaml - # InternalApi - OS::TripleO::Network::InternalApi: ../../../../network/internal_api_v6.yaml - OS::TripleO::Network::Ports::InternalApiVipPort: ../../../../network/ports/internal_api_v6.yaml - OS::TripleO::Controller::Ports::InternalApiPort: ../../../../network/ports/internal_api_v6.yaml - OS::TripleO::Compute::Ports::InternalApiPort: ../../../../network/ports/internal_api_v6.yaml - # Storage - OS::TripleO::Network::Storage: ../../../../network/storage_v6.yaml - OS::TripleO::Network::Ports::StorageVipPort: ../../../../network/ports/storage_v6.yaml - OS::TripleO::Controller::Ports::StoragePort: ../../../../network/ports/storage_v6.yaml - OS::TripleO::Compute::Ports::StoragePort: ../../../../network/ports/storage_v6.yaml - OS::TripleO::CephStorage::Ports::StoragePort: ../../../../network/ports/storage_v6.yaml - # StorageMgmt - OS::TripleO::Network::StorageMgmt: ../../../../network/storage_mgmt_v6.yaml - OS::TripleO::Network::Ports::StorageMgmtVipPort: ../../../../network/ports/storage_mgmt_v6.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt_v6.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt_v6.yaml - # Tenant - OS::TripleO::Network::Tenant: ../../../../network/tenant.yaml - OS::TripleO::Network::Ports::TenantVipPort: ../../../../network/ports/tenant.yaml - OS::TripleO::Controller::Ports::TenantPort: ../../../../network/ports/tenant.yaml - OS::TripleO::Compute::Ports::TenantPort: ../../../../network/ports/tenant.yaml -parameter_defaults: - ManilaIPv6: True diff --git a/ci/environments/network/multiple-nics-ipv6/ui-settings.pickle b/ci/environments/network/multiple-nics-ipv6/ui-settings.pickle deleted file mode 100644 index 02161a4b57..0000000000 --- a/ci/environments/network/multiple-nics-ipv6/ui-settings.pickle 
+++ /dev/null @@ -1,732 +0,0 @@ -(dp0 -S'global_data' -p1 -(dp2 -S'control' -p3 -(dp4 -S'route' -p5 -V192.168.24.1 -p6 -sS'mask' -p7 -I24 -sS'ec2' -p8 -V192.168.24.1 -p9 -ssS'major' -p10 -I1 -sS'management' -p11 -(dp12 -S'start' -p13 -V172.20.0.10 -p14 -sS'cidr' -p15 -V172.20.0.0/24 -p16 -sS'vlan' -p17 -I6 -sS'end' -p18 -V172.20.0.250 -p19 -ssS'dns2' -p20 -V8.8.4.4 -p21 -sS'dns1' -p22 -V8.8.8.8 -p23 -sS'storage' -p24 -(dp25 -g13 -Vfd00:fd00:fd00:3000::10 -p26 -sg15 -Vfd00:fd00:fd00:3000::/64 -p27 -sg17 -I3 -sg18 -Vfd00:fd00:fd00:3000:ffff:ffff:ffff:fffe -p28 -ssS'auto_routes' -p29 -I00 -sS'bond_options' -p30 -V -p31 -sS'version' -p32 -I2 -sS'external' -p33 -(dp34 -S'bridge' -p35 -V'' -p36 -sg18 -V2001:db8:fd00:1000:ffff:ffff:ffff:fffe -p37 -sg17 -I1 -sg13 -V2001:db8:fd00:1000::10 -p38 -sg15 -V2001:db8:fd00:1000::/64 -p39 -sS'gateway' -p40 -V2001:db8:fd00:1000::1 -p41 -ssS'internal_api' -p42 -(dp43 -g13 -Vfd00:fd00:fd00:2000::10 -p44 -sg15 -Vfd00:fd00:fd00:2000::/64 -p45 -sg17 -I2 -sg18 -Vfd00:fd00:fd00:2000:ffff:ffff:ffff:fffe -p46 -ssS'ipv6' -p47 -I01 -sS'storage_mgmt' -p48 -(dp49 -g13 -Vfd00:fd00:fd00:4000::10 -p50 -sg15 -Vfd00:fd00:fd00:4000::/64 -p51 -sg17 -I4 -sg18 -Vfd00:fd00:fd00:4000:ffff:ffff:ffff:fffe -p52 -ssS'minor' -p53 -I2 -sS'tenant' -p54 -(dp55 -g13 -V172.16.0.10 -p56 -sg15 -V172.16.0.0/24 -p57 -sg17 -I5 -sg18 -V172.16.0.250 -p58 -sssS'data' -p59 -(dp60 -S'cinder-storage.yaml' -p61 -(lp62 -sS'ceph-storage.yaml' -p63 -(lp64 -(dp65 -Vaddresses -p66 -(lp67 -sVnetwork -p68 -VControlPlane -p69 -sVprimary -p70 -I01 -sVmtu -p71 -I1350 -sS'members' -p72 -(lp73 -(dp74 -Vip_netmask -p75 -V0.0.0.0/0 -p76 -sVname -p77 -VRoute -p78 -sVdefault -p79 -I01 -sVnext_hop -p80 -V{get_param: ControlPlaneDefaultRoute} -p81 -sg72 -(lp82 -sVtype -p83 -Vroute -p84 -sasVroutes -p85 -(lp86 -sVuse_dhcp -p87 -I00 -sVtype -p88 -Vinterface -p89 -sVname -p90 -Vnic1 -p91 -sa(dp92 -Vaddresses -p93 -(lp94 -sVnetwork -p95 -VStorage -p96 -sVprimary -p97 -I01 -sVmtu -p98 -I1350 -sg72 -(lp99 -sVroutes -p100 -(lp101 -sVuse_dhcp -p102 -I00 -sVtype -p103 -Vinterface -p104 -sVname -p105 -Vnic4 -p106 -sa(dp107 -Vaddresses -p108 -(lp109 -sVnetwork -p110 -VStorageMgmt -p111 -sVprimary -p112 -I01 -sVmtu -p113 -I1350 -sg72 -(lp114 -sVroutes -p115 -(lp116 -sVuse_dhcp -p117 -I00 -sVtype -p118 -Vinterface -p119 -sVname -p120 -Vnic5 -p121 -sasS'controller.yaml' -p122 -(lp123 -(dp124 -Vaddresses -p125 -(lp126 -sVnetwork -p127 -VControlPlane -p128 -sVprimary -p129 -I01 -sVmtu -p130 -I1350 -sg72 -(lp131 -(dp132 -Vip_netmask -p133 -V0.0.0.0/0 -p134 -sVname -p135 -VRoute -p136 -sVdefault -p137 -I01 -sVnext_hop -p138 -V{get_param: ControlPlaneDefaultRoute} -p139 -sg72 -(lp140 -sVtype -p141 -Vroute -p142 -sasVroutes -p143 -(lp144 -sVuse_dhcp -p145 -I00 -sVtype -p146 -Vinterface -p147 -sVname -p148 -Vnic1 -p149 -sa(dp150 -Vdns_servers -p151 -V{get_param: DnsServers} -p152 -sVaddresses -p153 -(lp154 -sVnetwork -p155 -VExternal -p156 -sVmtu -p157 -I-1 -sg72 -(lp158 -(dp159 -Vaddresses -p160 -(lp161 -sVnetwork -p162 -VNone -p163 -sVprimary -p164 -I01 -sVmtu -p165 -I1350 -sg72 -(lp166 -sVroutes -p167 -(lp168 -sVuse_dhcp -p169 -I00 -sVtype -p170 -Vinterface -p171 -sVname -p172 -Vnic2 -p173 -sa(dp174 -Vip_netmask -p175 -V0.0.0.0/0 -p176 -sVname -p177 -VRoute -p178 -sVdefault -p179 -I01 -sVnext_hop -p180 -V{get_param: ExternalInterfaceDefaultRoute} -p181 -sg72 -(lp182 -sVtype -p183 -Vroute -p184 -sasVroutes -p185 -(lp186 -sVuse_dhcp -p187 -I00 -sVtype -p188 -Vovs_bridge -p189 -sVname -p190 -Vbr-ex -p191 -sa(dp192 -Vaddresses -p193 -(lp194 
-sVnetwork -p195 -VInternalApi -p196 -sVprimary -p197 -I01 -sVmtu -p198 -I1350 -sg72 -(lp199 -sVroutes -p200 -(lp201 -sVuse_dhcp -p202 -I00 -sVtype -p203 -Vinterface -p204 -sVname -p205 -Vnic3 -p206 -sa(dp207 -Vaddresses -p208 -(lp209 -sVnetwork -p210 -VStorage -p211 -sVprimary -p212 -I01 -sVmtu -p213 -I1350 -sg72 -(lp214 -sVroutes -p215 -(lp216 -sVuse_dhcp -p217 -I00 -sVtype -p218 -Vinterface -p219 -sVname -p220 -Vnic4 -p221 -sa(dp222 -Vaddresses -p223 -(lp224 -sVnetwork -p225 -VStorageMgmt -p226 -sVprimary -p227 -I01 -sVmtu -p228 -I1350 -sg72 -(lp229 -sVroutes -p230 -(lp231 -sVuse_dhcp -p232 -I00 -sVtype -p233 -Vinterface -p234 -sVname -p235 -Vnic5 -p236 -sa(dp237 -Vdns_servers -p238 -V{get_param: DnsServers} -p239 -sVaddresses -p240 -(lp241 -sVnetwork -p242 -VTenant -p243 -sVmtu -p244 -I-1 -sg72 -(lp245 -(dp246 -Vaddresses -p247 -(lp248 -sVnetwork -p249 -VNone -p250 -sVprimary -p251 -I01 -sVmtu -p252 -I1350 -sg72 -(lp253 -sVroutes -p254 -(lp255 -sVuse_dhcp -p256 -I00 -sVtype -p257 -Vinterface -p258 -sVname -p259 -Vnic6 -p260 -sasVroutes -p261 -(lp262 -sVuse_dhcp -p263 -I00 -sVtype -p264 -Vovs_bridge -p265 -sVname -p266 -Vbr-tenant -p267 -sasS'swift-storage.yaml' -p268 -(lp269 -sS'compute.yaml' -p270 -(lp271 -(dp272 -Vaddresses -p273 -(lp274 -sVnetwork -p275 -VControlPlane -p276 -sVprimary -p277 -I01 -sVmtu -p278 -I1350 -sg72 -(lp279 -(dp280 -Vip_netmask -p281 -V0.0.0.0/0 -p282 -sVname -p283 -VRoute -p284 -sVdefault -p285 -I01 -sVnext_hop -p286 -V{get_param: ControlPlaneDefaultRoute} -p287 -sg72 -(lp288 -sVtype -p289 -Vroute -p290 -sasVroutes -p291 -(lp292 -sVuse_dhcp -p293 -I00 -sVtype -p294 -Vinterface -p295 -sVname -p296 -Vnic1 -p297 -sa(dp298 -Vaddresses -p299 -(lp300 -sVnetwork -p301 -VInternalApi -p302 -sVprimary -p303 -I01 -sVmtu -p304 -I1350 -sg72 -(lp305 -sVroutes -p306 -(lp307 -sVuse_dhcp -p308 -I00 -sVtype -p309 -Vinterface -p310 -sVname -p311 -Vnic3 -p312 -sa(dp313 -Vaddresses -p314 -(lp315 -sVnetwork -p316 -VStorage -p317 -sVprimary -p318 -I01 -sVmtu -p319 -I1350 -sg72 -(lp320 -sVroutes -p321 -(lp322 -sVuse_dhcp -p323 -I00 -sVtype -p324 -Vinterface -p325 -sVname -p326 -Vnic4 -p327 -sa(dp328 -Vdns_servers -p329 -V{get_param: DnsServers} -p330 -sVaddresses -p331 -(lp332 -sVnetwork -p333 -VTenant -p334 -sVmtu -p335 -I-1 -sg72 -(lp336 -(dp337 -Vaddresses -p338 -(lp339 -sVnetwork -p340 -VNone -p341 -sVprimary -p342 -I01 -sVmtu -p343 -I1350 -sg72 -(lp344 -sVroutes -p345 -(lp346 -sVuse_dhcp -p347 -I00 -sVtype -p348 -Vinterface -p349 -sVname -p350 -Vnic6 -p351 -sasVroutes -p352 -(lp353 -sVuse_dhcp -p354 -I00 -sVtype -p355 -Vovs_bridge -p356 -sVname -p357 -Vbr-tenant -p358 -sass. \ No newline at end of file diff --git a/ci/environments/network/multiple-nics/README b/ci/environments/network/multiple-nics/README deleted file mode 100644 index 189bfb58ff..0000000000 --- a/ci/environments/network/multiple-nics/README +++ /dev/null @@ -1,23 +0,0 @@ -Generated Network Isolation Templates -------------------------------------- -These templates were generated by the UI tool at -https://github.com/cybertron/tripleo-scripts#net-iso-genpy - -ui-settings.pickle is specific to the tool. TripleO will not use it when -doing deployments with these templates, but it is needed to be able to -load the templates into the UI again. Note that the UI only reads this file, -so any changes made by hand to the templates will not be reflected in the UI. 
- -The network-isolation.yaml file needs to reference the port files shipped with -tripleo-heat-templates, so by default the tool generates the paths assuming -network-isolation.yaml will be copied into the environments/ directory of -tripleo-heat-templates. - -If the standard tripleo-heat-templates are in use, then the -network-isolation-absolute.yaml file can be used instead. It has hard-coded -references to the port files in /usr/share/openstack-tripleo-heat-templates. - -If the generated network isolation templates are at ~/generated-templates, an -example deployment command would look like: - -openstack overcloud deploy --templates -e ~/generated-templates/network-isolation-absolute.yaml -e ~/generated-templates/network-environment.yaml diff --git a/ci/environments/network/multiple-nics/custom-network-environment.yaml b/ci/environments/network/multiple-nics/custom-network-environment.yaml deleted file mode 100644 index 841b2cdf5d..0000000000 --- a/ci/environments/network/multiple-nics/custom-network-environment.yaml +++ /dev/null @@ -1,27 +0,0 @@ -parameter_merge_strategies: - ServiceNetMap: merge -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/custom_network_multiple_nics.j2' - ComputeNetworkConfigTemplate: 'templates/ci/custom_network_multiple_nics.j2' - CloudNameExternalCloud1: overcloud1.ooo.test - CloudNameInternalApiCloud1: overcloud1.internalapicloud1.ooo.test - CloudNameStorageCloud1: overcloud1.storagecloud1.ooo.test - CloudNameStorageMgmtCloud1: overcloud1.storagemgmtcloud1.ooo.test - CloudNameCtlplane: overcloud1.ctlplane.ooo.test - CloudDomain: ooo.test - VipSubnetMap: - ctlplane: ctlplane-subnet - InternalApiCloud1: internal_api_cloud_1_subnet - StorageCloud1: storage_cloud_1_subnet - StorageMgmtCloud1: storage_mgmt_cloud_1_subnet - TenantCloud1: tenant_cloud_1_subnet - ExternalCloud1: external_cloud_1_subnet - redis: internal_api_cloud_1_subnet - ovn_dbs: internal_api_cloud_1_subnet - ServiceNetMap: - ComputeHostnameResolveNetwork: internal_api_cloud_1 - ControllerHostnameResolveNetwork: internal_api_cloud_1 - CephStorageHostnameResolveNetwork: storage_cloud_1 - ControllerMetricsQdrNetwork: internal_api_cloud_1 - ComputeMetricsQdrNetwork: internal_api_cloud_1 - CephStorageMetricsQdrNetwork: storage_cloud_1 diff --git a/ci/environments/network/multiple-nics/network-environment.yaml b/ci/environments/network/multiple-nics/network-environment.yaml deleted file mode 100644 index a57fa59258..0000000000 --- a/ci/environments/network/multiple-nics/network-environment.yaml +++ /dev/null @@ -1,19 +0,0 @@ -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multiple_nics.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multiple_nics.j2' - BlockStorageNetworkConfigTemplate: 'templates/ci/multiple_nics.j2' - CellControllerNetworkConfigTemplate: 'templates/ci/multiple_nics.j2' - ObjectStorageNetworkConfigTemplate: 'templates/ci/multiple_nics.j2' - CephStorageNetworkConfigTemplate: 'templates/ci/multiple_nics.j2' - ExternalNetCidr: 10.0.0.0/24 - ExternalAllocationPools: [{"start": "10.0.0.10", "end": "10.0.0.50"}] - ExternalInterfaceDefaultRoute: 10.0.0.1 - PublicVirtualFixedIPs: [{ "ip_address": "10.0.0.5" }] - InternalApiNetCidr: 172.17.0.0/24 - InternalApiAllocationPools: [{"start": "172.17.0.10", "end": "172.17.0.250"}] - StorageNetCidr: 172.18.0.0/24 - StorageAllocationPools: [{"start": "172.18.0.10", "end": "172.18.0.250"}] - StorageMgmtNetCidr: 172.19.0.0/24 - StorageMgmtAllocationPools: [{"start": "172.19.0.10", "end": "172.19.0.250"}] - 
TenantNetCidr: 172.16.0.0/24 - TenantAllocationPools: [{"start": "172.16.0.10", "end": "172.16.0.250"}] diff --git a/ci/environments/network/multiple-nics/network-isolation-absolute.yaml b/ci/environments/network/multiple-nics/network-isolation-absolute.yaml deleted file mode 100644 index b925631c3a..0000000000 --- a/ci/environments/network/multiple-nics/network-isolation-absolute.yaml +++ /dev/null @@ -1,32 +0,0 @@ -resource_registry: - # External - OS::TripleO::Network::External: /usr/share/openstack-tripleo-heat-templates/network/external.yaml - OS::TripleO::Network::Ports::ExternalVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml - OS::TripleO::Controller::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml - OS::TripleO::CellController::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml - # InternalApi - OS::TripleO::Network::InternalApi: /usr/share/openstack-tripleo-heat-templates/network/internal_api.yaml - OS::TripleO::Network::Ports::InternalApiVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - OS::TripleO::Controller::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - OS::TripleO::CellController::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - OS::TripleO::Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - # Storage - OS::TripleO::Network::Storage: /usr/share/openstack-tripleo-heat-templates/network/storage.yaml - OS::TripleO::Network::Ports::StorageVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::Controller::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::CellController::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::CephStorage::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - # StorageMgmt - OS::TripleO::Network::StorageMgmt: /usr/share/openstack-tripleo-heat-templates/network/storage_mgmt.yaml - OS::TripleO::Network::Ports::StorageMgmtVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - OS::TripleO::CellController::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - OS::TripleO::Compute::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - # Tenant - OS::TripleO::Network::Tenant: /usr/share/openstack-tripleo-heat-templates/network/tenant.yaml - OS::TripleO::Network::Ports::TenantVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::Controller::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::CellController::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::Compute::Ports::TenantPort: 
/usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml diff --git a/ci/environments/network/multiple-nics/network-isolation.yaml b/ci/environments/network/multiple-nics/network-isolation.yaml deleted file mode 100644 index fd05d7cd0c..0000000000 --- a/ci/environments/network/multiple-nics/network-isolation.yaml +++ /dev/null @@ -1,27 +0,0 @@ -resource_registry: - # External - OS::TripleO::Network::External: ../../../../network/external.yaml - OS::TripleO::Network::Ports::ExternalVipPort: ../../../../network/ports/external.yaml - OS::TripleO::Controller::Ports::ExternalPort: ../../../../network/ports/external.yaml - # InternalApi - OS::TripleO::Network::InternalApi: ../../../../network/internal_api.yaml - OS::TripleO::Network::Ports::InternalApiVipPort: ../../../../network/ports/internal_api.yaml - OS::TripleO::Controller::Ports::InternalApiPort: ../../../../network/ports/internal_api.yaml - OS::TripleO::Compute::Ports::InternalApiPort: ../../../../network/ports/internal_api.yaml - # Storage - OS::TripleO::Network::Storage: ../../../../network/storage.yaml - OS::TripleO::Network::Ports::StorageVipPort: ../../../../network/ports/storage.yaml - OS::TripleO::Controller::Ports::StoragePort: ../../../../network/ports/storage.yaml - OS::TripleO::Compute::Ports::StoragePort: ../../../../network/ports/storage.yaml - OS::TripleO::CephStorage::Ports::StoragePort: ../../../../network/ports/storage.yaml - # StorageMgmt - OS::TripleO::Network::StorageMgmt: ../../../../network/storage_mgmt.yaml - OS::TripleO::Network::Ports::StorageMgmtVipPort: ../../../../network/ports/storage_mgmt.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml - OS::TripleO::Compute::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml - # Tenant - OS::TripleO::Network::Tenant: ../../../../network/tenant.yaml - OS::TripleO::Network::Ports::TenantVipPort: ../../../../network/ports/tenant.yaml - OS::TripleO::Controller::Ports::TenantPort: ../../../../network/ports/tenant.yaml - OS::TripleO::Compute::Ports::TenantPort: ../../../../network/ports/tenant.yaml diff --git a/ci/environments/network/multiple-nics/ui-settings.pickle b/ci/environments/network/multiple-nics/ui-settings.pickle deleted file mode 100644 index 32b4be3463..0000000000 --- a/ci/environments/network/multiple-nics/ui-settings.pickle +++ /dev/null @@ -1,716 +0,0 @@ -(dp0 -S'global_data' -p1 -(dp2 -S'control' -p3 -(dp4 -S'route' -p5 -V192.168.24.1 -p6 -sS'mask' -p7 -I24 -sS'ec2' -p8 -V192.168.24.1 -p9 -ssS'major' -p10 -I1 -sS'management' -p11 -(dp12 -S'start' -p13 -V172.20.0.10 -p14 -sS'cidr' -p15 -V172.20.0.0/24 -p16 -sS'vlan' -p17 -I6 -sS'end' -p18 -V172.20.0.250 -p19 -ssS'dns2' -p20 -V8.8.4.4 -p21 -sS'dns1' -p22 -V8.8.8.8 -p23 -sS'storage' -p24 -(dp25 -g13 -V172.18.0.10 -p26 -sg15 -V172.18.0.0/24 -p27 -sg17 -I3 -sg18 -V172.18.0.250 -p28 -ssS'auto_routes' -p29 -I01 -sS'bond_options' -p30 -V -p31 -sS'version' -p32 -I1 -sS'external' -p33 -(dp34 -S'bridge' -p35 -V'' -p36 -sg18 -V10.0.0.50 -p37 -sg17 -I1 -sg13 -V10.0.0.10 -p38 -sg15 -V10.0.0.0/24 -p39 -sS'gateway' -p40 -V10.0.0.1 -p41 -ssS'internal_api' -p42 -(dp43 -g13 -V172.17.0.10 -p44 -sg15 -V172.17.0.0/24 -p45 -sg17 -I2 -sg18 -V172.17.0.250 -p46 -ssS'ipv6' -p47 -I00 -sS'storage_mgmt' -p48 -(dp49 -g13 -V172.19.0.10 -p50 -sg15 -V172.19.0.0/24 -p51 -sg17 -I4 -sg18 -V172.19.0.250 -p52 -ssS'minor' -p53 -I2 -sS'tenant' -p54 -(dp55 -g13 -V172.16.0.10 
-p56 -sg15 -V172.16.0.0/24 -p57 -sg17 -I5 -sg18 -V172.16.0.250 -p58 -sssS'data' -p59 -(dp60 -S'cinder-storage.yaml' -p61 -(lp62 -sS'ceph-storage.yaml' -p63 -(lp64 -(dp65 -Vaddresses -p66 -(lp67 -sVnetwork -p68 -VControlPlane -p69 -sVprimary -p70 -I01 -sVmtu -p71 -I1350 -sVport_name -p72 -g31 -sS'members' -p73 -(lp74 -sVroutes -p75 -(lp76 -sVuse_dhcp -p77 -I00 -sVtype -p78 -Vinterface -p79 -sVname -p80 -Vnic1 -p81 -sa(dp82 -Vaddresses -p83 -(lp84 -sVnetwork -p85 -VStorage -p86 -sVprimary -p87 -I01 -sVmtu -p88 -I1350 -sVport_name -p89 -g31 -sg73 -(lp90 -sVroutes -p91 -(lp92 -sVuse_dhcp -p93 -I00 -sVtype -p94 -Vinterface -p95 -sVname -p96 -Vnic4 -p97 -sa(dp98 -Vaddresses -p99 -(lp100 -sVnetwork -p101 -VStorageMgmt -p102 -sVprimary -p103 -I01 -sVmtu -p104 -I1350 -sVport_name -p105 -g31 -sg73 -(lp106 -sVroutes -p107 -(lp108 -sVuse_dhcp -p109 -I00 -sVtype -p110 -Vinterface -p111 -sVname -p112 -Vnic5 -p113 -sasS'controller.yaml' -p114 -(lp115 -(dp116 -Vaddresses -p117 -(lp118 -sVnetwork -p119 -VControlPlane -p120 -sVprimary -p121 -I01 -sVmtu -p122 -I1350 -sVport_name -p123 -g31 -sg73 -(lp124 -sVroutes -p125 -(lp126 -sVuse_dhcp -p127 -I00 -sVtype -p128 -Vinterface -p129 -sVname -p130 -Vnic1 -p131 -sa(dp132 -Vdns_servers -p133 -V{get_param: DnsServers} -p134 -sVaddresses -p135 -(lp136 -sVnetwork -p137 -VExternal -p138 -sVmtu -p139 -I-1 -sg73 -(lp140 -(dp141 -Vaddresses -p142 -(lp143 -sVnetwork -p144 -VNone -p145 -sVprimary -p146 -I01 -sVmtu -p147 -I1350 -sVport_name -p148 -g31 -sg73 -(lp149 -sVroutes -p150 -(lp151 -sVuse_dhcp -p152 -I00 -sVtype -p153 -Vinterface -p154 -sVname -p155 -Vnic2 -p156 -sasVroutes -p157 -(lp158 -sVuse_dhcp -p159 -I00 -sVtype -p160 -Vovs_bridge -p161 -sVname -p162 -Vbr-ex -p163 -sa(dp164 -Vaddresses -p165 -(lp166 -sVnetwork -p167 -VInternalApi -p168 -sVprimary -p169 -I01 -sVmtu -p170 -I1350 -sVport_name -p171 -g31 -sg73 -(lp172 -sVroutes -p173 -(lp174 -sVuse_dhcp -p175 -I00 -sVtype -p176 -Vinterface -p177 -sVname -p178 -Vnic3 -p179 -sa(dp180 -Vaddresses -p181 -(lp182 -sVnetwork -p183 -VStorage -p184 -sVprimary -p185 -I01 -sVmtu -p186 -I1350 -sVport_name -p187 -g31 -sg73 -(lp188 -sVroutes -p189 -(lp190 -sVuse_dhcp -p191 -I00 -sVtype -p192 -Vinterface -p193 -sVname -p194 -Vnic4 -p195 -sa(dp196 -Vaddresses -p197 -(lp198 -sVnetwork -p199 -VStorageMgmt -p200 -sVprimary -p201 -I01 -sVmtu -p202 -I1350 -sVport_name -p203 -g31 -sg73 -(lp204 -sVroutes -p205 -(lp206 -sVuse_dhcp -p207 -I00 -sVtype -p208 -Vinterface -p209 -sVname -p210 -Vnic5 -p211 -sa(dp212 -Vdns_servers -p213 -V{get_param: DnsServers} -p214 -sVaddresses -p215 -(lp216 -sVnetwork -p217 -VTenant -p218 -sVmtu -p219 -I-1 -sg73 -(lp220 -(dp221 -Vaddresses -p222 -(lp223 -sVnetwork -p224 -VNone -p225 -sVprimary -p226 -I01 -sVmtu -p227 -I1350 -sVport_name -p228 -g31 -sg73 -(lp229 -sVroutes -p230 -(lp231 -sVuse_dhcp -p232 -I00 -sVtype -p233 -Vinterface -p234 -sVname -p235 -Vnic6 -p236 -sasVroutes -p237 -(lp238 -sVuse_dhcp -p239 -I00 -sVtype -p240 -Vovs_bridge -p241 -sVname -p242 -Vbr-tenant -p243 -sasS'swift-storage.yaml' -p244 -(lp245 -sS'compute.yaml' -p246 -(lp247 -(dp248 -Vaddresses -p249 -(lp250 -sVnetwork -p251 -VControlPlane -p252 -sVprimary -p253 -I01 -sVmtu -p254 -I1350 -sVport_name -p255 -g31 -sg73 -(lp256 -sVroutes -p257 -(lp258 -sVuse_dhcp -p259 -I00 -sVtype -p260 -Vinterface -p261 -sVname -p262 -Vnic1 -p263 -sa(dp264 -Vaddresses -p265 -(lp266 -sVnetwork -p267 -VInternalApi -p268 -sVprimary -p269 -I01 -sVmtu -p270 -I1350 -sVport_name -p271 -g31 -sg73 -(lp272 -sVroutes -p273 -(lp274 -sVuse_dhcp -p275 -I00 -sVtype 
-p276 -Vinterface -p277 -sVname -p278 -Vnic3 -p279 -sa(dp280 -Vaddresses -p281 -(lp282 -sVnetwork -p283 -VStorage -p284 -sVprimary -p285 -I01 -sVmtu -p286 -I1350 -sVport_name -p287 -g31 -sg73 -(lp288 -sVroutes -p289 -(lp290 -sVuse_dhcp -p291 -I00 -sVtype -p292 -Vinterface -p293 -sVname -p294 -Vnic4 -p295 -sa(dp296 -Vdns_servers -p297 -V{get_param: DnsServers} -p298 -sVaddresses -p299 -(lp300 -sVnetwork -p301 -VTenant -p302 -sVmtu -p303 -I-1 -sg73 -(lp304 -(dp305 -Vaddresses -p306 -(lp307 -sVnetwork -p308 -VNone -p309 -sVprimary -p310 -I01 -sVmtu -p311 -I1350 -sVport_name -p312 -g31 -sg73 -(lp313 -sVroutes -p314 -(lp315 -sVuse_dhcp -p316 -I00 -sVtype -p317 -Vinterface -p318 -sVname -p319 -Vnic6 -p320 -sasVroutes -p321 -(lp322 -sVuse_dhcp -p323 -I00 -sVtype -p324 -Vovs_bridge -p325 -sVname -p326 -Vbr-tenant -p327 -sa(dp328 -Vaddresses -p329 -(lp330 -sVnetwork -p331 -VStorageMgmt -p332 -sVprimary -p333 -I01 -sVmtu -p334 -I1350 -sVport_name -p335 -g31 -sg73 -(lp336 -sVroutes -p337 -(lp338 -sVuse_dhcp -p339 -I00 -sVtype -p340 -Vinterface -p341 -sVname -p342 -Vnic5 -p343 -sass. \ No newline at end of file diff --git a/ci/environments/network/public-bond/README b/ci/environments/network/public-bond/README deleted file mode 100644 index 189bfb58ff..0000000000 --- a/ci/environments/network/public-bond/README +++ /dev/null @@ -1,23 +0,0 @@ -Generated Network Isolation Templates -------------------------------------- -These templates were generated by the UI tool at -https://github.com/cybertron/tripleo-scripts#net-iso-genpy - -ui-settings.pickle is specific to the tool. TripleO will not use it when -doing deployments with these templates, but it is needed to be able to -load the templates into the UI again. Note that the UI only reads this file, -so any changes made by hand to the templates will not be reflected in the UI. - -The network-isolation.yaml file needs to reference the port files shipped with -tripleo-heat-templates, so by default the tool generates the paths assuming -network-isolation.yaml will be copied into the environments/ directory of -tripleo-heat-templates. - -If the standard tripleo-heat-templates are in use, then the -network-isolation-absolute.yaml file can be used instead. It has hard-coded -references to the port files in /usr/share/openstack-tripleo-heat-templates. 
- -If the generated network isolation templates are at ~/generated-templates, an -example deployment command would look like: - -openstack overcloud deploy --templates -e ~/generated-templates/network-isolation-absolute.yaml -e ~/generated-templates/network-environment.yaml diff --git a/ci/environments/network/public-bond/network-environment.yaml b/ci/environments/network/public-bond/network-environment.yaml deleted file mode 100644 index 527042f922..0000000000 --- a/ci/environments/network/public-bond/network-environment.yaml +++ /dev/null @@ -1,19 +0,0 @@ -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/public_bond.j2' - ComputeNetworkConfigTemplate: 'templates/ci/public_bond.j2' - BlockStorageNetworkConfigTemplate: 'templates/ci/public_bond.j2' - CellControllerNetworkConfigTemplate: 'templates/ci/public_bond.j2' - ObjectStorageNetworkConfigTemplate: 'templates/ci/public_bond.j2' - CephStorageNetworkConfigTemplate: 'templates/ci/public_bond.j2' - ExternalNetCidr: 10.0.0.0/24 - ExternalAllocationPools: [{"start": "10.0.0.10", "end": "10.0.0.50"}] - ExternalInterfaceDefaultRoute: 10.0.0.1 - InternalApiNetCidr: 172.17.0.0/24 - InternalApiAllocationPools: [{"start": "172.17.0.10", "end": "172.17.0.250"}] - StorageNetCidr: 172.18.0.0/24 - StorageAllocationPools: [{"start": "172.18.0.10", "end": "172.18.0.250"}] - StorageMgmtNetCidr: 172.19.0.0/24 - StorageMgmtAllocationPools: [{"start": "172.19.0.10", "end": "172.19.0.250"}] - TenantNetCidr: 172.16.0.0/24 - TenantAllocationPools: [{"start": "172.16.0.10", "end": "172.16.0.250"}] - BondInterfaceOvsOptions: bond_mode=balance-slb diff --git a/ci/environments/network/public-bond/network-isolation-absolute.yaml b/ci/environments/network/public-bond/network-isolation-absolute.yaml deleted file mode 100644 index 24bae4b2db..0000000000 --- a/ci/environments/network/public-bond/network-isolation-absolute.yaml +++ /dev/null @@ -1,26 +0,0 @@ -resource_registry: - # External - OS::TripleO::Network::External: /usr/share/openstack-tripleo-heat-templates/network/external.yaml - OS::TripleO::Network::Ports::ExternalVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml - OS::TripleO::Controller::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml - # InternalApi - OS::TripleO::Network::InternalApi: /usr/share/openstack-tripleo-heat-templates/network/internal_api.yaml - OS::TripleO::Network::Ports::InternalApiVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - OS::TripleO::Controller::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - OS::TripleO::Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - # Storage - OS::TripleO::Network::Storage: /usr/share/openstack-tripleo-heat-templates/network/storage.yaml - OS::TripleO::Network::Ports::StorageVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::Controller::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::CephStorage::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - # StorageMgmt - OS::TripleO::Network::StorageMgmt: /usr/share/openstack-tripleo-heat-templates/network/storage_mgmt.yaml - 
OS::TripleO::Network::Ports::StorageMgmtVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - # Tenant - OS::TripleO::Network::Tenant: /usr/share/openstack-tripleo-heat-templates/network/tenant.yaml - OS::TripleO::Network::Ports::TenantVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::Controller::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml diff --git a/ci/environments/network/public-bond/network-isolation.yaml b/ci/environments/network/public-bond/network-isolation.yaml deleted file mode 100644 index d59d260dea..0000000000 --- a/ci/environments/network/public-bond/network-isolation.yaml +++ /dev/null @@ -1,26 +0,0 @@ -resource_registry: - # External - OS::TripleO::Network::External: ../../../../network/external.yaml - OS::TripleO::Network::Ports::ExternalVipPort: ../../../../network/ports/external.yaml - OS::TripleO::Controller::Ports::ExternalPort: ../../../../network/ports/external.yaml - # InternalApi - OS::TripleO::Network::InternalApi: ../../../../network/internal_api.yaml - OS::TripleO::Network::Ports::InternalApiVipPort: ../../../../network/ports/internal_api.yaml - OS::TripleO::Controller::Ports::InternalApiPort: ../../../../network/ports/internal_api.yaml - OS::TripleO::Compute::Ports::InternalApiPort: ../../../../network/ports/internal_api.yaml - # Storage - OS::TripleO::Network::Storage: ../../../../network/storage.yaml - OS::TripleO::Network::Ports::StorageVipPort: ../../../../network/ports/storage.yaml - OS::TripleO::Controller::Ports::StoragePort: ../../../../network/ports/storage.yaml - OS::TripleO::Compute::Ports::StoragePort: ../../../../network/ports/storage.yaml - OS::TripleO::CephStorage::Ports::StoragePort: ../../../../network/ports/storage.yaml - # StorageMgmt - OS::TripleO::Network::StorageMgmt: ../../../../network/storage_mgmt.yaml - OS::TripleO::Network::Ports::StorageMgmtVipPort: ../../../../network/ports/storage_mgmt.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml - # Tenant - OS::TripleO::Network::Tenant: ../../../../network/tenant.yaml - OS::TripleO::Network::Ports::TenantVipPort: ../../../../network/ports/tenant.yaml - OS::TripleO::Controller::Ports::TenantPort: ../../../../network/ports/tenant.yaml - OS::TripleO::Compute::Ports::TenantPort: ../../../../network/ports/tenant.yaml diff --git a/ci/environments/network/public-bond/ui-settings.pickle b/ci/environments/network/public-bond/ui-settings.pickle deleted file mode 100644 index 0927ffeb97..0000000000 --- a/ci/environments/network/public-bond/ui-settings.pickle +++ /dev/null @@ -1,699 +0,0 @@ -(dp0 -S'global_data' -p1 -(dp2 -S'control' -p3 -(dp4 -S'route' -p5 -V192.168.24.1 -p6 -sS'mask' -p7 -I24 -sS'ec2' -p8 -V192.168.24.1 -p9 -ssS'major' -p10 -I1 -sS'management' -p11 -(dp12 -S'start' -p13 -V172.20.0.10 -p14 -sS'cidr' -p15 -V172.20.0.0/24 -p16 -sS'vlan' -p17 -I6 -sS'end' -p18 -V172.20.0.250 -p19 -ssS'dns2' -p20 -V8.8.4.4 -p21 -sS'dns1' -p22 -V8.8.8.8 -p23 -sS'storage' -p24 -(dp25 
-g13 -V172.18.0.10 -p26 -sg15 -V172.18.0.0/24 -p27 -sg17 -I3 -sg18 -V172.18.0.250 -p28 -ssS'auto_routes' -p29 -I01 -sS'bond_options' -p30 -Vbond_mode=balance-slb -p31 -sS'version' -p32 -I2 -sS'external' -p33 -(dp34 -S'bridge' -p35 -V'' -p36 -sg18 -V10.0.0.50 -p37 -sg17 -I1 -sg13 -V10.0.0.10 -p38 -sg15 -V10.0.0.0/24 -p39 -sS'gateway' -p40 -V10.0.0.1 -p41 -ssS'internal_api' -p42 -(dp43 -g13 -V172.17.0.10 -p44 -sg15 -V172.17.0.0/24 -p45 -sg17 -I2 -sg18 -V172.17.0.250 -p46 -ssS'ipv6' -p47 -I00 -sS'storage_mgmt' -p48 -(dp49 -g13 -V172.19.0.10 -p50 -sg15 -V172.19.0.0/24 -p51 -sg17 -I4 -sg18 -V172.19.0.250 -p52 -ssS'minor' -p53 -I2 -sS'tenant' -p54 -(dp55 -g13 -V172.16.0.10 -p56 -sg15 -V172.16.0.0/24 -p57 -sg17 -I5 -sg18 -V172.16.0.250 -p58 -sssS'data' -p59 -(dp60 -S'cinder-storage.yaml' -p61 -(lp62 -sS'ceph-storage.yaml' -p63 -(lp64 -(dp65 -Vaddresses -p66 -(lp67 -sVnetwork -p68 -VControlPlane -p69 -sVprimary -p70 -I01 -sVmtu -p71 -I1350 -sS'members' -p72 -(lp73 -sVroutes -p74 -(lp75 -sVuse_dhcp -p76 -I00 -sVtype -p77 -Vinterface -p78 -sVname -p79 -Vnic1 -p80 -sa(dp81 -Vaddresses -p82 -(lp83 -sVnetwork -p84 -VStorage -p85 -sVprimary -p86 -I01 -sVmtu -p87 -I1350 -sg72 -(lp88 -sVroutes -p89 -(lp90 -sVuse_dhcp -p91 -I00 -sVtype -p92 -Vinterface -p93 -sVname -p94 -Vnic5 -p95 -sa(dp96 -Vaddresses -p97 -(lp98 -sVnetwork -p99 -VStorageMgmt -p100 -sVprimary -p101 -I01 -sVmtu -p102 -I1350 -sg72 -(lp103 -sVroutes -p104 -(lp105 -sVuse_dhcp -p106 -I00 -sVtype -p107 -Vinterface -p108 -sVname -p109 -Vnic6 -p110 -sasS'controller.yaml' -p111 -(lp112 -(dp113 -Vaddresses -p114 -(lp115 -sVnetwork -p116 -VControlPlane -p117 -sVprimary -p118 -I01 -sVmtu -p119 -I1350 -sg72 -(lp120 -sVroutes -p121 -(lp122 -sVuse_dhcp -p123 -I00 -sVtype -p124 -Vinterface -p125 -sVname -p126 -Vnic1 -p127 -sa(dp128 -Vdns_servers -p129 -V{get_param: DnsServers} -p130 -sVaddresses -p131 -(lp132 -sVnetwork -p133 -VExternal -p134 -sVmtu -p135 -I-1 -sg72 -(lp136 -(dp137 -Vnetwork -p138 -VNone -p139 -sVbond_type -p140 -Vovs -p141 -sVovs_options -p142 -V{get_param: BondInterfaceOvsOptions} -p143 -sVmtu -p144 -I-1 -sg72 -(lp145 -(dp146 -Vaddresses -p147 -(lp148 -sVnetwork -p149 -VNone -p150 -sVprimary -p151 -I01 -sVmtu -p152 -I1350 -sVroutes -p153 -(lp154 -sVuse_dhcp -p155 -I00 -sVtype -p156 -Vinterface -p157 -sVname -p158 -Vnic2 -p159 -sa(dp160 -Vaddresses -p161 -(lp162 -sVnetwork -p163 -VNone -p164 -sVprimary -p165 -I00 -sVmtu -p166 -I1350 -sVroutes -p167 -(lp168 -sVuse_dhcp -p169 -I00 -sVtype -p170 -Vinterface -p171 -sVname -p172 -Vnic3 -p173 -sasVroutes -p174 -(lp175 -sVtype -p176 -Vovs_bond -p177 -sVname -p178 -Vbond1 -p179 -sasVroutes -p180 -(lp181 -sVuse_dhcp -p182 -I00 -sVtype -p183 -Vovs_bridge -p184 -sVname -p185 -Vbr-ex -p186 -sa(dp187 -Vaddresses -p188 -(lp189 -sVnetwork -p190 -VInternalApi -p191 -sVprimary -p192 -I01 -sVmtu -p193 -I1350 -sg72 -(lp194 -sVroutes -p195 -(lp196 -sVuse_dhcp -p197 -I00 -sVtype -p198 -Vinterface -p199 -sVname -p200 -Vnic4 -p201 -sa(dp202 -Vaddresses -p203 -(lp204 -sVnetwork -p205 -VStorage -p206 -sVprimary -p207 -I01 -sVmtu -p208 -I1350 -sg72 -(lp209 -sVroutes -p210 -(lp211 -sVuse_dhcp -p212 -I00 -sVtype -p213 -Vinterface -p214 -sVname -p215 -Vnic5 -p216 -sa(dp217 -Vaddresses -p218 -(lp219 -sVnetwork -p220 -VStorageMgmt -p221 -sVprimary -p222 -I01 -sVmtu -p223 -I1350 -sg72 -(lp224 -sVroutes -p225 -(lp226 -sVuse_dhcp -p227 -I00 -sVtype -p228 -Vinterface -p229 -sVname -p230 -Vnic6 -p231 -sa(dp232 -Vdns_servers -p233 -V{get_param: DnsServers} -p234 -sVaddresses -p235 -(lp236 -sVnetwork -p237 -VTenant -p238 
-sVmtu -p239 -I-1 -sg72 -(lp240 -(dp241 -Vaddresses -p242 -(lp243 -sVnetwork -p244 -VNone -p245 -sVprimary -p246 -I01 -sVmtu -p247 -I1350 -sg72 -(lp248 -sVroutes -p249 -(lp250 -sVuse_dhcp -p251 -I00 -sVtype -p252 -Vinterface -p253 -sVname -p254 -Vnic7 -p255 -sasVroutes -p256 -(lp257 -sVuse_dhcp -p258 -I00 -sVtype -p259 -Vovs_bridge -p260 -sVname -p261 -Vbr-tenant -p262 -sasS'swift-storage.yaml' -p263 -(lp264 -sS'compute.yaml' -p265 -(lp266 -(dp267 -Vaddresses -p268 -(lp269 -sVnetwork -p270 -VControlPlane -p271 -sVprimary -p272 -I01 -sVmtu -p273 -I1350 -sg72 -(lp274 -sVroutes -p275 -(lp276 -sVuse_dhcp -p277 -I00 -sVtype -p278 -Vinterface -p279 -sVname -p280 -Vnic1 -p281 -sa(dp282 -Vaddresses -p283 -(lp284 -sVnetwork -p285 -VInternalApi -p286 -sVprimary -p287 -I01 -sVmtu -p288 -I1350 -sg72 -(lp289 -sVroutes -p290 -(lp291 -sVuse_dhcp -p292 -I00 -sVtype -p293 -Vinterface -p294 -sVname -p295 -Vnic4 -p296 -sa(dp297 -Vaddresses -p298 -(lp299 -sVnetwork -p300 -VStorage -p301 -sVprimary -p302 -I01 -sVmtu -p303 -I1350 -sg72 -(lp304 -sVroutes -p305 -(lp306 -sVuse_dhcp -p307 -I00 -sVtype -p308 -Vinterface -p309 -sVname -p310 -Vnic5 -p311 -sa(dp312 -Vdns_servers -p313 -V{get_param: DnsServers} -p314 -sVaddresses -p315 -(lp316 -sVnetwork -p317 -VTenant -p318 -sVmtu -p319 -I-1 -sg72 -(lp320 -(dp321 -Vaddresses -p322 -(lp323 -sVnetwork -p324 -VNone -p325 -sVprimary -p326 -I01 -sVmtu -p327 -I1350 -sg72 -(lp328 -sVroutes -p329 -(lp330 -sVuse_dhcp -p331 -I00 -sVtype -p332 -Vinterface -p333 -sVname -p334 -Vnic7 -p335 -sasVroutes -p336 -(lp337 -sVuse_dhcp -p338 -I00 -sVtype -p339 -Vovs_bridge -p340 -sVname -p341 -Vbr-tenant -p342 -sass. \ No newline at end of file diff --git a/ci/environments/neutron_dns_domain.yaml b/ci/environments/neutron_dns_domain.yaml deleted file mode 100644 index 899e744e29..0000000000 --- a/ci/environments/neutron_dns_domain.yaml +++ /dev/null @@ -1,2 +0,0 @@ -parameter_defaults: - NeutronDnsDomain: 'openstackgate.local' diff --git a/ci/environments/neutron_l3_qos.yaml b/ci/environments/neutron_l3_qos.yaml deleted file mode 100644 index 5a83884231..0000000000 --- a/ci/environments/neutron_l3_qos.yaml +++ /dev/null @@ -1,2 +0,0 @@ -parameter_defaults: - NeutronL3AgentExtensions: 'fip_qos,gateway_ip_qos,port_forwarding' diff --git a/ci/environments/neutron_sec_group_quota.yaml b/ci/environments/neutron_sec_group_quota.yaml deleted file mode 100644 index 62530b0860..0000000000 --- a/ci/environments/neutron_sec_group_quota.yaml +++ /dev/null @@ -1,2 +0,0 @@ -parameter_defaults: - NeutronSecurityGroupQuota: '150' diff --git a/ci/environments/octavia-kvm.yaml b/ci/environments/octavia-kvm.yaml deleted file mode 100644 index c806f3252b..0000000000 --- a/ci/environments/octavia-kvm.yaml +++ /dev/null @@ -1,7 +0,0 @@ -parameter_defaults: - StandaloneExtraConfig: - # Config related to nova libvirt - octavia::controller::connection_retry_interval: 10 - nova::compute::libvirt::services::libvirt_virt_type: kvm - nova::compute::libvirt::virt_type: kvm - nova::compute::libvirt::cpu_mode: 'host-passthrough' diff --git a/ci/environments/ovb-ha.yaml b/ci/environments/ovb-ha.yaml deleted file mode 100644 index 1d3d7150a9..0000000000 --- a/ci/environments/ovb-ha.yaml +++ /dev/null @@ -1,10 +0,0 @@ -parameter_defaults: - ControllerExtraConfig: - nova::compute::libvirt::services::libvirt_virt_type: qemu - nova::compute::libvirt::virt_type: qemu - Debug: true - HideSensitiveLogs: false - DockerPuppetDebug: True - ExtraConfig: - tripleo::profile::pacemaker::database::mysql_bundle::provider_options: 
'evs.suspect_timeout=PT30S' - CorosyncTokenTimeout: 30000 diff --git a/ci/environments/scenario000-multinode-containers.yaml b/ci/environments/scenario000-multinode-containers.yaml deleted file mode 100644 index 717f0f9918..0000000000 --- a/ci/environments/scenario000-multinode-containers.yaml +++ /dev/null @@ -1,27 +0,0 @@ -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2' - ControllerServices: - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::ContainerImagePrepare - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::Keystone - - OS::TripleO::Services::MySQL - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::OsloMessagingNotify - - OS::TripleO::Services::Memcached - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::Pacemaker - - OS::TripleO::Services::Clustercheck - Debug: true - DockerPuppetDebug: True - NotificationDriver: 'noop' - ContainerCli: podman diff --git a/ci/environments/scenario000-standalone.yaml b/ci/environments/scenario000-standalone.yaml deleted file mode 100644 index 2692431cbb..0000000000 --- a/ci/environments/scenario000-standalone.yaml +++ /dev/null @@ -1,164 +0,0 @@ -# This scenario only install keystone and the required services to run it. -resource_registry: - OS::TripleO::Services::AodhApi: OS::Heat::None - OS::TripleO::Services::AodhEvaluator: OS::Heat::None - OS::TripleO::Services::AodhListener: OS::Heat::None - OS::TripleO::Services::AodhNotifier: OS::Heat::None - OS::TripleO::Services::Apache: OS::Heat::None - OS::TripleO::Services::AuditD: OS::Heat::None - OS::TripleO::Services::BarbicanApi: OS::Heat::None - OS::TripleO::Services::BarbicanBackendDogtag: OS::Heat::None - OS::TripleO::Services::BarbicanBackendKmip: OS::Heat::None - OS::TripleO::Services::BarbicanBackendPkcs11Crypto: OS::Heat::None - OS::TripleO::Services::BarbicanBackendSimpleCrypto: OS::Heat::None - OS::TripleO::Services::BlockStorageCinderVolume: OS::Heat::None - OS::TripleO::Services::BootParams: OS::Heat::None - OS::TripleO::Services::CeilometerAgentCentral: OS::Heat::None - OS::TripleO::Services::CeilometerAgentIpmi: OS::Heat::None - OS::TripleO::Services::CeilometerAgentNotification: OS::Heat::None - OS::TripleO::Services::CephClient: OS::Heat::None - OS::TripleO::Services::CephExternal: OS::Heat::None - OS::TripleO::Services::CephMds: OS::Heat::None - OS::TripleO::Services::CephMgr: OS::Heat::None - OS::TripleO::Services::CephMon: OS::Heat::None - OS::TripleO::Services::CephNfs: OS::Heat::None - OS::TripleO::Services::CephOSD: OS::Heat::None - OS::TripleO::Services::CephRbdMirror: OS::Heat::None - OS::TripleO::Services::CephRgw: OS::Heat::None - OS::TripleO::Services::Chrony: OS::Heat::None - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderBackup: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::Clustercheck: OS::Heat::None - OS::TripleO::Services::Collectd: OS::Heat::None - OS::TripleO::Services::ComputeCeilometerAgent: OS::Heat::None - OS::TripleO::Services::ComputeInstanceHA: OS::Heat::None - 
OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None - OS::TripleO::Services::ComputeNeutronL3Agent: OS::Heat::None - OS::TripleO::Services::ComputeNeutronMetadataAgent: OS::Heat::None - OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None - OS::TripleO::Services::ComputeNeutronOvsDpdk: OS::Heat::None - OS::TripleO::Services::ContainerImagePrepare: ../../deployment/container-image-prepare/container-image-prepare-baremetal-ansible.yaml - OS::TripleO::Services::DesignateApi: OS::Heat::None - OS::TripleO::Services::DesignateCentral: OS::Heat::None - OS::TripleO::Services::DesignateMDNS: OS::Heat::None - OS::TripleO::Services::DesignateProducer: OS::Heat::None - OS::TripleO::Services::DesignateSink: OS::Heat::None - OS::TripleO::Services::DesignateWorker: OS::Heat::None - OS::TripleO::Services::DesignateBind: OS::Heat::None - OS::TripleO::Services::Etcd: OS::Heat::None - OS::TripleO::Services::ExternalSwiftProxy: OS::Heat::None - OS::TripleO::Services::GlanceApi: OS::Heat::None - OS::TripleO::Services::GlanceApiInternal: OS::Heat::None - OS::TripleO::Services::GnocchiApi: OS::Heat::None - OS::TripleO::Services::GnocchiMetricd: OS::Heat::None - OS::TripleO::Services::GnocchiStatsd: OS::Heat::None - OS::TripleO::Services::HAProxyInternalTLS: OS::Heat::None - OS::TripleO::Services::HAProxyPublicTLS: OS::Heat::None - OS::TripleO::Services::HAproxy: OS::Heat::None - OS::TripleO::Services::HeatApi: OS::Heat::None - OS::TripleO::Services::HeatApiCfn: OS::Heat::None - OS::TripleO::Services::HeatEngine: OS::Heat::None - OS::TripleO::Services::Horizon: OS::Heat::None - OS::TripleO::Services::IpaClient: OS::Heat::None - OS::TripleO::Services::Ipsec: OS::Heat::None - OS::TripleO::Services::IronicApi: OS::Heat::None - OS::TripleO::Services::IronicConductor: OS::Heat::None - OS::TripleO::Services::IronicInspector: OS::Heat::None - OS::TripleO::Services::IronicNeutronAgent: OS::Heat::None - OS::TripleO::Services::IronicPxe: OS::Heat::None - OS::TripleO::Services::Iscsid: OS::Heat::None - # OS::TripleO::Services::Keystone: OS::Heat::None - OS::TripleO::Services::Logging::BarbicanApi: OS::Heat::None - OS::TripleO::Services::Logging::GlanceApi: OS::Heat::None - OS::TripleO::Services::Logging::HAProxy: OS::Heat::None - OS::TripleO::Services::Logging::HeatApi: OS::Heat::None - OS::TripleO::Services::Logging::HeatApiCfn: OS::Heat::None - # OS::TripleO::Services::Logging::Keystone: OS::Heat::None - OS::TripleO::Services::Logging::NeutronApi: OS::Heat::None - OS::TripleO::Services::Logging::NeutronCommon: OS::Heat::None - OS::TripleO::Services::Logging::NovaApi: OS::Heat::None - OS::TripleO::Services::Logging::NovaCommon: OS::Heat::None - OS::TripleO::Services::Logging::NovaLibvirt: OS::Heat::None - OS::TripleO::Services::Logging::NovaMetadata: OS::Heat::None - OS::TripleO::Services::Logging::PlacementApi: OS::Heat::None - OS::TripleO::Services::ManilaApi: OS::Heat::None - OS::TripleO::Services::ManilaBackendCephFs: OS::Heat::None - OS::TripleO::Services::ManilaBackendFlashBlade: OS::Heat::None - OS::TripleO::Services::ManilaBackendIsilon: OS::Heat::None - OS::TripleO::Services::ManilaBackendNetapp: OS::Heat::None - OS::TripleO::Services::ManilaBackendPowerMax: OS::Heat::None - OS::TripleO::Services::ManilaBackendUnity: OS::Heat::None - OS::TripleO::Services::ManilaBackendVNX: OS::Heat::None - OS::TripleO::Services::ManilaScheduler: OS::Heat::None - OS::TripleO::Services::ManilaShare: OS::Heat::None - OS::TripleO::Services::MasqueradeNetworks: OS::Heat::None - OS::TripleO::Services::Memcached: 
OS::Heat::None - OS::TripleO::Services::MetricsQdr: OS::Heat::None - OS::TripleO::Services::Multipathd: OS::Heat::None - # OS::TripleO::Services::MySQL: OS::Heat::None - OS::TripleO::Services::NeutronApi: OS::Heat::None - OS::TripleO::Services::NeutronBgpVpnApi: OS::Heat::None - OS::TripleO::Services::NeutronBgpVpnBagpipe: OS::Heat::None - OS::TripleO::Services::NeutronCorePlugin: OS::Heat::None - OS::TripleO::Services::NeutronCorePluginMLNXSDN: OS::Heat::None - OS::TripleO::Services::NeutronCorePluginVTS: OS::Heat::None - OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None - OS::TripleO::Services::NeutronL2gwAgent: OS::Heat::None - OS::TripleO::Services::NeutronL2gwApi: OS::Heat::None - OS::TripleO::Services::NeutronL3Agent: OS::Heat::None - OS::TripleO::Services::NeutronLinuxbridgeAgent: OS::Heat::None - OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None - OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None - OS::TripleO::Services::NeutronSfcApi: OS::Heat::None - OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None - OS::TripleO::Services::NovaAZConfig: OS::Heat::None - OS::TripleO::Services::NovaApi: OS::Heat::None - OS::TripleO::Services::NovaCompute: OS::Heat::None - OS::TripleO::Services::NovaConductor: OS::Heat::None - OS::TripleO::Services::NovaIronic: OS::Heat::None - OS::TripleO::Services::NovaLibvirt: OS::Heat::None - OS::TripleO::Services::NovaLibvirtGuests: OS::Heat::None - OS::TripleO::Services::NovaMetadata: OS::Heat::None - OS::TripleO::Services::NovaMigrationTarget: OS::Heat::None - OS::TripleO::Services::NovaScheduler: OS::Heat::None - OS::TripleO::Services::NovaVncProxy: OS::Heat::None - OS::TripleO::Services::OVNController: OS::Heat::None - OS::TripleO::Services::OVNDBs: OS::Heat::None - OS::TripleO::Services::OVNMetadataAgent: OS::Heat::None - OS::TripleO::Services::OctaviaApi: OS::Heat::None - OS::TripleO::Services::OctaviaDeploymentConfig: OS::Heat::None - OS::TripleO::Services::OctaviaHealthManager: OS::Heat::None - OS::TripleO::Services::OctaviaHousekeeping: OS::Heat::None - OS::TripleO::Services::OctaviaWorker: OS::Heat::None - # OS::TripleO::Services::OsloMessagingNotify: OS::Heat::None - # OS::TripleO::Services::OsloMessagingRpc: OS::Heat::None - OS::TripleO::Services::Pacemaker: OS::Heat::None - OS::TripleO::Services::PacemakerRemote: OS::Heat::None - OS::TripleO::Services::PlacementApi: OS::Heat::None - OS::TripleO::Services::Podman: ../../deployment/podman/podman-baremetal-ansible.yaml - OS::TripleO::Services::Ptp: OS::Heat::None - OS::TripleO::Services::Qdr: OS::Heat::None - # OS::TripleO::Services::RabbitMQ: OS::Heat::None - OS::TripleO::Services::Redis: OS::Heat::None - OS::TripleO::Services::RsyslogSidecar: OS::Heat::None - OS::TripleO::Services::SELinux: OS::Heat::None - OS::TripleO::Services::Securetty: OS::Heat::None - OS::TripleO::Services::Snmp: OS::Heat::None - OS::TripleO::Services::SwiftDispersion: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - OS::TripleO::Services::TLSProxyBase: OS::Heat::None - OS::TripleO::Services::Tempest: OS::Heat::None - OS::TripleO::Services::Tmpwatch: ../../deployment/logrotate/tmpwatch-install.yaml - OS::TripleO::Services::TripleoPackages: OS::Heat::None - OS::TripleO::Services::Tuned: OS::Heat::None - OS::TripleO::Services::Unbound: OS::Heat::None - OS::TripleO::Services::UndercloudTLS: OS::Heat::None - OS::TripleO::Services::UndercloudUpgrade: OS::Heat::None 
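(Sketch of the pattern used throughout the registry above, with entries copied from this same scenario file: a service is disabled by mapping it to OS::Heat::None, pinned to a specific implementation by mapping it to a deployment template, or left at its default by commenting the override out, as the Keystone and MySQL lines are.)

    resource_registry:
      # disable a service entirely
      OS::TripleO::Services::NovaApi: OS::Heat::None
      # keep a service, selecting a concrete implementation
      OS::TripleO::Services::Podman: ../../deployment/podman/podman-baremetal-ansible.yaml
      # commented-out entries (e.g. Keystone, MySQL above) keep their default mapping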
-parameter_defaults: - Debug: true - NotificationDriver: 'noop' - ContainerCli: podman diff --git a/ci/environments/scenario001-multinode-containers.yaml b/ci/environments/scenario001-multinode-containers.yaml deleted file mode 100644 index fb7578e47f..0000000000 --- a/ci/environments/scenario001-multinode-containers.yaml +++ /dev/null @@ -1,168 +0,0 @@ -resource_registry: - OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml - OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml - OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml - OS::TripleO::Services::CephGrafana: ../../deployment/cephadm/ceph-grafana.yaml - OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml - OS::TripleO::Services::CeilometerAgentCentral: ../../deployment/ceilometer/ceilometer-agent-central-container-puppet.yaml - OS::TripleO::Services::CeilometerAgentNotification: ../../deployment/ceilometer/ceilometer-agent-notification-container-puppet.yaml - OS::TripleO::Services::CeilometerAgentIpmi: ../../deployment/ceilometer/ceilometer-agent-ipmi-container-puppet.yaml - OS::TripleO::Services::ComputeCeilometerAgent: ../../deployment/ceilometer/ceilometer-agent-compute-container-puppet.yaml - OS::TripleO::Services::Collectd: ../../deployment/metrics/collectd-container-puppet.yaml - OS::TripleO::Services::MetricsQdr: ../../deployment/metrics/qdr-container-puppet.yaml - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml - OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml - OS::TripleO::Services::HAproxy: ../../deployment/haproxy/haproxy-pacemaker-puppet.yaml - OS::TripleO::Services::Pacemaker: ../../deployment/pacemaker/pacemaker-baremetal-puppet.yaml - OS::TripleO::Services::PacemakerRemote: ../../deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml - OS::TripleO::Services::Clustercheck: ../../deployment/pacemaker/clustercheck-container-puppet.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml - OS::TripleO::Services::MySQL: ../../deployment/database/mysql-pacemaker-puppet.yaml - OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-pacemaker-puppet.yaml - OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-pacemaker-puppet.yaml - OS::TripleO::Services::HeatApi: ../../deployment/heat/heat-api-container-puppet.yaml - OS::TripleO::Services::HeatApiCfn: ../../deployment/heat/heat-api-cfn-container-puppet.yaml - OS::TripleO::Services::HeatEngine: ../../deployment/heat/heat-engine-container-puppet.yaml - -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2' - ControllerServices: - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::Clustercheck - - OS::TripleO::Services::ContainerImagePrepare - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::Keystone - - OS::TripleO::Services::LoginDefs - - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceApiInternal - - OS::TripleO::Services::HeatApi - - OS::TripleO::Services::HeatApiCfn - - OS::TripleO::Services::HeatEngine - - OS::TripleO::Services::MySQL - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::NeutronApi - - OS::TripleO::Services::NeutronCorePlugin - - OS::TripleO::Services::OVNDBs - - 
OS::TripleO::Services::OVNController - - OS::TripleO::Services::OVNMetadataAgent - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::OsloMessagingNotify - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::Memcached - - OS::TripleO::Services::Pacemaker - - OS::TripleO::Services::NovaConductor - - OS::TripleO::Services::NovaApi - - OS::TripleO::Services::PlacementApi - - OS::TripleO::Services::NovaMetadata - - OS::TripleO::Services::NovaScheduler - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::Securetty - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::NovaCompute - - OS::TripleO::Services::NovaLibvirt - - OS::TripleO::Services::NovaMigrationTarget - - OS::TripleO::Services::Redis - - OS::TripleO::Services::AodhApi - - OS::TripleO::Services::AodhEvaluator - - OS::TripleO::Services::AodhNotifier - - OS::TripleO::Services::AodhListener - - OS::TripleO::Services::CeilometerAgentCentral - - OS::TripleO::Services::CeilometerAgentIpmi - - OS::TripleO::Services::CeilometerAgentNotification - - OS::TripleO::Services::ComputeCeilometerAgent - - OS::TripleO::Services::GnocchiApi - - OS::TripleO::Services::GnocchiMetricd - - OS::TripleO::Services::GnocchiStatsd - - OS::TripleO::Services::CephMgr - - OS::TripleO::Services::CephMon - - OS::TripleO::Services::CephOSD - - OS::TripleO::Services::CephClient - - OS::TripleO::Services::CinderApi - - OS::TripleO::Services::CinderBackup - - OS::TripleO::Services::CinderScheduler - - OS::TripleO::Services::CinderVolume - - OS::TripleO::Services::Collectd - - OS::TripleO::Services::MetricsQdr - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::Iscsid - - OS::TripleO::Services::Multipathd - - ControllerExtraConfig: - nova::compute::libvirt::services::libvirt_virt_type: qemu - nova::compute::libvirt::virt_type: qemu - # NOTE(sileht): To decrease the time test_telemetry_integration takes We - # configure Ceilometer to poll more, We configure the - # 'ceilometer-high-rate' Gnocchi archive policy to keep 1 point every 60s. - # The test will take 2 minutes instead of 10 minutes. Note that tempest - # telemetry.alarm_granularity must in sync with the archive policy, 60s - # too. - ceilometer::agent::polling::polling_interval: 15 - # NOTE(mmagr): uncomment when QDR mesh will actually work (followup on review.opendev.org/702754) - #tripleo::profile::base::metrics::qdr::router_mode: interior - NotificationDriver: 'messagingv2' - ManagePolling: true - ManagePipeline: true - CeilometerEnableGnocchi: true - PipelinePublishers: - - gnocchi://?archive_policy=ceilometer-high-rate - EventPipelinePublishers: - - gnocchi://?archive_policy=ceilometer-high-rate - CeilometerQdrPublishEvents: true - ManageEventPipeline: true - Debug: true - DockerPuppetDebug: True - CephPools: - - name: altrbd - rule_name: replicated_rule - #NOTE: These ID's and keys should be regenerated for - # a production deployment. What is here is suitable for - # developer and CI testing only. 
- CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19' - CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' - CephEnableDashboard: true - NovaEnableRbdBackend: true - CinderEnableRbdBackend: true - CinderRbdExtraPools: altrbd - CinderBackupBackend: ceph - GlanceBackend: rbd - GnocchiBackend: rbd - CinderEnableIscsiBackend: false - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. Usage of * - * this system may be monitored and recorded by system personnel. * - * Anyone using this system expressly consents to such monitoring * - * and is advised that if such monitoring reveals possible * - * evidence of criminal activity, system personnel may provide * - * the evidence from such monitoring to law enforcement officials.* - ****************************************************************** - CollectdConnectionType: amqp1 - CollectdExtraPlugins: - - rrdtool - CollectdEnableSensubility: true - CollectdEnableLibpodstats: true - LoggingServers: - - host: 127.0.0.1 - port: 24224 - TtyValues: - - console - - tty1 - - tty2 - - tty3 - - tty4 - - tty5 - - tty6 - # Remove ContainerCli once this scenario is tested on CentOS8 - ContainerCli: podman - CephConfigPath: "/etc/ceph" - CephClientConfigVars: "{{ playbook_dir }}/cephadm/ceph_client.yml" - CephSpecFqdn: true - CephOsdSpec: - data_devices: - paths: - - /dev/ceph_vg/ceph_lv_data diff --git a/ci/environments/scenario001-standalone.yaml b/ci/environments/scenario001-standalone.yaml deleted file mode 100644 index 8c49e1105b..0000000000 --- a/ci/environments/scenario001-standalone.yaml +++ /dev/null @@ -1,222 +0,0 @@ -resource_registry: - OS::TripleO::Services::AodhApi: ../../deployment/aodh/aodh-api-container-puppet.yaml - OS::TripleO::Services::AodhEvaluator: ../../deployment/aodh/aodh-evaluator-container-puppet.yaml - OS::TripleO::Services::AodhListener: ../../deployment/aodh/aodh-listener-container-puppet.yaml - OS::TripleO::Services::AodhNotifier: ../../deployment/aodh/aodh-notifier-container-puppet.yaml - OS::TripleO::Services::CeilometerAgentCentral: ../../deployment/ceilometer/ceilometer-agent-central-container-puppet.yaml - OS::TripleO::Services::CeilometerAgentNotification: ../../deployment/ceilometer/ceilometer-agent-notification-container-puppet.yaml - OS::TripleO::Services::ComputeCeilometerAgent: ../../deployment/ceilometer/ceilometer-agent-compute-container-puppet.yaml - OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml - OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml - OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml - OS::TripleO::Services::CephGrafana: ../../deployment/cephadm/ceph-grafana.yaml - OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml - OS::TripleO::Services::CephRgw: ../../deployment/cephadm/ceph-rgw.yaml - OS::TripleO::Services::Collectd: ../../deployment/metrics/collectd-container-puppet.yaml - OS::TripleO::Services::GnocchiApi: ../../deployment/gnocchi/gnocchi-api-container-puppet.yaml - OS::TripleO::Services::GnocchiMetricd: ../../deployment/gnocchi/gnocchi-metricd-container-puppet.yaml - OS::TripleO::Services::GnocchiStatsd: ../../deployment/gnocchi/gnocchi-statsd-container-puppet.yaml - OS::TripleO::Services::HeatApi: ../../deployment/heat/heat-api-container-puppet.yaml - OS::TripleO::Services::HeatApiCfn: ../../deployment/heat/heat-api-cfn-container-puppet.yaml - OS::TripleO::Services::HeatEngine: 
../../deployment/heat/heat-engine-container-puppet.yaml - OS::TripleO::Services::Horizon: ../../deployment/horizon/horizon-container-puppet.yaml - OS::TripleO::Services::MetricsQdr: ../../deployment/metrics/qdr-container-puppet.yaml - OS::TripleO::Services::Multipathd: ../../deployment/multipathd/multipathd-container-ansible.yaml - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml - OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml - OS::TripleO::Services::Rsyslog: ../../deployment/logging/rsyslog-container-puppet.yaml - OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-pacemaker-puppet.yaml - OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-pacemaker-puppet.yaml - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - -parameter_defaults: - NodeDataLookup: - AB4114B1-9C9D-409A-BEFB-D88C151BF2C3: {"foo": "bar"} - 8CF1A7EA-7B4B-4433-AC83-17675514B1B8: {"foo2": "bar2"} - StandaloneExtraConfig: - # NOTE(sileht): To decrease the time test_telemetry_integration takes We - # configure Ceilometer to poll more, We configure the 'high' Gnocchi - # archive policy to keep 1 point every 60s. The test will take 2 minutes - # instead of 10 minutes. Note that tempest telemetry.alarm_granularity must - # in sync with the archive policy, 60s too. - ceilometer::agent::polling::polling_interval: 15 - tripleo::profile::base::metrics::qdr::interior_mesh_nodes: '' - collectd::plugin::ceph::daemons: - - 'ceph-mon.controller-00' - - 'ceph-mon.controller-01' - - 'ceph-mon.controller-02' - - 'ceph-osd.01' - - 'ceph-osd.07' - NotificationDriver: 'messagingv2' - ManagePolling: true - HeatConfigureDelegatedRoles: true - CeilometerEnableGnocchi: true - PipelinePublishers: - - gnocchi://?archive_policy=ceilometer-high-rate - EventPipelinePublishers: - - gnocchi://?archive_policy=ceilometer-high-rate - CeilometerQdrPublishEvents: true - CeilometerQdrPublishMetrics: true - ManageEventPipeline: true - ManagePipeline: true - Debug: true - DeployedCeph: true - CephEnableDashboard: true - CephDashboardPort: 8445 - GrafanaDashboardPort: 3200 - CinderRbdExtraPools: altrbd,pool2,pool3 - CephPools: - - name: altrbd - rule_name: replicated_rule - application: rbd - #NOTE: These ID's and keys should be regenerated for - # a production deployment. What is here is suitable for - # developer and CI testing only. 
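As in the other scenarios, the resource_registry block above drives what each CI job actually deploys: mapping a service to a deployment template enables it, while mapping it to OS::Heat::None drops it from the deployment. A condensed sketch of that pattern, using two mappings taken verbatim from the registry above:

resource_registry:
  # Enabled: the service resolves to its containerized deployment template.
  OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml
  # Disabled: the service resolves to Heat's no-op resource.
  OS::TripleO::Services::SwiftProxy: OS::Heat::None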
- CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' - CephExtraKeys: - - name: "client.glance" - caps: - mgr: "allow *" - mon: "profile rbd" - osd: "profile rbd pool=images" - key: "AQBRgQ9eAAAAABAAv84zEilJYZPNuJ0Iwn9Ndg==" - mode: "0600" - CephExternalMultiConfig: - # create client conf and key file for two non-existent external ceph clusters - - cluster: 'ceph2' - fsid: 'af25554b-42f6-4d2b-9b9b-d08a1132d3e8' - external_cluster_mon_ips: '172.18.0.5,172.18.0.6,172.18.0.7' - keys: - - name: "client.openstack" - caps: - mgr: "allow *" - mon: "profile rbd" - osd: "osd: profile rbd pool=volumes, profile rbd pool=backups, profile rbd pool=vms, profile rbd pool=images" - key: "AQCwmeRcAAAAABAA6SQU/bGqFjlfLro5KxrB1Q==" - mode: "0600" - dashboard_enabled: false - - cluster: 'ceph3' - fsid: 'e2cba068-5f14-4b0f-b047-acf375c0004a' - external_cluster_mon_ips: '172.18.0.8,172.18.0.9,172.18.0.10' - keys: - - name: "client.openstack" - caps: - mgr: "allow *" - mon: "profile rbd" - osd: "osd: profile rbd pool=volumes, profile rbd pool=backups, profile rbd pool=vms, profile rbd pool=images" - key: "AQCwmeRcAAAAABAA6SQU/bGqFjlfLro5KxrB2Q==" - mode: "0600" - dashboard_enabled: false - NovaEnableRbdBackend: true - CinderEnableRbdBackend: true - CinderRbdBackendName: tripleo_ceph,tripleo_ceph2,tripleo_ceph3 - CinderRbdMultiConfig: - tripleo_ceph2: - CephClusterName: ceph2 - CephClusterFSID: af25554b-42f6-4d2b-9b9b-d08a1132d3e8 - CinderRbdAvailabilityZone: ceph2-AZ - tripleo_ceph3: - CephClusterName: ceph3 - CephClusterFSID: e2cba068-5f14-4b0f-b047-acf375c0004a - CinderRbdAvailabilityZone: ceph3-AZ - CinderBackupBackend: ceph - GlanceEnabledImportMethods: 'glance-direct,web-download,copy-image' - # For copy-image method, we are overriding the policy here to allow - # everyone and every type of image (private or public) to copy. - # This way we will be able to test copy image via non-admin as well - # as on private images. The policy is default to admin only in glance - GlanceApiPolicies: - glance-copy_image: - key: "copy_image" - value: "" - GlanceBackend: cinder - GlanceStoreDescription: 'Cinder glance store' - GlanceMultistoreConfig: - rbd_store: - GlanceBackend: rbd - GlanceStoreDescription: 'RBD glance store' - CephClientUserName: 'glance' - CephClusterName: ceph - GnocchiBackend: rbd - CinderEnableIscsiBackend: false - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. Usage of * - * this system may be monitored and recorded by system personnel. 
* - * Anyone using this system expressly consents to such monitoring * - * and is advised that if such monitoring reveals possible * - * evidence of criminal activity, system personnel may provide * - * the evidence from such monitoring to law enforcement officials.* - ****************************************************************** - EnableSTF: true - MetricsQdrAddresses: - - prefix: collectd - distribution: multicast - MetricsQdrSSLProfiles: - - name: sslProfile - CollectdConnectionType: amqp1 - CollectdAmqpInterval: 5 - CollectdDefaultPollingInterval: 5 - CollectdEnableSensubility: true - CollectdSensubilityTransport: amqp1 - CollectdSensubilityLogLevel: DEBUG - CollectdEnableLibpodstats: true - CollectdAmqpInstances: - notify: - notify: true - format: JSON - presettle: false - telemetry: - format: JSON - presettle: false - CollectdAmqpSendQueueLimit: 40 - LoggingServers: - - host: 127.0.0.1 - port: 24224 - TtyValues: - - console - - tty1 - - tty2 - - tty3 - - tty4 - - tty5 - - tty6 - ContainerCli: podman - CephAdmVerbose: true - CephConfigOverrides: - # put logs in /var/log/ceph/ - log_to_file: true - mon_cluster_log_to_file: true - # disable logging to journald so we don't log twice - log_to_stderr: false - mon_cluster_log_to_stderr: false - log_to_journald: false - mon_cluster_log_to_journald: false - # test values - anotherkey: anothervalue - global: - globalkey: globalvalue - osd: - osdkey: osdvalue - mon: - mon_warn_on_pool_no_redundancy: false - mon_warn_on_insecure_global_id_reclaim_allowed: false - NfsUrl: 127.0.0.1 - CephMsgrSecureMode: true - CephConfigPath: "/etc/ceph" - DisableCephadm: true - LVMFilterEnabled: true - LVMFilterAllowlist: - - /dev/loop2 - - /dev/loop3 - - /dev/loop4 - MultipathdEnable: true - NovaLibvirtVolumeUseMultipath: true - NovaShowHostStatus: all - NovaApiHostStatusPolicy: > - (role:reader and system_scope:all) or (role:reader and project_id:%(project_id)s) - RsyslogElasticsearchSetting: - Server: '127.0.0.1:9200' diff --git a/ci/environments/scenario002-standalone.yaml b/ci/environments/scenario002-standalone.yaml deleted file mode 100644 index 0063381918..0000000000 --- a/ci/environments/scenario002-standalone.yaml +++ /dev/null @@ -1,68 +0,0 @@ -resource_registry: - OS::TripleO::Services::AodhApi: ../../deployment/aodh/aodh-api-container-puppet.yaml - OS::TripleO::Services::AodhEvaluator: ../../deployment/aodh/aodh-evaluator-container-puppet.yaml - OS::TripleO::Services::AodhListener: ../../deployment/aodh/aodh-listener-container-puppet.yaml - OS::TripleO::Services::AodhNotifier: ../../deployment/aodh/aodh-notifier-container-puppet.yaml - OS::TripleO::Services::CeilometerAgentCentral: ../../deployment/ceilometer/ceilometer-agent-central-container-puppet.yaml - OS::TripleO::Services::CeilometerAgentNotification: ../../deployment/ceilometer/ceilometer-agent-notification-container-puppet.yaml - OS::TripleO::Services::ComputeCeilometerAgent: ../../deployment/ceilometer/ceilometer-agent-compute-container-puppet.yaml - OS::TripleO::Services::GnocchiApi: ../../deployment/gnocchi/gnocchi-api-container-puppet.yaml - OS::TripleO::Services::GnocchiMetricd: ../../deployment/gnocchi/gnocchi-metricd-container-puppet.yaml - OS::TripleO::Services::GnocchiStatsd: ../../deployment/gnocchi/gnocchi-statsd-container-puppet.yaml - OS::TripleO::Services::HeatApi: ../../deployment/heat/heat-api-container-puppet.yaml - OS::TripleO::Services::HeatApiCfn: ../../deployment/heat/heat-api-cfn-container-puppet.yaml - OS::TripleO::Services::HeatEngine: 
../../deployment/heat/heat-engine-container-puppet.yaml - OS::TripleO::Services::BarbicanApi: ../../deployment/barbican/barbican-api-container-puppet.yaml - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml - OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml - OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-pacemaker-puppet.yaml - OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-pacemaker-puppet.yaml - OS::TripleO::Services::BarbicanBackendSimpleCrypto: ../../deployment/barbican/barbican-backend-simple-crypto-puppet.yaml - # NOTE(mwhahaha): cheat and use the horizon "service" for our test service - # since disable horizon. This allows us to not have to keep some test service - # in the defaults just for this case. - OS::TripleO::Services::Horizon: ../../deployment/tests/test-container-volume.yaml - -parameter_defaults: - StandaloneExtraConfig: - # NOTE(sileht): To decrease the time test_telemetry_integration takes We - # configure Ceilometer to poll more, We configure the 'high' Gnocchi - # archive policy to keep 1 point every 60s. The test will take 2 minutes - # instead of 10 minutes. Note that tempest telemetry.alarm_granularity must - # in sync with the archive policy, 60s too. - ceilometer::agent::polling::polling_interval: 15 - NotificationDriver: 'messagingv2' - ManagePolling: true - ManagePipeline: true - CeilometerEnableGnocchi: true - PipelinePublishers: - - gnocchi://?archive_policy=ceilometer-high-rate - EventPipelinePublishers: - - gnocchi://?archive_policy=ceilometer-high-rate - Debug: true - DockerPuppetDebug: true - SwiftCeilometerPipelineEnabled: false - BarbicanSimpleCryptoGlobalDefault: true - ContainerCli: podman - # Test mounting a container volume into the heat api container - ContainerTestVolumeName: test_volume - CinderVolumeOptVolumes: - - test_volume:/testvol - GlanceApiOptVolumes: - - test_volume:/testvol - HeatApiOptVolumes: - - test_volume:/testvol - HeatEngineApiOptVolumes: - - test_volume:/testvol - NeutronApiOptVolumes: - - test_volume:/testvol - NovaComputeOptVolumes: - - test_volume:/testvol - StandaloneExtraGroupVars: - cinder_fake_group_var_one: var_one_override - StandaloneParameters: - LVMFilterEnabled: true - LVMFilterAllowlist: - - /dev/allowed - NovaShowHostStatus: unknown-only diff --git a/ci/environments/scenario003-standalone.yaml b/ci/environments/scenario003-standalone.yaml deleted file mode 100644 index 2ecfd26f43..0000000000 --- a/ci/environments/scenario003-standalone.yaml +++ /dev/null @@ -1,48 +0,0 @@ -resource_registry: - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::Horizon: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml - OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml - OS::TripleO::Services::DesignateApi: ../../deployment/designate/designate-api-container-puppet.yaml - OS::TripleO::Services::DesignateCentral: 
../../deployment/designate/designate-central-container-puppet.yaml - OS::TripleO::Services::DesignateProducer: ../../deployment/designate/designate-producer-container-puppet.yaml - OS::TripleO::Services::DesignateWorker: ../../deployment/designate/designate-worker-container-puppet.yaml - OS::TripleO::Services::DesignateMDNS: ../../deployment/designate/designate-mdns-container-puppet.yaml - OS::TripleO::Services::DesignateBind: ../../deployment/designate/designate-bind-container.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml - OS::TripleO::Services::Unbound: ../../deployment/unbound/unbound-container-ansible.yaml - -parameter_defaults: - Debug: true - # we don't deploy Swift so we switch to file backend. - GlanceBackend: 'file' - KeystoneTokenProvider: 'fernet' - SwiftCeilometerPipelineEnabled: false - NotificationDriver: 'noop' - NeutronPluginExtensions: qos,port_security,dns_domain_ports - NotifyPort: 5672 - ContainerCli: podman - DesignateBackendListenIPs: ["192.168.24.20"] - ExtraFirewallRules: - '301 allow arbitrary tcp rule': - dport: 12345 - proto: tcp - source: 127.0.0.1 - action: insert - '302 allow arbitrary udp rule': - dport: 12345 - proto: udp - source: 127.0.0.1 - action: append - StandaloneParameters: - ExtraFirewallRules: - '303 allow arbitrary tcp rule for controller': - dport: 12347 - proto: tcp - source: 127.0.0.1 - action: insert diff --git a/ci/environments/scenario004-standalone.yaml b/ci/environments/scenario004-standalone.yaml deleted file mode 100644 index 8dd0701af7..0000000000 --- a/ci/environments/scenario004-standalone.yaml +++ /dev/null @@ -1,87 +0,0 @@ -resource_registry: - OS::TripleO::Services::Aide: ../../deployment/aide/aide-baremetal-ansible.yaml - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::Redis: OS::Heat::None - OS::TripleO::Services::Horizon: ../../deployment/horizon/horizon-container-puppet.yaml - OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml - OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml - OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml - OS::TripleO::Services::CephGrafana: ../../deployment/cephadm/ceph-grafana.yaml - OS::TripleO::Services::CephMds: ../../deployment/cephadm/ceph-mds.yaml - OS::TripleO::Services::CephNfs: ../../deployment/cephadm/ceph-nfs.yaml - OS::TripleO::Services::CephRgw: ../../deployment/cephadm/ceph-rgw.yaml - OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - OS::TripleO::Services::ManilaApi: ../../deployment/manila/manila-api-container-puppet.yaml - OS::TripleO::Services::ManilaScheduler: ../../deployment/manila/manila-scheduler-container-puppet.yaml - OS::TripleO::Services::ManilaShare: ../../deployment/manila/manila-share-pacemaker-puppet.yaml - OS::TripleO::Services::ManilaBackendCephFs: ../../deployment/manila/manila-backend-cephfs.yaml - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml - OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml - OS::TripleO::Services::HAproxy: ../../deployment/haproxy/haproxy-pacemaker-puppet.yaml - 
OS::TripleO::Services::Pacemaker: ../../deployment/pacemaker/pacemaker-baremetal-puppet.yaml - OS::TripleO::Services::PacemakerRemote: ../../deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml - OS::TripleO::Services::Clustercheck: ../../deployment/pacemaker/clustercheck-container-puppet.yaml - OS::TripleO::Services::MySQL: ../../deployment/database/mysql-pacemaker-puppet.yaml - OS::TripleO::Services::LoginDefs: ../../deployment/login-defs/login-defs-baremetal-ansible.yaml - -parameter_defaults: - GlanceSparseUploadEnabled: true - ManagePolling: true - Debug: true - CephEnableDashboard: false - CephDashboardPort: 8445 - CephAdmVerbose: true - GrafanaDashboardPort: 3200 - DeployedCeph: true - CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' - CephRgwKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ==' - NovaEnableRbdBackend: true - GlanceBackend: rbd - CeilometerEnableGnocchi: true - GnocchiBackend: rbd - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. Usage of * - * this system may be monitored and recorded by system personnel. * - * Anyone using this system expressly consents to such monitoring * - * and is advised that if such monitoring reveals possible * - * evidence of criminal activity, system personnel may provide * - * the evidence from such monitoring to law enforcement officials.* - ****************************************************************** - CollectdExtraPlugins: - - rrdtool - LoggingServers: - - host: 127.0.0.1 - port: 24224 - TtyValues: - - console - - tty1 - - tty2 - - tty3 - - tty4 - - tty5 - - tty6 - CephConfigOverrides: - # put logs in /var/log/ceph/ - log_to_file: true - mon_cluster_log_to_file: true - # disable logging to journald so we don't log twice - log_to_stderr: false - mon_cluster_log_to_stderr: false - log_to_journald: false - mon_cluster_log_to_journald: false - # test values - globalkey: globalvalue - anotherkey: anothervalue - mon: - mon_warn_on_pool_no_redundancy: false - mon_warn_on_insecure_global_id_reclaim_allowed: false - ManilaCephFSCephFSProtocolHelperType: 'NFS' - DisableCephadm: true - CephConfigPath: "/etc/ceph" - CephObservabilityDataOnly: true diff --git a/ci/environments/scenario007-multinode-containers.yaml b/ci/environments/scenario007-multinode-containers.yaml deleted file mode 100644 index b65f5f7e49..0000000000 --- a/ci/environments/scenario007-multinode-containers.yaml +++ /dev/null @@ -1,114 +0,0 @@ -resource_registry: - OS::TripleO::Services::NeutronMl2PluginBase: ../../deployment/neutron/neutron-plugin-ml2.yaml - OS::TripleO::Services::NeutronApi: ../../deployment/neutron/neutron-api-container-puppet.yaml - OS::TripleO::Services::NeutronCorePlugin: ../../deployment/neutron/neutron-plugin-ml2-container-puppet.yaml - OS::TripleO::Services::NeutronMetadataAgent: ../../deployment/neutron/neutron-metadata-container-puppet.yaml - OS::TripleO::Services::NeutronOvsAgent: ../../deployment/neutron/neutron-ovs-agent-container-puppet.yaml - OS::TripleO::Services::NeutronDhcpAgent: ../../deployment/neutron/neutron-dhcp-container-puppet.yaml - OS::TripleO::Services::NeutronL3Agent: ../../deployment/neutron/neutron-l3-container-puppet.yaml - OS::TripleO::Services::ComputeNeutronOvsAgent: ../../deployment/neutron/neutron-ovs-agent-container-puppet.yaml - - OS::TripleO::Services::NovaMigrationTarget: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - 
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - # Disable OVN services - OS::TripleO::Services::OVNController: OS::Heat::None - OS::TripleO::Services::OVNDBs: OS::Heat::None - OS::TripleO::Services::OVNMetadataAgent: OS::Heat::None - # environments/docker-ha.yaml enables the cinder service. So disable cinder services - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderBackup: OS::Heat::None - OS::TripleO::Services::BlockStorageCinderVolume: OS::Heat::None -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2' - ControllerServices: - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::Clustercheck - - OS::TripleO::Services::ContainerImagePrepare - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::Keystone - - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceApiInternal - - OS::TripleO::Services::HeatApi - - OS::TripleO::Services::HeatApiCfn - - OS::TripleO::Services::HeatEngine - - OS::TripleO::Services::MySQL - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::NeutronApi - - OS::TripleO::Services::NeutronCorePlugin - - OS::TripleO::Services::NeutronDhcpAgent - - OS::TripleO::Services::NeutronL3Agent - - OS::TripleO::Services::NeutronMetadataAgent - - OS::TripleO::Services::NeutronOvsAgent - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::OsloMessagingNotify - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::Memcached - - OS::TripleO::Services::Pacemaker - - OS::TripleO::Services::NovaConductor - - OS::TripleO::Services::NovaApi - - OS::TripleO::Services::PlacementApi - - OS::TripleO::Services::NovaMetadata - - OS::TripleO::Services::NovaScheduler - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::NovaCompute - - OS::TripleO::Services::NovaLibvirt - - OS::TripleO::Services::NovaMigrationTarget - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::Iscsid - - OS::TripleO::Services::Multipathd - ControllerExtraConfig: - nova::compute::libvirt::services::libvirt_virt_type: qemu - nova::compute::libvirt::virt_type: qemu - # Required for Centos 7.3 and Qemu 2.6.0 - nova::compute::libvirt::cpu_mode: 'none' - # For ML2/OVS - NeutronMechanismDrivers: 'openvswitch' - NeutronPluginExtensions: "qos,port_security" - NeutronTypeDrivers: 'vxlan,vlan,flat,gre' - NeutronNetworkType: 'vxlan' - NeutronServicePlugins: 'router,qos,segments,trunk' - NeutronVniRanges: ['1:4094', ] - NeutronEnableDVR: false - DhcpAgentNotification: true - KernelIpNonLocalBind: 0 - Debug: true - DockerPuppetDebug: True - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. Usage of * - * this system may be monitored and recorded by system personnel. 
* - * Anyone using this system expressly consents to such monitoring * - * and is advised that if such monitoring reveals possible * - * evidence of criminal activity, system personnel may provide * - * the evidence from such monitoring to law enforcement officials.* - ****************************************************************** - MessageOfTheDay: | - This is a test message of MessageOfTheDay - ControllerParameters: - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. Usage of * - * this system may be monitored and recorded by system personnel. * - * Anyone using this system expressly consents to such monitoring * - * and is advised that if such monitoring reveals possible * - * evidence of criminal activity, system personnel may provide * - * the evidence from such monitoring to law enforcement officials.* - ****************************************************************** - This node is a Controller node. - MessageOfTheDay: | - This is a test message of MessageOfTheDay - This node is a Controller node. - # we don't deploy Swift so we switch to file backend. - GlanceBackend: 'file' - IronicCleaningDiskErase: 'metadata' - NotificationDriver: 'noop' diff --git a/ci/environments/scenario007-standalone.yaml b/ci/environments/scenario007-standalone.yaml deleted file mode 100644 index 13ace9c438..0000000000 --- a/ci/environments/scenario007-standalone.yaml +++ /dev/null @@ -1,51 +0,0 @@ -resource_registry: - OS::TripleO::Services::NeutronMl2PluginBase: ../../deployment/neutron/neutron-plugin-ml2.yaml - OS::TripleO::Services::NeutronApi: ../../deployment/neutron/neutron-api-container-puppet.yaml - OS::TripleO::Services::NeutronCorePlugin: ../../deployment/neutron/neutron-plugin-ml2-container-puppet.yaml - OS::TripleO::Services::NeutronMetadataAgent: ../../deployment/neutron/neutron-metadata-container-puppet.yaml - OS::TripleO::Services::NeutronOvsAgent: ../../deployment/neutron/neutron-ovs-agent-container-puppet.yaml - OS::TripleO::Services::NeutronDhcpAgent: ../../deployment/neutron/neutron-dhcp-container-puppet.yaml - OS::TripleO::Services::NeutronL3Agent: ../../deployment/neutron/neutron-l3-container-puppet.yaml - - OS::TripleO::Services::BlockStorageCinderVolume: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderBackup: OS::Heat::None - OS::TripleO::Services::Horizon: OS::Heat::None - OS::TripleO::Services::NovaMigrationTarget: OS::Heat::None - # Disable OVN services - OS::TripleO::Services::OVNController: OS::Heat::None - OS::TripleO::Services::OVNDBs: OS::Heat::None - OS::TripleO::Services::OVNMetadataAgent: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None -parameter_defaults: - DhcpAgentNotification: true - NeutronMechanismDrivers: ['openvswitch'] - NeutronTypeDrivers: 'vxlan,vlan,flat,gre' - NeutronNetworkType: 'vxlan' - NeutronPluginExtensions: "qos,port_security" - NeutronServicePlugins: 'router,qos,segments,trunk' - NeutronVniRanges: ['1:4094', ] - NeutronEnableDVR: false - NeutronL3HA: true - KernelIpNonLocalBind: 0 - Debug: true - DockerPuppetDebug: True - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. 
Usage of * - * this system may be monitored and recorded by system personnel. * - * Anyone using this system expressly consents to such monitoring * - * and is advised that if such monitoring reveals possible * - * evidence of criminal activity, system personnel may provide * - * the evidence from such monitoring to law enforcement officials.* - ****************************************************************** - MessageOfTheDay: | - This is a test message of MessageOfTheDay - # we don't deploy Swift so we switch to file backend. - GlanceBackend: 'file' - NotificationDriver: 'noop' - ContainerCli: podman diff --git a/ci/environments/scenario010-multinode-containers.yaml b/ci/environments/scenario010-multinode-containers.yaml deleted file mode 100644 index 016877c08e..0000000000 --- a/ci/environments/scenario010-multinode-containers.yaml +++ /dev/null @@ -1,104 +0,0 @@ -resource_registry: - OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml - OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml - OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml - OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderBackup: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::Clustercheck: ../../deployment/pacemaker/clustercheck-container-puppet.yaml - OS::TripleO::Services::HAproxy: ../../deployment/haproxy/haproxy-pacemaker-puppet.yaml - OS::TripleO::Services::MySQL: ../../deployment/database/mysql-pacemaker-puppet.yaml - OS::TripleO::Services::OctaviaApi: ../../deployment/octavia/octavia-api-container-puppet.yaml - OS::TripleO::Services::OctaviaDeploymentConfig: ../../deployment/octavia/octavia-deployment-config.yaml - OS::TripleO::Services::OctaviaHealthManager: ../../deployment/octavia/octavia-health-manager-container-puppet.yaml - OS::TripleO::Services::OctaviaHousekeeping: ../../deployment/octavia/octavia-housekeeping-container-puppet.yaml - OS::TripleO::Services::OctaviaWorker: ../../deployment/octavia/octavia-worker-container-puppet.yaml - OS::TripleO::Services::Pacemaker: ../../deployment/pacemaker/pacemaker-baremetal-puppet.yaml - OS::TripleO::Services::PacemakerRemote: ../../deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-container-puppet.yaml - OS::TripleO::Services::SwiftDispersion: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2' - ControllerServices: - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::Clustercheck - - OS::TripleO::Services::ContainerImagePrepare - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::Keystone - - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceApiInternal - - OS::TripleO::Services::MySQL - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::NeutronApi - - OS::TripleO::Services::NeutronCorePlugin - - OS::TripleO::Services::OVNDBs - - OS::TripleO::Services::OVNController - - OS::TripleO::Services::OVNMetadataAgent - - OS::TripleO::Services::OctaviaApi - - 
OS::TripleO::Services::OctaviaDeploymentConfig - - OS::TripleO::Services::OctaviaHealthManager - - OS::TripleO::Services::OctaviaHousekeeping - - OS::TripleO::Services::OctaviaWorker - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::OsloMessagingNotify - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::Memcached - - OS::TripleO::Services::Pacemaker - - OS::TripleO::Services::NovaConductor - - OS::TripleO::Services::NovaApi - - OS::TripleO::Services::PlacementApi - - OS::TripleO::Services::NovaMetadata - - OS::TripleO::Services::NovaScheduler - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::NovaCompute - - OS::TripleO::Services::NovaLibvirt - - OS::TripleO::Services::NovaMigrationTarget - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::CephMgr - - OS::TripleO::Services::CephMon - - OS::TripleO::Services::CephOSD - - OS::TripleO::Services::CephClient - - OS::TripleO::Services::Iscsid - - OS::TripleO::Services::Multipathd - ControllerExtraConfig: - nova::compute::libvirt::services::libvirt_virt_type: qemu - nova::compute::libvirt::virt_type: qemu - octavia::controller::connection_retry_interval: 10 - Debug: true - CephPools: - - name: altrbd - rule_name: replicated_rule - CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19' - CephClusterName: mycephcluster - CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' - NovaEnableRbdBackend: true - CeilometerEnableGnocchi: true - GlanceBackend: rbd - GnocchiRbdPoolName: metrics - SwiftCeilometerPipelineEnabled: false - NeutronEnableForceMetadata: true - OctaviaAmphoraImageFilename: /home/zuul/amphora.qcow2 - OctaviaManageNovaFlavor: true - # For now, we hardcode it but soon it'll be generated in tripleo-common - OctaviaCaKeyPassphrase: 'upstreamci' - OctaviaGenerateCerts: true - ContainerCli: podman - CephConfigPath: "/etc/ceph" - CephClientConfigVars: "{{ playbook_dir }}/cephadm/ceph_client.yml" - CephSpecFqdn: true - CephOsdSpec: - data_devices: - paths: - - /dev/ceph_vg/ceph_lv_data diff --git a/ci/environments/scenario010-standalone.yaml b/ci/environments/scenario010-standalone.yaml deleted file mode 100644 index f31ee0896d..0000000000 --- a/ci/environments/scenario010-standalone.yaml +++ /dev/null @@ -1,70 +0,0 @@ -resource_registry: - OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml - OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml - OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml - OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderBackup: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::Horizon: ../../deployment/horizon/horizon-container-puppet.yaml - OS::TripleO::Services::OctaviaApi: ../../deployment/octavia/octavia-api-container-puppet.yaml - OS::TripleO::Services::OctaviaDeploymentConfig: ../../deployment/octavia/octavia-deployment-config.yaml - OS::TripleO::Services::OctaviaHealthManager: ../../deployment/octavia/octavia-health-manager-container-puppet.yaml - OS::TripleO::Services::OctaviaHousekeeping: ../../deployment/octavia/octavia-housekeeping-container-puppet.yaml - OS::TripleO::Services::OctaviaWorker: 
../../deployment/octavia/octavia-worker-container-puppet.yaml - OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-container-puppet.yaml - OS::TripleO::Services::SwiftDispersion: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - -parameter_defaults: - StandaloneExtraConfig: - octavia::controller::connection_retry_interval: 10 - OctaviaAmphoraSshKeyFile: /home/zuul/.ssh/id_rsa.pub - OctaviaAmphoraImageFilename: /home/zuul/amphora.qcow2 - OctaviaCaKeyPassphrase: 'upstreamci' - OctaviaManageNovaFlavor: true - OctaviaGenerateCerts: true - NodeDataLookup: - AB4114B1-9C9D-409A-BEFB-D88C151BF2C3: {"foo": "bar"} - 8CF1A7EA-7B4B-4433-AC83-17675514B1B8: {"foo2": "bar2"} - Debug: true - HideSensitiveLogs: false - #NOTE: These ID's and keys should be regenerated for - # a production deployment. What is here is suitable for - # developer and CI testing only. - DeployedCeph: true - CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' - #NOTE: This scenario uses rbd backend to test the deployment feature to - # upload the amphora image in raw format instead of qcow2 format. - # (See https://bugs.launchpad.net/tripleo/+bug/1778303 for details) - NovaEnableRbdBackend: true - GlanceBackend: rbd - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. Usage of * - * this system may be monitored and recorded by system personnel. 
* - * Anyone using this system expressly consents to such monitoring * - * and is advised that if such monitoring reveals possible * - * evidence of criminal activity, system personnel may provide * - * the evidence from such monitoring to law enforcement officials.* - ****************************************************************** - CollectdExtraPlugins: - - rrdtool - LoggingServers: - - host: 127.0.0.1 - port: 24224 - TtyValues: - - console - - tty1 - - tty2 - - tty3 - - tty4 - - tty5 - - tty6 - ContainerCli: podman - CephConfigPath: "/etc/ceph" diff --git a/ci/environments/scenario012-standalone.yaml b/ci/environments/scenario012-standalone.yaml deleted file mode 100644 index 6bb824ee11..0000000000 --- a/ci/environments/scenario012-standalone.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# touching file so CI runs scenario012 -resource_registry: - OS::TripleO::Services::NovaIronic: ../../deployment/nova/nova-ironic-container-puppet.yaml - OS::TripleO::Services::IronicApi: ../../deployment/ironic/ironic-api-container-puppet.yaml - OS::TripleO::Services::IronicConductor: ../../deployment/ironic/ironic-conductor-container-puppet.yaml - OS::TripleO::Services::IronicPxe: ../../deployment/ironic/ironic-pxe-container-puppet.yaml - OS::TripleO::NodeExtraConfigPost: ../common/ironic_standalone_post.yaml - OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-container-puppet.yaml - OS::TripleO::Services::NeutronDhcpAgent: ../../deployment/neutron/neutron-dhcp-container-puppet.yaml - OS::TripleO::Services::NovaCompute: OS::Heat::None - OS::TripleO::Services::NovaLibvirt: OS::Heat::None - OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml - -parameter_defaults: - Debug: true - DockerPuppetDebug: True - NotificationDriver: 'noop' - SwiftCeilometerPipelineEnabled: false - IronicCleaningDiskErase: 'metadata' - NovaSchedulerDiscoverHostsInCellsInterval: 15 - NeutronMechanismDrivers: ovn - NeutronNetworkType: vlan - NeutronTypeDrivers: local,geneve,vlan,flat - DhcpAgentNotification: true - IronicApiMaxRetries: 180 - IronicDefaultNetworkInterface: neutron - IronicAutomatedClean: false - NeutronFlatNetworks: datacentre - NeutronNetworkVLANRanges: 'datacentre:500:599,tenant:300:399' diff --git a/ci/environments/scenario013-standalone.yaml b/ci/environments/scenario013-standalone.yaml deleted file mode 100644 index 562b42ae23..0000000000 --- a/ci/environments/scenario013-standalone.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# This scenario is same as scenario10 but without Ceph. 
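As the comment says, this scenario is scenario010 minus Ceph, and the delta follows the usual pattern: the Ceph-backed block-storage services are mapped to OS::Heat::None and Glance falls back to the file store. A condensed sketch of that delta, built only from mappings and parameters that appear in the file below:

resource_registry:
  OS::TripleO::Services::CinderVolume: OS::Heat::None
  OS::TripleO::Services::CinderBackup: OS::Heat::None
parameter_defaults:
  # No Ceph means no RBD pool to back images.
  GlanceBackend: 'file'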
-resource_registry: - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderBackup: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::Horizon: OS::Heat::None - OS::TripleO::Services::OctaviaApi: ../../deployment/octavia/octavia-api-container-puppet.yaml - OS::TripleO::Services::OctaviaDeploymentConfig: ../../deployment/octavia/octavia-deployment-config.yaml - OS::TripleO::Services::OctaviaHealthManager: ../../deployment/octavia/octavia-health-manager-container-puppet.yaml - OS::TripleO::Services::OctaviaHousekeeping: ../../deployment/octavia/octavia-housekeeping-container-puppet.yaml - OS::TripleO::Services::OctaviaWorker: ../../deployment/octavia/octavia-worker-container-puppet.yaml - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-container-puppet.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-container-puppet.yaml - OS::TripleO::Services::SwiftDispersion: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - -parameter_defaults: - StandaloneExtraConfig: - octavia::controller::connection_retry_interval: 10 - OctaviaAmphoraSshKeyFile: /home/zuul/.ssh/id_rsa.pub - OctaviaAmphoraImageFilename: /home/zuul/amphora.qcow2 - OctaviaCaKeyPassphrase: 'upstreamci' - OctaviaManageNovaFlavor: true - OctaviaGenerateCerts: true - Debug: true - GlanceBackend: 'file' - ContainerCli: podman diff --git a/ci/environments/standalone-ipa.yaml b/ci/environments/standalone-ipa.yaml deleted file mode 100644 index 31daf18851..0000000000 --- a/ci/environments/standalone-ipa.yaml +++ /dev/null @@ -1,22 +0,0 @@ -resource_registry: - OS::TripleO::Services::IpaClient: ../../deployment/ipa/ipaservices-baremetal-ansible.yaml - -parameter_defaults: - CloudDomain: "ooo.test" - CloudName: "overcloud.ooo.test" - CloudNameInternal: "overcloud.internalapi.ooo.test" - CloudNameStorage: "overcloud.storage.ooo.test" - CloudNameStorageManagement: "overcloud.storagemgmt.ooo.test" - CloudNameCtlplane: "overcloud.ctlplane.ooo.test" - IdMServer: "ipa.ooo.test" - IdMDomain: "ooo.test" - IdMAdminPassword: "fce95318204114530f31f885c9df588f" - # domain name used by the host - NeutronDnsDomain: "ooo.test" - PasswordAuthentication: 'yes' - HAProxyExternalNetwork: ctlplane - # Add setting for custom DNS server - # DnsServers: ["ip_for_dns_server"] - DnsSearchDomains: ["ooo.test"] - LocalContainerRegistry: "192.168.24.1" - AddVipsToEtcHosts: True diff --git a/ci/network_data.yaml b/ci/network_data.yaml deleted file mode 100644 index 576bb514aa..0000000000 --- a/ci/network_data.yaml +++ /dev/null @@ -1,50 +0,0 @@ -- name: External - name_lower: external - mtu: 1350 - vip: true - subnets: - external_subnet: - ip_subnet: 10.0.0.0/24 - gateway_ip: 10.0.0.1 - allocation_pools: - - start: 10.0.0.10 - end: 10.0.0.50 -- name: InternalApi - name_lower: internal_api - mtu: 1350 - vip: true - subnets: - internal_api_subnet: - ip_subnet: 172.17.0.0/24 - allocation_pools: - - start: 172.17.0.10 - end: 172.17.0.250 -- name: Storage - name_lower: storage - mtu: 1350 - vip: true - subnets: - storage_subnet: - ip_subnet: 172.18.0.0/24 - allocation_pools: - - start: 172.18.0.10 - end: 172.18.0.250 -- name: StorageMgmt - name_lower: storage_mgmt - mtu: 1350 - vip: true - subnets: - storage_mgmt_subnet: - ip_subnet: 172.19.0.0/24 - allocation_pools: 
- - start: 172.19.0.10 - end: 172.19.0.250 -- name: Tenant - name_lower: tenant - mtu: 1350 - subnets: - tenant_subnet: - ip_subnet: 172.16.0.0/24 - allocation_pools: - - start: 172.16.0.10 - end: 172.16.0.250 diff --git a/ci/network_data_v6.yaml b/ci/network_data_v6.yaml deleted file mode 100644 index 3862711c53..0000000000 --- a/ci/network_data_v6.yaml +++ /dev/null @@ -1,54 +0,0 @@ -- name: External - name_lower: external - mtu: 1350 - vip: true - ipv6: true - subnets: - external_subnet: - ipv6_subnet: 2001:db8:fd00:1000::/64 - gateway_ipv6: 2001:db8:fd00:1000::1 - ipv6_allocation_pools: - - start: 2001:db8:fd00:1000::10 - end: 2001:db8:fd00:1000:ffff:ffff:ffff:fffe -- name: InternalApi - name_lower: internal_api - mtu: 1350 - vip: true - ipv6: true - subnets: - internal_api_subnet: - ipv6_subnet: fd00:fd00:fd00:2000::/64 - ipv6_allocation_pools: - - start: fd00:fd00:fd00:2000::10 - end: fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe -- name: Storage - name_lower: storage - mtu: 1350 - vip: true - ipv6: true - subnets: - storage_subnet: - ipv6_subnet: fd00:fd00:fd00:3000::/64 - ipv6_allocation_pools: - - start: fd00:fd00:fd00:3000::10 - end: fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe -- name: StorageMgmt - name_lower: storage_mgmt - mtu: 1350 - vip: true - ipv6: true - subnets: - storage_mgmt_subnet: - ipv6_subnet: fd00:fd00:fd00:4000::/64 - ipv6_allocation_pools: - - start: fd00:fd00:fd00:4000::10 - end: fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe -- name: Tenant - name_lower: tenant - mtu: 1350 - subnets: - tenant_subnet: - ip_subnet: 172.16.0.0/24 - allocation_pools: - - start: 172.16.0.10 - end: 172.16.0.250 diff --git a/ci/vip_data.yaml b/ci/vip_data.yaml deleted file mode 100644 index 653130bd1d..0000000000 --- a/ci/vip_data.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- network: storage_mgmt -- network: internal_api -- network: storage -- ip_address: 10.0.0.5 # CI jobs rely on this fixed ip for the external VIP - network: external -- network: ctlplane diff --git a/ci/vip_data_v6.yaml b/ci/vip_data_v6.yaml deleted file mode 100644 index 74c69c629f..0000000000 --- a/ci/vip_data_v6.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- network: storage_mgmt -- network: internal_api -- network: storage -- ip_address: 2001:DB8:FD00:1000:0:0:0:5 # CI jobs rely on this fixed ip for the external VIP - network: external -- network: ctlplane diff --git a/common/common-container-config-scripts.yaml b/common/common-container-config-scripts.yaml deleted file mode 100644 index 95b50cb56a..0000000000 --- a/common/common-container-config-scripts.yaml +++ /dev/null @@ -1,25 +0,0 @@ -- name: Create fcontext entry for container-config-scripts - community.general.sefcontext: - target: "/var/lib/container-config-scripts(/.*)?" 
- setype: container_file_t - state: present - tags: - - container_config_scripts - -- name: Create /var/lib/container-config-scripts - become: true - file: - path: /var/lib/container-config-scripts - state: directory - setype: container_file_t - tags: - - container_config_scripts - -- name: Write container config scripts - become: true - no_log: True - tripleo_container_config_scripts: - config_data: "{{ lookup('file', tripleo_role_name + '/container_config_scripts.yaml', errors='ignore') | default({}, True) | from_yaml }}" - config_dir: /var/lib/container-config-scripts - tags: - - container_config_scripts diff --git a/common/common-container-setup-tasks.yaml b/common/common-container-setup-tasks.yaml deleted file mode 100644 index dd24a31d5f..0000000000 --- a/common/common-container-setup-tasks.yaml +++ /dev/null @@ -1,104 +0,0 @@ -- name: Create and ensure setype for /var/log/containers directory - become: true - file: - path: /var/log/containers - state: directory - selevel: s0 - mode: 0750 - tags: - - host_config - -- name: Create ContainerLogStdoutPath directory - become: true - file: - path: "{{ container_log_stdout_path }}" - state: directory - selevel: s0 - tags: - - host_config - -- name: Create /var/lib/tripleo-config directory - become: true - file: - path: /var/lib/tripleo-config - state: directory - setype: container_file_t - selevel: s0 - recurse: true - tags: - - host_config - - container_config - - container_config_tasks - - container_config_scripts - - container_startup_configs - -# The container config files -# /var/lib/container-startup-configs.json is removed as we now write -# per-step files instead -- name: Clean old /var/lib/container-startup-configs.json file - become: true - file: - path: /var/lib/container-startup-configs.json - state: absent - tags: - - container_startup_configs - -# For legacy, can be removed in Train cycle -- name: Clean old /var/lib/docker-container-startup-configs.json file - become: true - file: - path: /var/lib/docker-container-startup-configs.json - state: absent - tags: - - container_startup_configs - -- import_tasks: common_container_config_scripts.yaml - -# This file location is deprecated and the new location is now: -# /var/lib/tripleo-config/container-startup-config/step_X/.json -# Can be removed in V cycle -- name: Write /var/lib/tripleo-config/container-startup-config-readme.txt - become: true - no_log: True - copy: - content: "Container startup configs moved to /var/lib/tripleo-config/container-startup-config" - dest: /var/lib/tripleo-config/container-startup-config-readme.txt - force: true - mode: '0600' - tags: - - container_startup_configs - -- name: Generate startup configs files per step and per container - become: true - container_startup_config: - config_base_dir: /var/lib/tripleo-config/container-startup-config - config_data: "{{ lookup('template', (tripleo_role_name + '/docker_config.yaml'), errors='ignore') | default({}, True) | from_yaml }}" - tags: - - container_startup_configs - -- name: Create /var/lib/kolla/config_files directory - become: true - file: - path: /var/lib/kolla/config_files - state: directory - setype: container_file_t - selevel: s0 - recurse: true - tags: - - container_startup_configs - -- name: Create /var/lib/config-data directory - become: true - file: - path: /var/lib/config-data - mode: 0755 - state: directory - setype: container_file_t - selevel: s0 - -- name: Write container config json files - become: true - tripleo_container_configs: - config_data: "{{ lookup('file', tripleo_role_name + 
'/kolla_config.yaml', errors='ignore') | default({}, True) | from_yaml }}" - tags: - - container_startup_configs diff --git a/common/container-puppet.sh b/common/container-puppet.sh deleted file mode 100644 index b991b47a49..0000000000 --- a/common/container-puppet.sh +++ /dev/null @@ -1,180 +0,0 @@ -#!/bin/bash -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -set -e -[ "$DEBUG" = "false" ] || set -x -rm -rf /etc/puppet -mkdir -p /etc/puppet -cp -dR /tmp/puppet-etc/* /etc/puppet -rm -Rf /etc/puppet/ssl # not in use and causes permission errors -echo "{\"step\": $STEP}" > /etc/puppet/hieradata/docker_puppet.json -TAGS="" -if [ -n "$PUPPET_TAGS" ]; then - TAGS="--tags \"$PUPPET_TAGS\"" -fi - -if [ ! -z ${STEP_CONFIG+x} ]; then - # package and service should NOT be managed inside containers - echo -e "noop_resource('package')" | tee /etc/config.pp - echo -e "noop_resource('service')" | tee -a /etc/config.pp - - echo -e "${STEP_CONFIG}" | tee -a /etc/config.pp -fi - -CHECK_MODE="" -if [ -d "/tmp/puppet-check-mode" ]; then - mkdir -p /etc/puppet/check-mode - cp -a /tmp/puppet-check-mode/* /etc/puppet/check-mode - CHECK_MODE="--hiera_config /etc/puppet/check-mode/hiera.yaml" -fi - -# Create a reference timestamp to easily find all files touched by -# puppet. The sync ensures we get all the files we want due to -# different timestamp. -conf_data_path="/var/lib/config-data/${NAME}" -origin_of_time="${conf_data_path}.origin_of_time" -touch $origin_of_time -sync - -export NET_HOST="${NET_HOST:-false}" -set +e -if [ "$NET_HOST" == "false" ]; then - export FACTER_hostname=$HOSTNAME -fi -export FACTER_uuid=$(cat /sys/class/dmi/id/product_uuid | tr '[:upper:]' '[:lower:]') -echo 'Running puppet' -# FIXME(bogdando): stdout may be falling behind of the logged syslog messages -set -x -/usr/bin/puppet apply --summarize \ - --detailed-exitcodes \ - --color=false \ - --modulepath=/etc/puppet/modules:/usr/share/openstack-puppet/modules \ - $TAGS \ - $CHECK_MODE \ - /etc/config.pp \ - 2>&1 | logger -s -t puppet-user -rc=${PIPESTATUS[0]} -[ "$DEBUG" = "false" ] && set +x -set -e -if [ $rc -ne 2 -a $rc -ne 0 ]; then - exit $rc -fi - -verbosity="" -[ "$DEBUG" = "false" ] || verbosity="-v" - -# Disables archiving -if [ -z "$NO_ARCHIVE" ]; then - archivedirs=("/etc" "/root" "/opt" "/var/www" "/var/spool/cron" "/var/lib/nova/.ssh") - rsync_srcs="" - for d in "${archivedirs[@]}"; do - if [ -d "$d" ]; then - rsync_srcs+=" $d" - fi - done - # On stack update, if a password was changed in a config file, - # some services (e.g. mysql) must change their internal state - # (e.g. password in mysql DB) when tripleo_container_manage restarts them; - # and they need the old password to achieve that. - # For those services, we update the config hash to notify - # tripleo_container_manage that a restart is needed, but we do not update - # the password file in container-puppet if the file already existed - # before and let the service regenerate it instead. 
- password_files="/root/.my.cnf" - - exclude_files="" - for p in $password_files; do - if [ -f "$p" -a -f "${conf_data_path}$p" ]; then - exclude_files+=" --exclude=$p" - fi - done - - # Exclude read-only mounted directories/files which we do not want - # to copy or delete. - ro_files="/etc/puppet/ /etc/puppetlabs/ /opt/puppetlabs/ /etc/pki/ca-trust/extracted " - ro_files+="/etc/pki/ca-trust/source/anchors /etc/pki/tls/certs/ca-bundle.crt " - ro_files+="/etc/pki/tls/certs/ca-bundle.trust.crt /etc/pki/tls/cert.pem " - ro_files+="/etc/hosts /etc/localtime /etc/hostname " - # /etc/openldap is bind mounted with "ro" option in keystone containers. - ro_files+="/etc/openldap" - for ro in $ro_files; do - if [ -e "$ro" ]; then - exclude_files+=" --exclude=$ro" - fi - done - - echo "Evaluating config files to be removed for the $NAME configuration" - TMPFILE=$(mktemp /tmp/tmp.XXXXXXXXXX) - TMPFILE2=$(mktemp /tmp/tmp.XXXXXXXXXX) - trap 'rm -rf $TMPFILE $TMPFILE2' EXIT INT HUP - rsync -av -R --dry-run --delete-after $exclude_files $rsync_srcs ${conf_data_path} |\ - awk '/^deleting/ {print $2}' > $TMPFILE - - echo "Rsyncing config files from ${rsync_srcs} into ${conf_data_path}" - rsync -a $verbosity -R --delay-updates --delete-after $exclude_files $rsync_srcs ${conf_data_path} - - # Also make a copy of files modified during puppet run - echo "Gathering files modified after $(stat -c '%y' $origin_of_time)" - - # Purge obsoleted contents to maintain a fresh and filtered mirror - puppet_generated_path=/var/lib/config-data/puppet-generated/${NAME} - mkdir -p ${puppet_generated_path} - echo "Ensuring the removed config files are also purged in ${puppet_generated_path}:" - cat $TMPFILE | sort - cat $TMPFILE | xargs -n1 -r -I{} \ - bash -c "rm -rf ${puppet_generated_path}/{}" - exec 5>&1 - exec 1>$TMPFILE2 - find $rsync_srcs -newer $origin_of_time -not -path '/etc/puppet*' -print0 - exec 1>&5 - echo "Files modified during puppet run:" - cat $TMPFILE2 | xargs -0 printf "%s\n" | sort -h - echo "Rsyncing the modified files into ${puppet_generated_path}" - rsync -a $verbosity -R -0 --delay-updates --delete-after $exclude_files \ - --files-from=$TMPFILE2 / ${puppet_generated_path} - - # Cleanup any special files that might have been copied into place - # previously because fixes for LP#1860607 did not cleanup and required - # manual intervention if a container hit this. We can safely remove these - # files because they should be bind mounted into containers - for ro in $ro_files; do - if [ -e "${puppet_generated_path}/${ro}" ]; then - rm -rf "${puppet_generated_path}/${ro}" - fi - done - - # Write a checksum of the config-data dir, this is used as a - # salt to trigger container restart when the config changes - # note: while being excluded from the output, password files - # are still included in checksum computation - additional_checksum_files="" - excluded_original_passwords="" - for p in $password_files; do - if [ -f "$p" ]; then - additional_checksum_files+=" $p" - excluded_original_passwords+=" --exclude=/var/lib/config-data/*${p}" - fi - done - # We need to exclude the swift rings and backups as those change over time - # and containers do not need to restart if they change - EXCLUDE=--exclude='*/etc/swift/backups/*'\ --exclude='*/etc/swift/*.ring.gz'\ --exclude='*/etc/swift/*.builder'\ --exclude='*/etc/libvirt/passwd.db'\ ${excluded_original_passwords} - - # We need to repipe the tar command through 'tar xO' to force text - # output because otherwise the sed command cannot work. 
The sed is - # needed because puppet puts timestamps as comments in cron and - # parsedfile resources, hence triggering a change at every redeploy - tar -c --mtime='1970-01-01' $EXCLUDE -f - ${conf_data_path} $additional_checksum_files | tar xO | \ - sed '/^#.*HEADER.*/d; s/\#.HEADER.*//g' | md5sum | awk '{print $1}' > ${conf_data_path}.md5sum - tar -c --mtime='1970-01-01' $EXCLUDE -f - ${puppet_generated_path} $additional_checksum_files --mtime='1970-01-01' | tar xO \ - | sed '/^#.*HEADER.*/d; s/\#.HEADER.*//g' | md5sum | awk '{print $1}' > ${puppet_generated_path}.md5sum -fi diff --git a/common/deploy-steps-playbooks-common.yaml b/common/deploy-steps-playbooks-common.yaml deleted file mode 100644 index f53b00a466..0000000000 --- a/common/deploy-steps-playbooks-common.yaml +++ /dev/null @@ -1,101 +0,0 @@ ---- -# If a new play with potential facts gathering is added, make sure we set: -# any_errors_fatal: false -# ignore_unreachable: true -# So we don't break the MaxFailPercentage feature since these plays can't use -# Ansible strategies. - -- hosts: localhost - name: Check if required variables are defined - tasks: - - ansible.builtin.fail: - msg: deploy_source_host variable is not defined - when: deploy_source_host is not defined - - ansible.builtin.fail: - msg: deploy_target_host variable is not defined - when: deploy_target_host is not defined - # Keep in mind that you will still need to use defaults in order to be - # able to pass Ansible own syntax check for blocks like 'hosts'. - -- hosts: all - name: Clear cached facts - # We don't want to gather facts, just clear them - gather_facts: false - tasks: - - ansible.builtin.meta: clear_facts - tags: - - facts - -- hosts: "{{ deploy_source_host | default([]) }}:{{ deploy_target_host | default([]) }}" - name: Gather facts - strategy: tripleo_free - gather_facts: true - tasks: - - name: Set legacy facts - ansible.builtin.set_fact: - ansible_distribution: "{{ ansible_facts['distribution'] }}" - ansible_distribution_major_version: "{{ ansible_facts['distribution_major_version'] }}" - ansible_distribution_release: "{{ ansible_facts['distribution_release'] }}" - ansible_distribution_version: "{{ ansible_facts['distribution_version'] }}" - ansible_os_family: "{{ ansible_facts['os_family'] }}" - tags: - - facts - -- hosts: all - name: Load global variables - gather_facts: "{{ gather_facts | default(false) }}" - any_errors_fatal: false - ignore_unreachable: true - tasks: - - ansible.builtin.include_vars: global_vars.yaml # noqa: missing-import - no_log: true - tags: - - always - -- hosts: all - name: Include extra variables from files - gather_facts: "{{ gather_facts | default(false) }}" - any_errors_fatal: false - ignore_unreachable: true - tasks: - - name: Include Service VIP vars - ansible.builtin.include_vars: - file: "{{ playbook_dir }}/service_vip_vars.yaml" - name: service_vip_vars - ignore_errors: true - - name: Include OVN bridge MAC address variables - ansible.builtin.include_vars: - file: "{{ playbook_dir }}/ovn_bridge_mac_address_vars.yaml" - name: ovn_bridge_mac_address_vars - ignore_errors: true - tags: - - always - -- hosts: "{{ deploy_target_host | default([]) }}" - name: Render all_nodes data as group_vars for overcloud - gather_facts: "{{ gather_facts | default(false) }}" - # we don't skip errors or ignore unreachable on this one because it's - # delegated to localhost. 
localhost should never be unreachable (tm) - any_errors_fatal: True - tasks: - - name: Render all_nodes data as group_vars for overcloud - delegate_to: localhost - become: false - run_once: true - tripleo_all_nodes_data: - tags: - - facts - - always - -- hosts: "{{ deploy_target_host | default([]) }}" - name: Set all_nodes data as group_vars for overcloud - gather_facts: "{{ gather_facts | default(false) }}" - any_errors_fatal: false - ignore_unreachable: true - tasks: - - name: Set all_nodes data as group_vars for overcloud - ansible.builtin.include_vars: "{{ playbook_dir }}/group_vars/overcloud.json" - no_log: true - tags: - - facts - - always diff --git a/common/deploy-steps-tasks-step-0.yaml b/common/deploy-steps-tasks-step-0.yaml deleted file mode 100644 index 747fef834d..0000000000 --- a/common/deploy-steps-tasks-step-0.yaml +++ /dev/null @@ -1,46 +0,0 @@ -- name: Deploy step tasks for step 0 - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Deploy step tasks for step 0' to resume from this task - when: "tripleo_minor_update is not defined or tripleo_minor_update != 'true'" -- name: Ensure /var/log/journal exists - become: true - file: path=/var/log/journal state=directory mode=0750 owner=root group=root setype=var_log_t -- name: Check cloud-init status - shell: systemctl is-active cloud-init.service || systemctl is-enabled cloud-init.service - failed_when: false - become: true - register: cloud_init_enabled -- name: Check if cloud-init is disabled via kernel args - command: grep -q cloud-init=disabled /proc/cmdline - failed_when: false - check_mode: false - register: cloud_init_vendor_disabled -- name: Wait for cloud-init to finish, if enabled - community.general.cloud_init_data_facts: - filter: status - register: res - until: > - res.cloud_init_data_facts.status.v1.stage is defined and - not res.cloud_init_data_facts.status.v1.stage - retries: 50 - delay: 5 - when: - - cloud_init_enabled.rc is defined - - cloud_init_enabled.rc == 0 - - cloud_init_vendor_disabled.rc is not defined or cloud_init_vendor_disabled.rc != 0 -- name: Create /var/lib/container-puppet - become: true - no_log: True - file: path=/var/lib/container-puppet state=directory setype=container_file_t selevel=s0 recurse=true -- name: Write container-puppet.sh - become: true - no_log: True - copy: src=container_puppet_script.yaml dest=/var/lib/container-puppet/container-puppet.sh force=yes mode=0755 setype=container_file_t -- include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/deploy_steps_tasks_step0.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool diff --git a/common/deploy-steps-tasks-step-1.yaml b/common/deploy-steps-tasks-step-1.yaml deleted file mode 100644 index 5cd62f558c..0000000000 --- a/common/deploy-steps-tasks-step-1.yaml +++ /dev/null @@ -1,196 +0,0 @@ -- name: Delete existing /var/lib/tripleo-config/check-mode directory for check mode - become: true - file: - path: /var/lib/tripleo-config/check-mode - state: absent - tags: - - host_config - - container_config - - container_config_tasks - - container_config_scripts - - container_startup_configs - when: - - ansible_check_mode|bool - check_mode: false - -- name: Create /var/lib/tripleo-config/check-mode directory for check mode - become: true - file: - path: /var/lib/tripleo-config/check-mode - state: directory - setype: container_file_t - selevel: s0 - recurse: true - tags: - - 
host_config - - container_config - - container_config_tasks - - container_config_scripts - - container_startup_configs - when: - - ansible_check_mode|bool - check_mode: false - -# Puppet manifest for baremetal host configuration -- name: Write the puppet step_config manifest - become: true - no_log: True - copy: - content: "{{ lookup('file', tripleo_role_name + '/step_config.pp', errors='ignore') | default('', True) }}" - dest: /var/lib/tripleo-config/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}puppet_step_config.pp - force: true - mode: '0600' - tags: - - host_config - check_mode: false - diff: false - -- name: Diff puppet step_config manifest changes for check mode - command: - diff -uN /var/lib/tripleo-config/puppet_step_config.pp /var/lib/tripleo-config/check-mode/puppet_step_config.pp - register: diff_results - tags: - - host_config - check_mode: false - when: - - ansible_check_mode|bool - - ansible_diff_mode - failed_when: false - changed_when: diff_results.rc == 1 - -- name: Diff puppet step_config manifest changes for check mode - debug: - var: diff_results.stdout_lines - changed_when: diff_results.rc == 1 - when: - - ansible_check_mode|bool - - ansible_diff_mode - tags: - - host_config - -# Puppet Containers Config directory used to generate container configs -- name: Create /var/lib/container-puppet - become: true - file: - path: /var/lib/container-puppet - state: directory - setype: container_file_t - selevel: s0 - tags: - - container_config - - container_config_tasks - -- name: Delete existing /var/lib/container-puppet/check-mode for check mode - become: true - file: - path: /var/lib/container-puppet/check-mode - state: absent - tags: - - container_config - check_mode: false - when: - - ansible_check_mode|bool - -- name: Create /var/lib/container-puppet/check-mode for check mode - become: true - file: - path: /var/lib/container-puppet/check-mode - state: directory - setype: container_file_t - selevel: s0 - tags: - - container_config - check_mode: false - when: - - ansible_check_mode|bool - -- name: Write container-puppet.json file - become: true - no_log: True - copy: - content: "{{ lookup('template', tripleo_role_name + '/puppet_config.yaml', errors='ignore') | default([], True) | from_yaml | to_nice_json }}" - dest: /var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet.json - force: true - mode: '0600' - tags: - - container_config - check_mode: false - diff: false - -- name: Diff container-puppet.json changes for check mode - command: - diff -uN /var/lib/container-puppet/container-puppet.json /var/lib/container-puppet/check-mode/container-puppet.json - register: diff_results - tags: - - container_config - check_mode: false - when: - - ansible_check_mode|bool - - ansible_diff_mode - failed_when: false - changed_when: diff_results.rc == 1 - -- name: Diff container-puppet.json changes for check mode - debug: - var: diff_results.stdout_lines - changed_when: diff_results.rc == 1 - when: - - ansible_check_mode|bool - - ansible_diff_mode - tags: - - container_config - -- name: Ensure config hashes are up-to-date for container startup configs - become: true - container_puppet_config: - update_config_hash_only: true - tags: - - container_startup_configs - -- name: Set host puppet debugging fact string - set_fact: - host_puppet_config_debug: "--debug --verbose" - when: - - enable_puppet | bool - - enable_debug | bool - tags: - - host_config - -- name: Check for /etc/puppet/check-mode directory for check mode - stat: - 
path: /etc/puppet/check-mode - register: check_mode_dir - when: ansible_check_mode|bool - tags: - - host_config - - container_config - -- name: Create /etc/puppet/check-mode/hieradata directory for check mode - become: true - file: - path: /etc/puppet/check-mode/hieradata - state: directory - setype: container_file_t - selevel: s0 - recurse: true - check_mode: false - when: - - ansible_check_mode|bool - - not check_mode_dir.stat.exists - tags: - - host_config - - container_config - -- name: Create puppet check-mode files if they don't exist for check mode - become: true - shell: | - cp -a /etc/puppet/hiera.yaml /etc/puppet/check-mode/hiera.yaml - cp -a /etc/puppet/hieradata/* /etc/puppet/check-mode/hieradata/ - sed -i 's/\/etc\/puppet\/hieradata/\/etc\/puppet\/check-mode\/hieradata/' /etc/puppet/check-mode/hiera.yaml - when: - - ansible_check_mode|bool - - not check_mode_dir.stat.exists - check_mode: false - tags: - - host_config - - container_config diff --git a/common/deploy-steps-tasks.yaml b/common/deploy-steps-tasks.yaml deleted file mode 100644 index be695c91a0..0000000000 --- a/common/deploy-steps-tasks.yaml +++ /dev/null @@ -1,140 +0,0 @@ -##################################################### -# Per step puppet configuration of the baremetal host -##################################################### - -- name: Write the config_step hieradata - become: true - no_log: True - copy: - content: "{{ dict(step=step|int) | to_json }}" - dest: /etc/puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}hieradata/config_step.json - force: true - mode: '0600' - check_mode: false - tags: - - host_config - -- name: Run puppet host configuration for step {{ step }} - become: true - async: 3600 - poll: 0 - when: enable_puppet|bool - shell: >- - set -o pipefail; - puppet apply {{ host_puppet_config_debug | default('') }} - --modulepath=/etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules - --detailed-exitcodes - --summarize - --color=false - {{ ansible_check_mode | bool | ternary('--noop', '') }} - {{ ansible_check_mode | bool | ternary('--hiera_config /etc/puppet/check-mode/hiera.yaml', '') }} - /var/lib/tripleo-config/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}puppet_step_config.pp - 2>&1 | logger -s -t puppet-user - register: puppet_host_async_result - no_log: true - tags: - - host_config - check_mode: false - -- name: Wait for puppet host configuration to finish - async_status: - jid: "{{ puppet_host_async_result.ansible_job_id }}" - register: puppet_host_outputs - until: puppet_host_outputs.finished - retries: 360 - delay: 10 - failed_when: - - (not puppet_host_outputs.finished) or (puppet_host_outputs.rc is defined and puppet_host_outputs.rc not in [0, 2]) - when: - - not (ansible_check_mode | bool) - - enable_puppet|bool - tags: - - host_config - -- name: "Debug output for task: Run puppet host configuration for step {{ step }}" - debug: - var: puppet_host_outputs.stdout_lines | default([]) | union(puppet_host_outputs.stderr_lines | default([])) - when: - - not ansible_check_mode|bool - - enable_puppet | bool - - puppet_host_outputs.rc is defined - changed_when: puppet_host_outputs.rc == 2 - failed_when: puppet_host_outputs.rc not in [0, 2] - tags: - - host_config - -####################################### -# Pre-cache facts for puppet containers -####################################### -# We don't want the pre-cache tasks to be skipped in dry-run so we force -# check_mode to "no". 
-# https://bugzilla.redhat.com/show_bug.cgi?id=1738529 -- name: Pre-cache facts for puppet containers - include_role: - name: tripleo_puppet_cache - tags: - - container_config - - container_config_tasks - -####################################### -# Generate config via puppet containers -####################################### - -- name: Include container-puppet tasks (generate config) during step 1 - include_tasks: generate-config-tasks.yaml - when: step|int == 1 - tags: - - container_config - -##################################### -# Per step starting of the containers -##################################### - -- name: Per step starting of the containers using tripleo-ansible - become: true - environment: - TRIPLEO_MINOR_UPDATE: '{{ tripleo_minor_update | default(false) }}' - block: - - name: "Manage containers for step {{ step }} with tripleo-ansible" - include_role: - name: tripleo_container_manage - vars: - tripleo_container_manage_concurrency: 5 - tripleo_container_manage_cli: "{{ container_cli }}" - tripleo_container_manage_config: "/var/lib/tripleo-config/container-startup-config/step_{{ step }}" - tripleo_container_manage_config_id: "tripleo_step{{ step }}" - tripleo_container_manage_debug: "{{ enable_debug | bool }}" - tripleo_container_manage_healthcheck_disabled: "{{ container_healthcheck_disabled | bool }}" - tripleo_container_manage_log_path: "{{ container_log_stdout_path }}" - tripleo_container_manage_config_patterns: '*.json' - tripleo_container_manage_check_puppet_config: true - tags: - - container_startup_configs - -######################################################## -# Bootstrap tasks - run any tasks that have been defined -######################################################## - -- name: "Clean container_puppet_tasks for {{ansible_facts['hostname'] | lower}} step {{step}}" - become: true - file: - path: /var/lib/container-puppet/container-puppet-tasks{{step}}.json - state: absent - tags: - - container_config_tasks - -- name: Calculate container_puppet_tasks for {{ansible_facts['hostname'] | lower}} step {{step}} - set_fact: - "{{'host_container_puppet_tasks_' ~ step}}": "{{lookup('vars', 'host_container_puppet_tasks_' ~ step, default=[]) | union([item])}}" - loop: "{{container_puppet_tasks.get('step_' ~ step, [])}}" - when: (groups[item.service_name] | default ([]) | map('extract', hostvars, 'inventory_hostname') | sort | first | lower) == ansible_facts['hostname'] | lower - vars: - container_puppet_tasks: "{{ lookup('file', tripleo_role_name + '/container_puppet_tasks.yaml', errors='ignore') | default({}, True) | from_yaml }}" - tags: - - container_config_tasks - -- name: Include container-puppet tasks for step {{step}} - include_tasks: host-container-puppet-tasks.yaml - when: ('host_container_puppet_tasks_' ~ step) is defined - tags: - - container_config_tasks diff --git a/common/deploy-steps.j2 b/common/deploy-steps.j2 deleted file mode 100644 index c3abfeb2ce..0000000000 --- a/common/deploy-steps.j2 +++ /dev/null @@ -1,1330 +0,0 @@ -# certain initialization steps (run in a container) will occur -# on the role marked as primary controller or the first role listed -{%- set primary_role_name = roles[0].name -%} -{%- for role in roles if ('primary' in role.tags and 'controller' in role.tags) -%} - {%- if loop.first -%} - {%- set primary_role_name = role.name -%} - {%- endif -%} -{%- endfor -%} -# primary role is: {{primary_role_name}} -{% set deploy_steps_max = 6 -%} -{% set update_steps_max = 6 -%} -{% set external_update_steps_max = 2 -%} -{% set 
pre_upgrade_rolling_steps_max = 1 -%} -{% set upgrade_steps_max = 6 -%} -{% set external_upgrade_steps_max = 3 -%} -{% set post_upgrade_steps_max = 4 -%} -{% set post_update_steps_max = 4 -%} -{% set scale_steps_max = 1 -%} - -heat_template_version: wallaby - -description: > - Post-deploy configuration steps via puppet for all roles, - as defined in ../roles_data.yaml - -parameters: - servers: - type: json - description: Mapping of Role name e.g Controller to a list of servers - role_data: - type: json - description: Mapping of Role name e.g Controller to the per-role data - DeployIdentifier: - default: '' - type: string - description: > - Setting this to a unique value will re-run any deployment tasks which - perform configuration on a Heat stack-update. - deployment_source_hosts: - default: 'Undercloud' - type: string - description: Host or hostgroup that runs the deployment - deployment_target_hosts: - default: '' - type: string - description: > - Host or hostgroup that consists of the target systems for the deployment. - Defaults to all hosts in the current Heat stack if not set. - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - ConfigDebug: - default: false - description: Whether to run config management (e.g. Puppet) in debug mode. - type: boolean - HideSensitiveLogs: - default: true - type: boolean - description: > - Set it to false if you don't want to activate the no_log feature within - ansible modules. - EnablePuppet: - default: true - description: Whether to run the puppet (baremetal) deployment tasks. - type: boolean - EnablePaunch: - default: false - description: > - (DEPRECATED) Whether to run paunch during container deployment tasks. - type: boolean - DockerPuppetDebug: - type: boolean - default: false - description: Set to True to enable debug logging with Puppet Containers - DockerPuppetProcessCount: - type: number - default: 6 - description: Number of concurrent processes to use when running container-puppet to generate config files. - ContainerCli: - type: string - default: 'podman' - description: CLI tool used to manage containers. - constraints: - - allowed_values: ['podman'] - DockerPuppetMountHostPuppet: - type: boolean - default: true - description: Whether containerized puppet executions use modules from the baremetal host. Defaults to true. Can be set to false to consume puppet modules from containers directly. - ContainerLogStdoutPath: - type: string - description: Absolute path for container stdout output (Podman only) - default: /var/log/containers/stdouts - ContainerHealthcheckDisabled: - type: boolean - description: Whether or not we disable the container healthcheck. 
- default: false - SELinuxMode: - default: 'enforcing' - description: Configures SELinux mode - type: string - constraints: - - allowed_values: [ 'enforcing', 'permissive', 'disabled' ] -{% for role in roles %} - {{role.name}}Count: - description: Number of {{role.name}} nodes to deploy - type: number - default: {{role.CountDefault|default(0)}} -{% endfor %} - ServiceNetMapLower: - description: Mapping of service name to network name - type: json - default: {} - ValidateControllersIcmp: - default: true - description: Validation to ensure that all controllers can be reached with ICMP - type: boolean - ValidateGatewaysIcmp: - default: true - description: Validation to ensure that all gateways can be reached with ICMP - type: boolean - ValidateFqdn: - default: false - description: Optional validation to ensure FQDN as set by Nova matches the name set in /etc/hosts. - type: boolean - PingTestGatewayIPsMap: - default: {} - description: A map of role name to list of gateway IP addresses for each network, used to ping test each gateway. - type: json - PingTestIpsMap: - default: '' - description: A map of role name to a space-separated list of IP addresses used to ping test each available network interface. - type: json - NetworkSafeDefaults: - default: true - description: Whether to apply safe networking defaults if os-net-config fails to run with the provided config. - type: boolean - DeployArtifactURLs: - default: [] - description: A list of HTTP URLs containing deployment artifacts to be pulled. - Currently supports tarballs and RPM packages. - type: comma_delimited_list - DeployArtifactFILEs: - default: [] - description: A list of files containing deployment artifacts to be pushed. - Currently supports tarballs and RPM packages. - type: comma_delimited_list - HostsEntry: - default: [] - type: comma_delimited_list - description: A list of entries to be added to /etc/hosts on each node. - AnsibleHostVarsMap: - type: json - default: {} - ContainerCli: - type: string - default: 'podman' - description: CLI tool used to manage containers. - constraints: - - allowed_values: ['podman'] - EnabledServices: - default: [] - type: comma_delimited_list - ControlVirtualIP: - type: string - EnabledNetworks: - type: comma_delimited_list - NetVipMap: - type: json -{%- for network in networks if network.enabled|default(true) %} - {{network.name}}NetName: - default: {{network.name_lower}} - description: The name of the {{network.name_lower}} network. - type: string -{%- endfor %} - CloudNames: - type: json - EnableInternalTLS: - type: boolean - default: false - CloudDomain: - default: 'localdomain' - type: string - description: > - The DNS domain used for the hosts. This must match the - overcloud_domain_name configured on the undercloud. - NovaAdditionalCell: - default: false - description: Whether this is an additional cell to the default cell. - type: boolean - AllNodesExtraMapData: - default: {} - type: json - description: Map of extra data (hieradata) to set on each node. - UndercloudHostsEntries: - default: [] - description: > - List of undercloud hosts entries to be appended to /etc/hosts. The - value is populated with the HEAT_HOSTS entries on the undercloud by - tripleoclient when running deploy.
- type: comma_delimited_list - ExtraHostsEntries: - default: [] - description: List of extra hosts entries to be appended to /etc/hosts - type: comma_delimited_list - VipHostsEntries: - default: [] - description: List of VIP (virtual IP) hosts entries to be appended to /etc/hosts - type: comma_delimited_list - KeystoneResourcesConfigs: - description: The keystone resources config. - type: json - default: {} - RootStackName: - description: The name of the stack/plan. - type: string - NetCidrMap: - description: Mapping of CIDRs to network name - type: json - default: {} - -parameter_groups: -- label: deprecated - description: | - The following parameters are deprecated and will be removed. They should not - be relied on for new deployments. If you have concerns regarding deprecated - parameters, please contact the TripleO development team on IRC or the - OpenStack mailing list. - parameters: - - EnablePaunch - -conditions: -{% for role in roles %} - {{role.name}}NonZero: - not: - equals: - - {get_param: {{role.name}}Count} - - 0 -{% endfor %} - -resources: - - PreDeployStepTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - list_concat_unique: - {%- for role in roles %} - - if: - - {{role.name}}NonZero - - get_param: [role_data, {{role.name}}, pre_deploy_step_tasks] - - [] - {%- endfor %} - - ExternalDeployTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - list_concat_unique: - {%- for role in roles %} - - if: - - {{role.name}}NonZero - - get_param: [role_data, {{role.name}}, external_deploy_tasks] - - [] - {%- endfor %} - - ExternalPostDeployTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - list_concat_unique: - {%- for role in roles %} - - if: - - {{role.name}}NonZero - - get_param: [role_data, {{role.name}}, external_post_deploy_tasks] - - [] - {%- endfor %} - - ScaleTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - list_concat_unique: - {%- for role in roles %} - - get_param: [role_data, {{role.name}}, scale_tasks] - {%- endfor %} - - ExternalUpdateTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - list_concat_unique: - {%- for role in roles %} - - if: - - {{role.name}}NonZero - - get_param: [role_data, {{role.name}}, external_update_tasks] - - [] - {%- endfor %} - - ExternalUpgradeTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - list_concat_unique: - {%- for role in roles %} - - if: - - {{role.name}}NonZero - - get_param: [role_data, {{role.name}}, external_upgrade_tasks] - - [] - {%- endfor %} - - BootstrapServerId: - type: OS::Heat::Value - properties: - value: - yaql: - # Use a constant string of "bootstrap_server_id" when there are no - # servers in the primary role, such as in the case when all - # Controllers are blacklisted. No server id's will match the string - # which is what we want when all are blacklisted. - expression: switch($.data = {} => "no_bootstrap_server", $.data != {} => $.data.items().orderBy($[0]).first()[1]) - data: {get_param: [servers, {{primary_role_name}}]} - - # Note, this should be the last step to execute configuration changes. - # Ensure that all ExtraConfigPost steps are executed - # after all the previous deployment steps. 
- ExtraConfigPost: - type: OS::TripleO::NodeExtraConfigPost - properties: - servers: - map_merge: - {%- for role in roles %} - - if: - - {{role.name}}NonZero - - {get_param: [servers, {{role.name}}]} - - {} - {%- endfor %} - EndpointMap: {get_param: EndpointMap} - - # BEGIN CONFIG STEPS, only on roles -{%- for role in roles %} - # The {{role.name}}PostConfig steps are in charge of - # quiescing all services, i.e. in the Controller case, - # we should run a full service reload. - {{role.name}}PostConfig: - condition: {{role.name}}NonZero - type: OS::TripleO::Tasks::{{role.name}}PostConfig - depends_on: ExtraConfigPost - properties: - servers: {get_param: servers} - input_values: - deploy_identifier: {get_param: DeployIdentifier} -{% endfor %} - -outputs: - RoleConfig: - description: Mapping of config data for all roles - value: - global_vars: - deploy_steps_max: {{deploy_steps_max}} - service_net_map: {get_param: ServiceNetMapLower} - validate_controllers_icmp: {get_param: ValidateControllersIcmp} - validate_gateways_icmp: {get_param: ValidateGatewaysIcmp} - validate_fqdn: {get_param: ValidateFqdn} - ping_test_gateway_ips: {get_param: PingTestGatewayIPsMap} - ping_test_ips: {get_param: PingTestIpsMap} - network_safe_defaults: {get_param: NetworkSafeDefaults} - deploy_artifact_urls: {get_param: DeployArtifactURLs} - deploy_artifact_files: {get_param: DeployArtifactFILEs} - hosts_entry: {get_param: HostsEntry} - primary_role_name: {{ primary_role_name }} - deploy_identifier: {get_param: DeployIdentifier} - container_cli: {get_param: ContainerCli} - enabled_services: {get_param: EnabledServices} - control_virtual_ip: {get_param: ControlVirtualIP} - enabled_networks: {get_param: EnabledNetworks} - net_vip_map: {get_param: NetVipMap} - nova_additional_cell: {get_param: NovaAdditionalCell} - hide_sensitive_logs: {get_param: HideSensitiveLogs} -{%- for network in networks if network.enabled|default(true) %} - {{network.name_lower}}_net_name: {get_param: {{network.name}}NetName} -{%- endfor %} - networks: -{%- for network in networks if network.enabled|default(true) %} - {{network.name}}: - name: {get_param: {{network.name}}NetName} - name_lower: {{ network.name_lower }} -{%- endfor %} - network_virtual_ips: - ctlplane: - ip_address: {get_param: [NetVipMap, ctlplane]} - index: 1 -{%- for network in networks if network.vip|default(false) and network.enabled|default(true) %} - {{network.name_lower}}: - ip_address: {get_param: [NetVipMap, {get_param: {{network.name}}NetName}]} - index: {{loop.index + 1}} -{%- endfor %} - cloud_names: {get_param: CloudNames} - enable_internal_tls: {get_param: EnableInternalTLS} - cloud_domain: {get_param: CloudDomain} - all_nodes_extra_map_data: {get_param: AllNodesExtraMapData} - undercloud_hosts_entries: {get_param: UndercloudHostsEntries} - extra_hosts_entries: {get_param: ExtraHostsEntries} - vip_hosts_entries: {get_param: VipHostsEntries} - keystone_resources: {get_param: KeystoneResourcesConfigs} - net_cidr_map: {get_param: NetCidrMap} - common_deploy_steps_playbooks: {get_file: deploy-steps-playbooks-common.yaml} - common_deploy_steps_tasks: {get_file: deploy-steps-tasks.yaml} - common_container_config_scripts: {get_file: common-container-config-scripts.yaml} - hiera_steps_tasks: {get_file: hiera-steps-tasks.yaml} - deploy_steps_tasks_step_0: {get_file: deploy-steps-tasks-step-0.yaml} - common_container_setup_tasks: {get_file: common-container-setup-tasks.yaml} - common_deploy_steps_tasks_step_1: {get_file: deploy-steps-tasks-step-1.yaml} - 
container_puppet_script: {get_file: ./container-puppet.sh} - generate-config-tasks: {get_file: generate-config-tasks.yaml} - host-container-puppet-tasks: {get_file: host-container-puppet-tasks.yaml} - deploy_steps_0_playbook: -{% block deploy_steps_str_replace_params %} - str_replace: - params: - BOOTSTRAP_SERVER_ID: {get_attr: [BootstrapServerId, value]} - DEPLOY_SOURCE_HOST: {get_param: deployment_source_hosts} - DEPLOY_TARGET_HOST: - if: - - equals: - - {get_param: deployment_target_hosts} - - "" - - {get_param: RootStackName} - - {get_param: deployment_target_hosts} - DEPLOY_IDENTIFIER: {get_param: DeployIdentifier} - ENABLE_DEBUG: {get_param: ConfigDebug} - ENABLE_PUPPET: {get_param: EnablePuppet} - CONTAINER_CLI: {get_param: ContainerCli} - CONTAINER_LOG_STDOUT_PATH: {get_param: ContainerLogStdoutPath} - CONTAINER_HEALTHCHECK_DISABLED: {get_param: ContainerHealthcheckDisabled} - DOCKER_PUPPET_DEBUG: {get_param: DockerPuppetDebug} - DOCKER_PUPPET_PROCESS_COUNT: {get_param: DockerPuppetProcessCount} - DOCKER_PUPPET_MOUNT_HOST_PUPPET: {get_param: DockerPuppetMountHostPuppet} - SELINUX_MODE: {get_param: SELinuxMode} -{% endblock %} - template: | - - hosts: DEPLOY_SOURCE_HOST - strategy: tripleo_free - name: External deployment step 0 - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - step: 0 - enable_debug: ENABLE_DEBUG - tasks: - - name: External deployment step 0 - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'External deployment step 0' to resume from this task -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "external_deploy_steps_tasks_step0.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool -{% endraw %} - tags: - - external - - external_deploy_steps - - step0 - - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" - deploy_steps_selinux_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: DEPLOY_TARGET_HOST - strategy: tripleo_linear - name: Manage SELinux - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: - - name: Set selinux state - become: true - selinux: - policy: targeted - state: SELINUX_MODE - - - hosts: all - strategy: tripleo_linear - name: Generate /etc/hosts - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: -{% raw %} - - name: Configure Hosts Entries - include_role: - name: tripleo_hosts_entries - vars: - tripleo_hosts_entries_undercloud_hosts_entries: "{{ undercloud_hosts_entries }}" - tripleo_hosts_entries_extra_hosts_entries: "{{ extra_hosts_entries }}" - tripleo_hosts_entries_vip_hosts_entries: "{{ vip_hosts_entries }}" -{% endraw %} - deploy_steps_common_roles_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: DEPLOY_TARGET_HOST - strategy: tripleo_linear - name: Common roles for TripleO servers - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - # pre_tasks run before any roles in a play, so we use it for the - # named debug task for --start-at-task. 
- pre_tasks: - - name: Common roles for TripleO servers - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Common roles for TripleO servers' to resume from this task - tasks: - - include_role: - name: tripleo_bootstrap - - include_role: - name: tripleo_ssh_known_hosts - tags: - - common_roles - deploy_steps_0_tasks_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Deploy step tasks for step 0 - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - step: 0 - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - import_tasks: deploy_steps_tasks_step_0.yaml - tags: - - overcloud - - deploy_steps - - step0 - deploy_steps_pre_network_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Server pre network steps - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - tasks: -{% raw %} - - name: Server pre-network deployments - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Server pre-network deployments' to resume from this task - - import_tasks: hiera_steps_tasks.yaml - - include_tasks: deployments.yaml - vars: - force: false - with_items: "{{ hostvars[inventory_hostname]['pre_network_' ~ tripleo_role_name]|default([]) }}" -{% endraw %} - tags: - - overcloud - - pre_deploy_steps - deploy_steps_network_deployments_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Server network deployments - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: -{% raw %} - - name: Network Configuration - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Network Configuration' to resume from this task - - name: Run Network Config - import_role: - name: tripleo_network_config - vars: - tripleo_network_config_update: "{{ network_config_update }}" - tripleo_network_config_async_timeout: "{{ async_timeout | default(300) }}" - tripleo_network_config_async_poll: "{{ async_poll | default(3) }}" - tripleo_network_config_hide_sensitive_logs: false - tripleo_network_config_legacy_script: false - tripleo_network_config_safe_defaults: "{{ network_safe_defaults }}|bool" - tags: - - overcloud - - pre_deploy_steps - - network_deploy_steps -{% endraw %} - deploy_steps_network_validation_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Server network validation - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: -{% raw %} - - name: Basic Network Validation - include_role: - name: tripleo_nodes_validation - vars: - 
tripleo_nodes_validation_validate_controllers_icmp: "{{ validate_controllers_icmp }}" - tripleo_nodes_validation_validate_gateway_icmp: "{{ validate_gateways_icmp }}" - tripleo_nodes_validation_validate_fqdn: "{{ validate_fqdn }}" - tripleo_nodes_validation_ping_test_ips: "{{ ping_test_ips.get(tripleo_role_name, '').split() | list | unique }}" - tripleo_nodes_validation_ping_test_gateway_ips: "{{ ping_test_gateway_ips.get(tripleo_role_name, []) }}" - tags: - - overcloud - - pre_deploy_steps - - network_deploy_steps -{% endraw %} - deploy_steps_pre_deployment_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Server pre deployment steps - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: - - name: Server pre deployments - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Server pre deployments' to resume from this task -{% raw %} - - import_tasks: hiera_steps_tasks.yaml - - include_tasks: deployments.yaml - vars: - force: false - with_items: "{{ hostvars[inventory_hostname]['pre_deployments_' ~ tripleo_role_name]|default([]) }}" -{% endraw %} - tags: - - overcloud - - pre_deploy_steps - deploy_steps_host_prep_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Host prep steps - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: -{% raw %} - - name: Host prep steps - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Host prep steps' to resume from this task - - name: Deploy Artifacts - tripleo_push_artifacts: - artifact_urls: "{{ deploy_artifact_urls | default([]) }}" - artifact_paths: "{{ deploy_artifact_files | default([]) }}" - when: - - ((deploy_artifact_urls | default([]) | length) > 0) or - ((deploy_artifact_files | default([]) | length) > 0) - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/host_prep_tasks.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool - tags: - - overcloud - - host_prep_steps -{% endraw %} - deploy_steps_container_setup_tasks_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Overcloud container setup tasks - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - 
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - name: Overcloud container setup tasks - block: - - name: Overcloud container setup tasks - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Overcloud container setup tasks' to resume from this task - - name: Write config data - include_tasks: common_container_setup_tasks.yaml - when: - - (deploy_identifier is defined and deploy_identifier != "" and deploy_identifier is not none) - tags: - - overcloud - - container_setup_tasks - deploy_steps_pre_deployment_tasks_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Pre Deployment Step Tasks - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: -{% raw %} - - name: Pre Deployment Step Tasks - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Pre Deployment Step Tasks' to resume from this task - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/pre_deploy_step_tasks.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool - tags: - - overcloud - - pre_deploy_steps_tasks -{% endraw %} - deploy_steps_external_deployment_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | -{%- for step in range(1,deploy_steps_max) %} - - hosts: DEPLOY_SOURCE_HOST - strategy: tripleo_free - name: External deployment step {{step}} - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - step: '{{step}}' - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - name: External deployment step {{step}} - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'External deployment step {{step}}' to resume from this task -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "external_deploy_steps_tasks_step{{step}}.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool -{% endraw %} - tags: - - external - - external_deploy_steps - - step{{step}} - - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Deploy step tasks for {{step}} - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true 
- vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - step: '{{step}}' - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - name: Deploy step tasks for {{step}} - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Deploy step tasks for {{step}}' to resume from this task - - name: Write the config_step hieradata for the deploy step {{step}} tasks -{% raw %} - copy: - content: "{{ dict(step=step | int) | to_json }}" - dest: /etc/puppet/hieradata/config_step.json - force: true - mode: '0600' -{% endraw %} -{% if step == 1 %} - - name: Overcloud common bootstrap tasks for step 1 - block: - - name: "Check if /var/lib/tripleo-config/container-startup-config/step_{{step}} already exists" - stat: - path: "/var/lib/tripleo-config/container-startup-config/step_{{step}}" - register: container_startup_configs_json_stat - - name: Write config data at the start of step 1 - include_tasks: common_deploy_steps_tasks_step_1.yaml - when: - - ((deploy_identifier is defined and deploy_identifier != "" and deploy_identifier is not none) or - not container_startup_configs_json_stat.stat.exists) -{% endif %} -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/deploy_steps_tasks_step{{ step }}.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool -{% endraw %} - - name: Overcloud common deploy step tasks {{step}} - block: - - name: "Check if /var/lib/tripleo-config/container-startup-config/step_{{step}} already exists" - stat: - path: "/var/lib/tripleo-config/container-startup-config/step_{{step}}" - register: container_startup_configs_json_stat - - include_tasks: common_deploy_steps_tasks.yaml - when: (deploy_identifier is defined and deploy_identifier != "" and deploy_identifier is not none) or - (container_startup_configs_json_stat is defined and not container_startup_configs_json_stat.stat.exists) - tags: - - overcloud - - deploy_steps - - step{{step}} - -{%- endfor %} - deploy_steps_post_deployment_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Server Post Deployments - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: - - name: Server Post Deployments - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Server Post Deployments' to resume from this task -{% raw %} - - include_tasks: deployments.yaml - vars: - force: false - with_items: "{{ hostvars[inventory_hostname]['post_deployments_' ~ tripleo_role_name]|default([]) }}" - tags: - - overcloud - - post_deploy_steps -{% endraw %} - deploy_steps_post_deploy_tasks_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: DEPLOY_SOURCE_HOST - strategy: tripleo_linear - name: External deployment Post Deploy tasks - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - 
deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - name: External deployment Post Deploy tasks - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'External deployment Post Deploy tasks' to resume from this task - - import_tasks: external_post_deploy_steps_tasks.yaml - tags: - - external - - external_deploy_steps - - external_post_deploy_steps - deploy_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - import_playbook: deploy_steps_0_playbook.yaml - - import_playbook: deploy_steps_selinux_playbook.yaml - - import_playbook: deploy_steps_common_roles_playbook.yaml - - import_playbook: deploy_steps_0_tasks_playbook.yaml - - import_playbook: deploy_steps_pre_network_playbook.yaml - - import_playbook: deploy_steps_network_deployments_playbook.yaml - - import_playbook: deploy_steps_network_validation_playbook.yaml - - import_playbook: deploy_steps_pre_deployment_playbook.yaml - - import_playbook: deploy_steps_host_prep_playbook.yaml - - import_playbook: deploy_steps_container_setup_tasks_playbook.yaml - - import_playbook: deploy_steps_pre_deployment_tasks_playbook.yaml - - import_playbook: deploy_steps_external_deployment_playbook.yaml - - import_playbook: deploy_steps_post_deployment_playbook.yaml - - import_playbook: deploy_steps_post_deploy_tasks_playbook.yaml - external_deploy_steps_tasks: {get_attr: [ExternalDeployTasks, value]} - external_post_deploy_steps_tasks: {get_attr: [ExternalPostDeployTasks, value]} - pre_deploy_step_tasks: {get_attr: [PreDeployStepTasks, value]} - update_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" -{%- for role in roles %} - - hosts: {{role.name}} - name: Run update - become: true - serial: "{% raw %}{{ update_serial | default({% endraw %}{{ role.update_serial | default(1) }}{% raw %})}}{% endraw %}" - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tripleo_minor_update: true - tripleo_redhat_enforce: {{role.rhsm_enforce|default("true")}} - tasks: - - import_tasks: hiera_steps_tasks.yaml - - import_tasks: common_container_config_scripts.yaml -{%- for step in range(0,update_steps_max) %} - - import_tasks: "{{ role.name }}/update_tasks_step{{ step }}.yaml" - vars: - step: "{{ step }}" -{%- endfor %} -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/host_prep_tasks.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: 
_test_file_exists | bool -{% endraw %} - - import_tasks: deploy_steps_tasks_step_0.yaml - vars: - step: 0 - - name: Make sure container configuration is refreshed - import_tasks: common_container_setup_tasks.yaml - - name: Write config data at the start of step 1 - import_tasks: common_deploy_steps_tasks_step_1.yaml -{%- for step in range(1,deploy_steps_max) %} - - import_tasks: common_deploy_steps_tasks.yaml - vars: - step: "{{ step }}" -{%- endfor %} -{%- for step in range(0,post_update_steps_max) %} - - import_tasks: "{{ role.name }}/post_update_tasks_step{{ step }}.yaml" - vars: - step: "{{ step }}" -{%- endfor %} -{%- endfor %} - external_update_steps_tasks: {get_attr: [ExternalUpdateTasks, value]} - external_update_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" -{%- for step in range(external_update_steps_max) %} - - hosts: DEPLOY_SOURCE_HOST - name: External update step {{step}} - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - step: '{{step}}' - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - import_tasks: external_update_steps_tasks.yaml - tags: - - step{{step}} - - external - - external_update_steps -{%- endfor %} -{%- for step in range(1,deploy_steps_max) %} - # putting both update and deploy tasks in the same - # playbook allows influencing the deploy tasks by - # variables "exported" from update tasks - - hosts: DEPLOY_SOURCE_HOST - name: External deploy step {{step}} - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - step: '{{step}}' - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - import_tasks: external_deploy_steps_tasks.yaml - tags: - - external - - external_deploy_steps - - step{{step}} -{%- endfor %} - pre_upgrade_rolling_steps_tasks: | -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/pre_upgrade_rolling_tasks.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool - tags: - - always -{% endraw %} - pre_upgrade_rolling_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | -{%- for role in roles %} - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST:{{role.name}}" - deploy_target_host: "DEPLOY_TARGET_HOST" - - hosts: {{role.name}} - name: Run pre-upgrade rolling tasks - serial: {{ 
role.deploy_serial | default(1) }} - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: - - include_tasks: pre_upgrade_rolling_steps_tasks.yaml - with_sequence: start=0 end={{pre_upgrade_rolling_steps_max-1}} - loop_control: - loop_var: step -{%- endfor %} - upgrade_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" -{%- for step in range(0,upgrade_steps_max) %} - - hosts: DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Upgrade tasks for step {{step}} - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - step: '{{step}}' - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - tasks: -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/upgrade_tasks_step{{ step }}.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool - tags: - - always -{% endraw %} - tags: - - upgrade_steps - - upgrade_step{{step}} - -{%- endfor %} - post_upgrade_steps_tasks: | -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/post_upgrade_tasks.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool - tags: - - always -{% endraw %} - post_upgrade_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" - - hosts: DEPLOY_TARGET_HOST - strategy: tripleo_free - any_errors_fatal: true - tasks: - - include_tasks: post_upgrade_steps_tasks.yaml - with_sequence: start=0 end={{post_upgrade_steps_max-1}} - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - loop_control: - loop_var: step - external_upgrade_steps_tasks: {get_attr: [ExternalUpgradeTasks, value]} - external_upgrade_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" -{%- for step in range(external_upgrade_steps_max) %} - - hosts: DEPLOY_SOURCE_HOST - strategy: tripleo_free - name: External upgrade step {{step}} - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - # Explicit ansible_python_interpreter to allow connecting - # to different OS releases (EL7/8) while using delegate_to. 
- ansible_python_interpreter: /usr/libexec/platform-python - step: '{{step}}' - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - import_tasks: external_upgrade_steps_tasks.yaml - tags: - - step{{step}} - - external - - external_upgrade_steps -{%- endfor %} -{%- for step in range(1,deploy_steps_max) %} - # putting both upgrade and deploy tasks in the same - # playbook allows influencing the deploy tasks by - # variables "exported" from upgrade tasks - - hosts: DEPLOY_SOURCE_HOST - name: External deploy step {{step}} - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - # Explicit ansible_python_interpreter to allow connecting - # to different OS releases (EL7/8) while using delegate_to. - ansible_python_interpreter: /usr/libexec/platform-python - step: '{{step}}' - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - import_tasks: external_deploy_steps_tasks.yaml - tags: - - step{{step}} - - external - - external_deploy_steps -{%- endfor %} - scale_steps_tasks: {get_attr: [ScaleTasks, value]} - scale_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - # Collect the facts from the overcloud nodes but ignore unreachable - # nodes in the case of a dead node which needs to be part of the - # scale-down operation. - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" - - - hosts: DEPLOY_TARGET_HOST - name: Scaling - # NOTE(cloudnull): This is set to true explicitly so that we have up-to-date facts - # on all DEPLOY_TARGET_HOST when performing a scaling operation. - # Without up-to-date facts, we're creating a potential failure - # scenario. - gather_facts: true - ignore_unreachable: true - become: false - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - # Only run the scale tasks on the nodes that are alive. - # We expect the tasks to work on the nodes that are alive. - # If a task is allowed to fail, it needs to be configured at - # the task definition level but not here. - - include_tasks: scale_steps_tasks.yaml - with_sequence: start=1 end={{scale_steps_max}} - loop_control: - loop_var: step - tags: always - # we use ansible_facts['hostname'] to determine if the host is alive - # or not. 
- when: ansible_facts['hostname'] is defined - # We don't want to run the scale tasks on dead nodes, to allow - # the operator to scale down the cloud no matter the state of - # the servers. - # However, we notify the operator if the node wasn't reachable. - # Using fail and not debug module to make it more visible - # in the logs. - - fail: - msg: "Node is unreachable. No scale tasks will be run." - ignore_errors: True - tags: always - # we use ansible_facts['hostname'] to determine if the host is alive - # or not. - when: ansible_facts['hostname'] is not defined - tags: - - scale - post_update_steps_tasks: | -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/post_update_tasks.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool - tags: - - always -{% endraw %} diff --git a/common/generate-config-tasks.yaml b/common/generate-config-tasks.yaml deleted file mode 100644 index 0e80d3c7d2..0000000000 --- a/common/generate-config-tasks.yaml +++ /dev/null @@ -1,54 +0,0 @@ -- name: Block for container-puppet tasks (generate config) during step {{ step }} with tripleo-ansible - become: true - tags: - - container_config - block: - - name: Create base directory puppet configs - file: - path: "/var/lib/tripleo-config/container-puppet-config" - mode: 0700 - recurse: true - setype: container_file_t - - - name: "Generate container puppet configs for step {{ step }}" - container_puppet_config: - check_mode: '{{ ansible_check_mode | bool | ternary(1, 0) }}' - config_vol_prefix: "/var/lib/config-data{{ ansible_check_mode | bool | ternary('/check-mode', '') }}" - debug: "{{ docker_puppet_debug | bool }}" - net_host: true - no_archive: false - puppet_config: "/var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet.json" - short_hostname: "{{ ansible_facts['hostname'] | lower }}" - step: "{{ step }}" - - - name: "Manage Puppet containers (generate config) for step {{ step }}" - tripleo_container_manage: - config_id: "tripleo_puppet_step{{ step }}" - config_dir: "/var/lib/tripleo-config/container-puppet-config/step_{{ step }}" - config_patterns: "container-puppet-*.json" - concurrency: "{{ docker_puppet_process_count }}" - -- name: Diff puppet-generated changes for check mode - become: true - shell: | - diff -ruN --no-dereference -q /var/lib/config-data/puppet-generated /var/lib/config-data/check-mode/puppet-generated - diff -ruN --no-dereference /var/lib/config-data/puppet-generated /var/lib/config-data/check-mode/puppet-generated - register: diff_results - tags: - - container_config - check_mode: false - when: - - ansible_check_mode|bool - - ansible_diff_mode - failed_when: false - changed_when: diff_results.rc == 1 - -- name: Diff puppet-generated changes for check mode - debug: - var: diff_results.stdout_lines - changed_when: diff_results.rc == 1 - when: - - ansible_check_mode|bool - - ansible_diff_mode - tags: - - container_config diff --git a/common/hiera-steps-tasks.yaml b/common/hiera-steps-tasks.yaml deleted file mode 100644 index 1cc20b9f25..0000000000 --- a/common/hiera-steps-tasks.yaml +++ /dev/null @@ -1,31 +0,0 @@ -- name: Hiera config - include_role: - name: tripleo_hieradata -- name: Hiera symlink - become: true - file: - src: /etc/puppet/hiera.yaml - dest: /etc/hiera.yaml - state: link - force: true -- name: Hieradata from vars - include_role: - name: tripleo_hieradata - tasks_from: 
hieradata_vars.yaml - vars: - hieradata_templates_list: - - bootstrap_node - - all_nodes - - vip_data - - net_ip_map - - cloud_domain - - fqdn - - service_names - - service_configs - - extraconfig - - role_extraconfig - - ovn_chassis_mac_map -- name: Ensure ansible_managed hieradata file exists - include_role: - name: tripleo_hieradata - tasks_from: ansible_hieradata.yml diff --git a/common/host-container-puppet-tasks.yaml b/common/host-container-puppet-tasks.yaml deleted file mode 100644 index bde0a16a70..0000000000 --- a/common/host-container-puppet-tasks.yaml +++ /dev/null @@ -1,33 +0,0 @@ -- name: Write container-puppet-tasks json file for {{ansible_facts['hostname'] | lower}} step {{step}} - no_log: True - copy: - content: "{{lookup ('vars', 'host_container_puppet_tasks_' ~ step, default=[]) | to_nice_json}}" - dest: "/var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet-tasks{{step}}.json" - force: true - mode: '0600' - check_mode: false - tags: - - container_config_tasks - -- name: Block for container-puppet tasks (bootstrap tasks) for step {{ step }} with tripleo-ansible - tags: - - container_config_tasks - block: - - name: "Generate container puppet configs for step {{ step }}" - container_puppet_config: - check_mode: '{{ ansible_check_mode | bool | ternary(1, 0) }}' - config_vol_prefix: "/var/lib/config-data{{ ansible_check_mode | bool | ternary('/check-mode', '') }}" - debug: "{{ docker_puppet_debug | bool }}" - net_host: true - no_archive: true - puppet_config: "/var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet-tasks{{ step }}.json" - short_hostname: "{{ ansible_facts['hostname'] | lower }}" - step: "{{ step }}" - - - name: "Manage Puppet containers (bootstrap tasks) for step {{ step }}" - tripleo_container_manage: - config_id: "tripleo_puppet_step{{ step }}" - config_dir: "/var/lib/tripleo-config/container-puppet-config/step_{{ step }}" - config_patterns: 'container-puppet-*.json' - debug: "{{ docker_puppet_debug | bool }}" - concurrency: "{{ docker_puppet_process_count }}" diff --git a/common/post.j2.yaml b/common/post.j2.yaml deleted file mode 100644 index 8a70dfa97b..0000000000 --- a/common/post.j2.yaml +++ /dev/null @@ -1 +0,0 @@ -{% include 'deploy-steps.j2' %} diff --git a/common/services/role.role.j2.yaml b/common/services/role.role.j2.yaml deleted file mode 100644 index fee2ac8215..0000000000 --- a/common/services/role.role.j2.yaml +++ /dev/null @@ -1,443 +0,0 @@ -heat_template_version: wallaby - -description: > - Utility stack to convert an array of services into a set of combined - role configs. - -parameters: - Services: - default: [] - description: | - List nested stack service templates. - type: comma_delimited_list - ServiceData: - default: {} - description: Dictionary packing service data - type: json - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. Use - parameter_merge_strategies to merge it with the defaults. - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. 
- type: json - RoleName: - default: '' - description: Role name on which the service is applied - type: string - RoleParameters: - description: Parameters specific to the role - default: {} - type: json - -resources: - - ServiceChain: - type: OS::Heat::ResourceChain - properties: - resources: {get_param: Services} - concurrent: true - resource_properties: - ServiceData: {get_param: ServiceData} - ServiceNetMap: {get_param: ServiceNetMap} - EndpointMap: {get_param: EndpointMap} - RoleName: {get_param: RoleName} - RoleParameters: {get_param: RoleParameters} - - ServiceServerMetadataHook: - type: OS::TripleO::{{role.name}}ServiceServerMetadataHook - properties: - RoleData: {get_attr: [ServiceChain, role_data]} - - PuppetStepConfig: - type: OS::Heat::Value - properties: - type: string - value: - yaql: - expression: - # select 'step_config' only from services that do not have a docker_config - coalesce($.data.service_names, []).zip(coalesce($.data.step_config, []), coalesce($.data.docker_config, [])).where($[2] = null).where($[1] != null).select($[1]).join("\n") - data: - service_names: {get_attr: [ServiceChain, role_data, service_name]} - step_config: {get_attr: [ServiceChain, role_data, step_config]} - docker_config: {get_attr: [ServiceChain, role_data, docker_config]} - - DockerConfig: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - expression: - # select 'docker_config' only from services that have it - coalesce($.data.service_names, []).zip(coalesce($.data.docker_config, [])).where($[1] != null).select($[1]).reduce($1.mergeWith($2), {}) - data: - service_names: {get_attr: [ServiceChain, role_data, service_names]} - docker_config: {get_attr: [ServiceChain, role_data, docker_config]} - - DockerConfigScripts: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - expression: - # select 'container_config_scripts' only from services that have it - coalesce($.data.service_names, []).zip(coalesce($.data.container_config_scripts, [])).where($[1] != null).select($[1]).reduce($1.mergeWith($2), {}) - data: - service_names: {get_attr: [ServiceChain, role_data, service_names]} - container_config_scripts: {get_attr: [ServiceChain, role_data, container_config_scripts]} - - MonitoringSubscriptionsConfig: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('monitoring_subscription')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - ServiceNames: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - filter: - - [null] - - {get_attr: [ServiceChain, role_data, service_name]} - - GlobalConfigSettings: - type: OS::Heat::Value - properties: - type: json - value: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('global_config_settings')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - KeystoneResourcesConfigs: - type: OS::Heat::Value - properties: - type: json - value: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('keystone_resources')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - CoreServices: - type: OS::Heat::Value - properties: - type: json - value: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('core_services')).where($ != null)).flatten() - data: 
{role_data: {get_attr: [ServiceChain, role_data]}} - - ServiceConfigSettings: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - expression: coalesce($.data.role_data, []).where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {}) - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - DeployStepsTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('deploy_steps_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - ExternalDeployTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('external_deploy_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - ExternalPostDeployTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('external_post_deploy_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - ScaleTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('scale_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - ExternalUpdateTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('external_update_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - ExternalUpgradeTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('external_upgrade_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - PreUpgradeRollingTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - expression: coalesce($.data, []).where($ != null).select($.get('pre_upgrade_rolling_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - UpgradeTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - PostUpgradeTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != 
null).select($.get('post_upgrade_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - UpdateTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('update_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - PostUpdateTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('post_update_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - UpgradeBatchTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - expression: coalesce($.data, []).where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - PuppetConfig: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - expression: coalesce($.data, []).where($ != null).select($.get('puppet_config')).where($ != null).distinct() - data: {get_attr: [ServiceChain, role_data]} - - KollaConfig: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - expression: coalesce($.data.role_data, []).where($ != null).select($.get('kolla_config')).where($ != null).reduce($1.mergeWith($2), {}) - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - # DEPRECATED in favor of ContainerPuppetTasks. - DockerPuppetTasks: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - # This extracts docker_puppet_tasks for each step, - # and merges {service_name: the_service} for each item, so we - # can later filter based on the bootstrap hostname for the service - expression: dict(coalesce($.data, []).where($ != null).select([$.get('docker_puppet_tasks'), $.get('service_name')]).where($[0] != null).select([$[0], dict($[0].keys().zip(dict(service_name=>$[1]).repeat(len($[0].keys()))))]).select($[0].mergeWith($[1])).selectMany($.items()).groupBy($[0], $[1])) - data: {get_attr: [ServiceChain, role_data]} - - ContainerPuppetTasks: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - # This extracts container_puppet_tasks for each step, - # and merges {service_name: the_service} for each item, so we - # can later filter based on the bootstrap hostname for the service - expression: dict(coalesce($.data, []).where($ != null).select([$.get('container_puppet_tasks'), $.get('service_name')]).where($[0] != null).select([$[0], dict($[0].keys().zip(dict(service_name=>$[1]).repeat(len($[0].keys()))))]).select($[0].mergeWith($[1])).selectMany($.items()).groupBy($[0], $[1])) - data: {get_attr: [ServiceChain, role_data]} - - HostFirewallTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks - expression: coalesce($.data, []).where($ != null).select($.get('host_firewall_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - HostPrepTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks - expression: coalesce($.data, []).where($ != 
null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - PreDeployStepTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks - expression: coalesce($.data, []).where($ != null).select($.get('pre_deploy_step_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - AnsibleGroupVars: - type: OS::Heat::Value - properties: - type: json - value: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('ansible_group_vars')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - FirewallRules: - type: OS::Heat::Value - properties: - type: json - value: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_rules')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - FirewallFrontendRules: - type: OS::Heat::Value - properties: - type: json - value: - frontend: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_frontend_rules')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - ssl_frontend: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_ssl_frontend_rules')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - edge_frontend: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_edge_frontend_rules')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - edge_sslfrontend: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_edge_ssl_frontend_rules')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - -outputs: - role_data: - description: Combined Role data for this set of services. 
- value: - service_names: {get_attr: [ServiceNames, value]} - monitoring_subscriptions: {get_attr: [MonitoringSubscriptionsConfig, value]} - config_settings: - yaql: - expression: $.data.where($ != null).reduce($1.mergeWith($2), {}) - data: {get_attr: [ServiceChain, role_data, config_settings]} - global_config_settings: {get_attr: [GlobalConfigSettings, value]} - service_config_settings: {get_attr: [ServiceConfigSettings, value]} - step_config: {get_attr: [PuppetStepConfig, value]} - deploy_steps_tasks: {get_attr: [DeployStepsTasks, value]} - external_deploy_tasks: {get_attr: [ExternalDeployTasks, value]} - external_post_deploy_tasks: {get_attr: [ExternalPostDeployTasks, value]} - external_update_tasks: {get_attr: [ExternalUpdateTasks, value]} - external_upgrade_tasks: {get_attr: [ExternalUpgradeTasks, value]} - scale_tasks: {get_attr: [ScaleTasks, value]} - pre_upgrade_rolling_tasks: {get_attr: [PreUpgradeRollingTasks, value]} - upgrade_tasks: {get_attr: [UpgradeTasks, value]} - post_upgrade_tasks: {get_attr: [PostUpgradeTasks, value]} - update_tasks: {get_attr: [UpdateTasks, value]} - post_update_tasks: {get_attr: [PostUpdateTasks, value]} - upgrade_batch_tasks: {get_attr: [UpgradeBatchTasks, value]} - service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]} - ansible_group_vars: - map_merge: - - ansible_async_dir: "/tmp/.ansible_async" - - {get_attr: [AnsibleGroupVars, value]} - keystone_resources: {get_attr: [KeystoneResourcesConfigs, value]} - - # Firewall rules - firewall_rules: {get_attr: [FirewallRules, value]} - firewall_frontend_rules: {get_attr: [FirewallFrontendRules, value, frontend]} - firewall_ssl_frontend_rules: {get_attr: [FirewallFrontendRules, value, ssl_frontend]} - firewall_edge_frontend_rules: {get_attr: [FirewallFrontendRules, value, edge_frontend]} - firewall_edge_ssl_frontend_rules: {get_attr: [FirewallFrontendRules, value, edge_ssl_frontend]} - - # Keys to support docker/services - puppet_config: {get_attr: [PuppetConfig, value]} - kolla_config: {get_attr: [KollaConfig, value]} - docker_config: {get_attr: [DockerConfig, value]} - container_config_scripts: {get_attr: [DockerConfigScripts, value]} - container_puppet_tasks: - map_merge: - - {get_attr: [ContainerPuppetTasks, value]} - - {get_attr: [DockerPuppetTasks, value]} - host_prep_tasks: - list_concat: - - {get_attr: [HostFirewallTasks, value]} - - {get_attr: [HostPrepTasks, value]} - pre_deploy_step_tasks: {get_attr: [PreDeployStepTasks, value]} diff --git a/config-download-software.yaml b/config-download-software.yaml deleted file mode 100644 index 3b60f7322e..0000000000 --- a/config-download-software.yaml +++ /dev/null @@ -1,74 +0,0 @@ -heat_template_version: wallaby - -parameters: - - name: - type: string - default: "" - - actions: - type: comma_delimited_list - default: ['CREATE', 'UPDATE'] - - server: - type: string - - config: - type: string - - input_values: - type: json - description: input values for the software deployments - default: {} - - input_key: - type: string - default: 'get_input' - - signal_transport: - type: string - default: 'CFN_SIGNAL' - - input_values_validate: - type: string - default: 'LAX' - -resources: - - TripleODeployment: - type: OS::Heat::Value - properties: - value: - name: {get_param: name} - server: {get_param: server} - config: {get_param: config} - input_values: {get_param: input_values} - deployment: {get_resource: TripleOSoftwareDeployment} - - TripleOSoftwareDeployment: - type: OS::Heat::SoftwareDeployment - properties: - name: deployment_resource - 
config: {get_param: config} - server: 'fake_server_id' - input_values: {get_param: input_values} - signal_transport: NO_SIGNAL - actions: {get_param: actions} - -outputs: - - deploy_status_code: - value: 0 - deploy_stderr: - value: '' - deploy_stdout: - value: '' - show: - value: '' - - update_managed_packages: - description: boolean value indicating whether to upgrade managed packages - value: false - hostname: - description: hostname - value: '' diff --git a/config-download-structured.yaml b/config-download-structured.yaml deleted file mode 100644 index 0f1d812a5f..0000000000 --- a/config-download-structured.yaml +++ /dev/null @@ -1,74 +0,0 @@ -heat_template_version: wallaby - -parameters: - - name: - type: string - default: "" - - actions: - type: comma_delimited_list - default: ['CREATE', 'UPDATE'] - - server: - type: string - - config: - type: string - - input_values: - type: json - description: input values for the software deployments - default: {} - - input_key: - type: string - default: 'get_input' - - signal_transport: - type: string - default: 'CFN_SIGNAL' - - input_values_validate: - type: string - default: 'LAX' - -resources: - - TripleODeployment: - type: OS::Heat::Value - properties: - value: - name: {get_param: name} - server: {get_param: server} - config: {get_param: config} - input_values: {get_param: input_values} - deployment: {get_resource: TripleOSoftwareDeployment} - - TripleOSoftwareDeployment: - type: OS::Heat::StructuredDeployment - properties: - name: deployment_resource - config: {get_param: config} - server: 'fake_server_id' - input_values: {get_param: input_values} - signal_transport: NO_SIGNAL - actions: {get_param: actions} - -outputs: - - deploy_status_code: - value: 0 - deploy_stderr: - value: '' - deploy_stdout: - value: '' - show: - value: '' - - update_managed_packages: - description: boolean value indicating whether to upgrade managed packages - value: false - hostname: - description: hostname - value: '' diff --git a/container_config_scripts/__init__.py b/container_config_scripts/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/container_config_scripts/monitoring/collectd_check_health.py b/container_config_scripts/monitoring/collectd_check_health.py deleted file mode 100755 index 8331c6af0a..0000000000 --- a/container_config_scripts/monitoring/collectd_check_health.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright 2022 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
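The collectd_check_health.py body that follows builds a `podman-remote ... | jq ...` pipeline by wiring one subprocess.Popen's stdout into the next command's stdin (see its execute() helper). A minimal sketch of that piping pattern, with echo and tr standing in for the real commands purely for illustration:

    import subprocess

    # First process writes to a pipe; the second reads that pipe as stdin,
    # mirroring how execute() chains podman-remote into jq.
    first = subprocess.Popen(["echo", "healthy"], stdout=subprocess.PIPE)
    second = subprocess.Popen(["tr", "a-z", "A-Z"], stdin=first.stdout,
                              stdout=subprocess.PIPE)
    first.stdout.close()  # lets SIGPIPE reach the producer if the consumer exits early
    out, _ = second.communicate()
    print(out.decode().strip())  # -> HEALTHY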
- -import json -import os -import shutil -import subprocess -import sys - - -SOCKET = "unix:/run/podman/podman.sock" -FORMAT = ("{service: .Name, container: .Id, status: .State.Running, " - "healthy: .State.Health.Status}") -SKIP_LIST = ['_bootstrap', 'container-puppet-', '_db_sync', - '_ensure_', '_fix_', '_init_', '_map_', '_wait_', - 'mysql_data_ownership', 'configure_cms_options'] - - -def execute(cmd, workdir: str = None, - prev_proc: subprocess.Popen = None) -> subprocess.Popen: - # Note(mmagr): When this script is executed by collectd-sensubility started - # via collectd the script has non-root permission but inherits - # environment from collectd with root permission. We need - # to avoid sensubility access /root when using podman-remote. - # See https://bugzilla.redhat.com/show_bug.cgi?id=2091076 for - # more info. - proc_env = os.environ.copy() - proc_env["HOME"] = "/tmp" - if type(cmd[0]) is list: # multiple piped commands - last = prev_proc - for c in cmd: - last = execute(c, workdir, last) - return last - else: # single command - inpipe = prev_proc.stdout if prev_proc is not None else None - proc = subprocess.Popen(cmd, cwd=workdir, env=proc_env, stdin=inpipe, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - if prev_proc is not None: - prev_proc.stdout.close() - prev_proc.stderr.close() - return proc - - -def fetch_container_health(containers): - out = [] - for cont in set(containers.split('\n')) - set(SKIP_LIST): - if not cont: - continue - proc = execute([ - [shutil.which('podman-remote'), - '--url', SOCKET, 'inspect', cont], - [shutil.which('jq'), '.[] | %s' % FORMAT] - ]) - o, e = proc.communicate() - if proc.returncode != 0: - msg = "Failed to fetch status of %s: %s" % (cont, e.decode()) - return proc.returncode, msg - - item = json.loads(o.decode()) - if len(item['healthy']) > 0: - item['status'] = item['healthy'] - else: - item['status'] = 'running' if item['status'] else 'stopped' - - item['healthy'] = int(item['healthy'] == 'healthy') - out.append(item) - return 0, out - - -if __name__ == "__main__": - proc = execute([shutil.which('podman-remote'), '--url', SOCKET, - 'ps', '--all', '--format', '{{.Names}}']) - o, e = proc.communicate() - if proc.returncode != 0: - print("Failed to list containers:\n%s\n%s" % (o.decode(), e.decode())) - sys.exit(1) - - rc, status = fetch_container_health(o.decode()) - if rc != 0: - print("Failed to inspect containers:\n%s" % status) - sys.exit(rc) - print(json.dumps(status)) diff --git a/container_config_scripts/mysql_upgrade_db.sh b/container_config_scripts/mysql_upgrade_db.sh deleted file mode 100755 index 610bf1307a..0000000000 --- a/container_config_scripts/mysql_upgrade_db.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -set -e - -# Wait until we know the mysql server is up and responding -timeout ${DB_MAX_TIMEOUT:-60} /bin/bash -c 'until mysqladmin -uroot ping 2>/dev/null; do sleep 1; done' - -# After an upgrade, make sure that the running mysql had a chance to -# update its data table on disk. -mysql_upgrade - -# Upgrade to 10.3: the default table row format changed from COMPACT -# to DYNAMIC, so upgrade the existing tables. 
-compact_tables=$(mysql -se 'SELECT CONCAT("`",TABLE_SCHEMA,"`.`",TABLE_NAME,"`") FROM information_schema.tables WHERE ENGINE = "InnoDB" and ROW_FORMAT = "Compact";'); -for i in $compact_tables; do echo converting row format of table $i; mysql -e "ALTER TABLE $i ROW_FORMAT=DYNAMIC;"; done; diff --git a/container_config_scripts/nova_api_ensure_default_cells.py b/container_config_scripts/nova_api_ensure_default_cells.py deleted file mode 100644 index 8e41e3eb7b..0000000000 --- a/container_config_scripts/nova_api_ensure_default_cells.py +++ /dev/null @@ -1,161 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2022 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from configparser import ConfigParser -import logging -import os -import subprocess -import sys -from urllib import parse as urlparse - -config = ConfigParser(strict=False) - -debug = os.getenv('__OS_DEBUG', 'false') - -if debug.lower() == 'true': - loglevel = logging.DEBUG -else: - loglevel = logging.INFO - -logging.basicConfig(stream=sys.stdout, level=loglevel) -LOG = logging.getLogger('nova_api_ensure_default_cells') - -NOVA_CFG = '/etc/nova/nova.conf' -CELL0_ID = '00000000-0000-0000-0000-000000000000' -DEFAULT_CELL_NAME = 'default' - - -def template_netloc_credentials(netloc, index=None): - if '@' in netloc: - userpass, hostport = netloc.split('@', 1) - has_pass = ':' in userpass - if index is None: - cred_template = '{username}' - if has_pass: - cred_template += ':{password}' - else: - cred_template = '{{username{index}}}'.format(index=index) - if has_pass: - cred_template += ':{{password{index}}}'.format(index=index) - return '@'.join((cred_template, hostport)) - else: - return netloc - - -def template_url(url): - parsed = urlparse.urlparse(url) - if ',' in parsed.netloc: - orig_netlocs = parsed.netloc.split(',') - templ_netlocs = [] - index = 0 - for netloc in orig_netlocs: - index += 1 - templ_netlocs.append(template_netloc_credentials(netloc, index)) - new_netloc = ','.join(templ_netlocs) - else: - new_netloc = template_netloc_credentials(parsed.netloc) - return parsed._replace(netloc=new_netloc).geturl() - - -def parse_list_cells(list_cells_output): - list_cells_lines = list_cells_output.split('\n') - if len(list_cells_lines) < 5: - raise ValueError('Invalid nova-manage cell_v2 list_cells output') - - data_rows = list_cells_lines[3:-2] - by_name = {} - by_uuid = {} - - for row in data_rows: - parts = row.split('|') - entry = { - 'name': parts[1].strip(), - 'uuid': parts[2].strip(), - 'transport_url': parts[3].strip(), - 'database_connection': parts[4].strip(), - } - by_name[entry['name']] = entry - by_uuid[entry['uuid']] = entry - - return by_name, by_uuid - - -def create_or_update_default_cells(cell0_db, default_db, default_transport_url): - list_cells_cmd = ['/usr/bin/nova-manage', 'cell_v2', 'list_cells', '--verbose'] - list_cells_output = subprocess.check_output(list_cells_cmd, encoding='utf-8') - cells_by_name, cells_by_uuid = parse_list_cells(list_cells_output) - - if CELL0_ID in cells_by_uuid: - 
LOG.info('Setting cell0 database connection to \'{}\''.format(cell0_db)) - cmd = [ - '/usr/bin/nova-manage', 'cell_v2', 'update_cell', - '--cell_uuid', CELL0_ID, - '--database_connection', cell0_db, - '--transport-url', 'none:///' - ] - else: - LOG.info('Creating cell0 with database connection \'{}\''.format(cell0_db)) - cmd = [ - '/usr/bin/nova-manage', 'cell_v2', 'map_cell0', - '--database_connection', cell0_db - ] - subprocess.check_call(cmd) - - if DEFAULT_CELL_NAME in cells_by_name: - LOG.info('Setting default cell database connection to \'{}\' and transport url to \'{}\''.format( - default_db, default_transport_url)) - cmd = [ - '/usr/bin/nova-manage', 'cell_v2', 'update_cell', - '--cell_uuid', cells_by_name[DEFAULT_CELL_NAME]['uuid'], - '--database_connection', default_db, - '--transport-url', default_transport_url - ] - else: - LOG.info('Creating default cell with database connection \'{}\' and transport url \'{}\''.format( - default_db, default_transport_url)) - cmd = [ - '/usr/bin/nova-manage', 'cell_v2', 'create_cell', - '--name', DEFAULT_CELL_NAME, - '--database_connection', default_db, - '--transport-url', default_transport_url - ] - subprocess.check_call(cmd) - - -def replace_db_name(db_url, db_name): - return urlparse.urlparse(db_url)._replace(path=db_name).geturl() - - -if __name__ == '__main__': - if os.path.isfile(NOVA_CFG): - try: - config.read(NOVA_CFG) - except Exception: - LOG.exception('Error while reading nova.conf:') - sys.exit(1) - else: - LOG.error('Nova configuration file %s does not exist', NOVA_CFG) - sys.exit(1) - - default_database_connection = config.get('database', 'connection') - cell0_database_connection = replace_db_name(default_database_connection, 'nova_cell0') - default_transport_url = config.get('DEFAULT', 'transport_url') - - create_or_update_default_cells( - template_url(cell0_database_connection), - template_url(default_database_connection), - template_url(default_transport_url) - ) diff --git a/container_config_scripts/nova_libvirt_init_secret.sh b/container_config_scripts/nova_libvirt_init_secret.sh deleted file mode 100755 index 52f9e491ea..0000000000 --- a/container_config_scripts/nova_libvirt_init_secret.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash - -set -e - -CEPH_INFO=($*) - -if [ -z "$CEPH_INFO" ]; then - echo "error: At least one CLUSTER:CLIENT tuple must be specified" - exit 1 -fi - -echo "------------------------------------------------" -echo "Initializing virsh secrets for: ${CEPH_INFO[@]}" - -for INFO in ${CEPH_INFO[@]}; do - IFS=: read CLUSTER CLIENT <<< $INFO - if [ ! -f /etc/ceph/${CLUSTER}.conf ]; then - echo "Error: /etc/ceph/${CLUSTER}.conf was not found" - echo "Path to nova_libvirt_init_secret was ${CEPH_INFO}" - exit 1 - fi - FSID=$(awk '$1 == "fsid" {print $3}' /etc/ceph/${CLUSTER}.conf) - if [ -z "${FSID}" ]; then - echo "Error: /etc/ceph/${CLUSTER}.conf contained an empty fsid definition" - echo "Check your ceph configuration" - exit 1 - fi - - echo "--------" - echo "Initializing the virsh secret for '$CLUSTER' cluster ($FSID) '$CLIENT' client" - - # Ensure the secret XML file exists. Puppet should have created a secret.xml - # file for the first cluster's secret, so detect when to use that file. - if grep -q $FSID /etc/nova/secret.xml; then - SECRET_FILE="/etc/nova/secret.xml" - SECRET_NAME="client.${CLIENT} secret" - else - SECRET_FILE="/etc/nova/${CLUSTER}-secret.xml" - SECRET_NAME="${CLUSTER}.client.${CLIENT} secret" - fi - - if [ ! 
-f $SECRET_FILE ]; then - echo "Creating $SECRET_FILE" - cat < $SECRET_FILE - - - ${SECRET_NAME} - - ${FSID} - -EOF - else - echo "The $SECRET_FILE file already exists" - fi - - # Ensure the libvirt secret is defined - if /usr/bin/virsh secret-list | grep -q $FSID; then - echo "The virsh secret for $FSID has already been defined" - else - /usr/bin/virsh secret-define --file $SECRET_FILE - fi - - # Fetch the key from the keyring and ensure the secret is set - KEY=$(awk '$1 == "key" {print $3}' /etc/ceph/${CLUSTER}.client.${CLIENT}.keyring) - if /usr/bin/virsh secret-get-value $FSID 2>/dev/null | grep -q $KEY; then - echo "The virsh secret for $FSID has already been set" - else - /usr/bin/virsh secret-set-value --secret $FSID --base64 $KEY - fi -done diff --git a/container_config_scripts/nova_statedir_ownership.py b/container_config_scripts/nova_statedir_ownership.py deleted file mode 100644 index 2978a32f4d..0000000000 --- a/container_config_scripts/nova_statedir_ownership.py +++ /dev/null @@ -1,247 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2018 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import logging -import os -import pwd -import selinux -import stat -import sys - -debug = os.getenv('__OS_DEBUG', 'false') - -if debug.lower() == 'true': - loglevel = logging.DEBUG -else: - loglevel = logging.INFO - -logging.basicConfig(stream=sys.stdout, level=loglevel) -LOG = logging.getLogger('nova_statedir') - - -class PathManager(object): - """Helper class to manipulate ownership of a given path""" - def __init__(self, path): - self.path = path - self.uid = None - self.gid = None - self.is_dir = None - self.secontext = None - self._update() - - def _update(self): - try: - statinfo = os.stat(self.path) - self.is_dir = stat.S_ISDIR(statinfo.st_mode) - self.uid = statinfo.st_uid - self.gid = statinfo.st_gid - self.secontext = selinux.lgetfilecon(self.path)[1] - except Exception: - LOG.exception('Could not update metadata for %s', self.path) - raise - - def __str__(self): - return "uid: {} gid: {} path: {}{}".format( - self.uid, - self.gid, - self.path, - '/' if self.is_dir else '' - ) - - def has_owner(self, uid, gid): - return self.uid == uid and self.gid == gid - - def has_either(self, uid, gid): - return self.uid == uid or self.gid == gid - - def chown(self, uid, gid): - target_uid = -1 - target_gid = -1 - if self.uid != uid: - target_uid = uid - if self.gid != gid: - target_gid = gid - if (target_uid, target_gid) != (-1, -1): - LOG.info('Changing ownership of %s from %d:%d to %d:%d', - self.path, - self.uid, - self.gid, - self.uid if target_uid == -1 else target_uid, - self.gid if target_gid == -1 else target_gid) - try: - os.chown(self.path, target_uid, target_gid) - self._update() - except Exception: - LOG.exception('Could not change ownership of %s: ', - self.path) - raise - else: - LOG.info('Ownership of %s already %d:%d', - self.path, - uid, - gid) - - def chcon(self, context): - # If dir returns whether to recursively set context - try: - try: - selinux.lsetfilecon(self.path, 
context) - LOG.info('Setting selinux context of %s to %s', - self.path, context) - return True - except OSError as e: - if self.is_dir and e.errno == 95: - # Operation not supported, assume NFS mount and skip - LOG.info('Setting selinux context not supported for %s', - self.path) - return False - else: - raise - except Exception: - LOG.exception('Could not set selinux context of %s to %s:', - self.path, context) - raise - - -class NovaStatedirOwnershipManager(object): - """Class to manipulate the ownership of the nova statedir (/var/lib/nova). - - The nova uid/gid differ on the host and container images. An upgrade - that switches from host systemd services to docker requires a change in - ownership. Previously this was a naive recursive chown, however this - causes issues if nova instance are shared via an NFS mount: any open - filehandles in qemu/libvirt fail with an I/O error (LP1778465). - - Instead the upgrade/FFU ansible tasks now lay down a marker file when - stopping and disabling the host systemd services. We use this file to - determine the host nova uid/gid. We then walk the tree and update any - files that have the host uid/gid to the docker nova uid/gid. As files - owned by root/qemu etc... are ignored this avoids the issues with open - filehandles. The marker is removed once the tree has been walked. - - For subsequent runs, or for a new deployment, we simply ensure that the - docker nova user/group owns all directories. This is required as the - directories are created with root ownership in host_prep_tasks (the - docker nova uid/gid is not known in this context). - """ - def __init__(self, statedir, upgrade_marker='upgrade_marker', - nova_user='nova', secontext_marker='../_nova_secontext', - exclude_paths=None): - self.statedir = statedir - self.nova_user = nova_user - - self.upgrade_marker_path = os.path.join(statedir, upgrade_marker) - self.secontext_marker_path = os.path.normpath(os.path.join(statedir, secontext_marker)) - self.upgrade = os.path.exists(self.upgrade_marker_path) - - self.exclude_paths = [self.upgrade_marker_path] - if exclude_paths is not None: - for p in exclude_paths: - if not p.startswith(os.path.sep): - p = os.path.join(self.statedir, p) - self.exclude_paths.append(p) - - self.target_uid, self.target_gid = self._get_nova_ids() - self.previous_uid, self.previous_gid = self._get_previous_nova_ids() - self.id_change = (self.target_uid, self.target_gid) != \ - (self.previous_uid, self.previous_gid) - self.target_secontext = self._get_secontext() - - def _get_nova_ids(self): - nova_uid, nova_gid = pwd.getpwnam(self.nova_user)[2:4] - return nova_uid, nova_gid - - def _get_previous_nova_ids(self): - if self.upgrade: - statinfo = os.stat(self.upgrade_marker_path) - return statinfo.st_uid, statinfo.st_gid - else: - return self._get_nova_ids() - - def _get_secontext(self): - if os.path.exists(self.secontext_marker_path): - return selinux.lgetfilecon(self.secontext_marker_path)[1] - else: - return None - - def _walk(self, top, chcon=True): - for f in os.listdir(top): - pathname = os.path.join(top, f) - - if pathname in self.exclude_paths: - continue - - try: - pathinfo = PathManager(pathname) - LOG.info("Checking %s", pathinfo) - if pathinfo.is_dir: - # Always chown the directories - pathinfo.chown(self.target_uid, self.target_gid) - chcon_r = chcon - if chcon: - chcon_r = pathinfo.chcon(self.target_secontext) - self._walk(pathname, chcon_r) - elif self.id_change: - # Only chown files if it's an upgrade and the file is owned by - # the host nova uid/gid - 
pathinfo.chown( - self.target_uid if pathinfo.uid == self.previous_uid - else pathinfo.uid, - self.target_gid if pathinfo.gid == self.previous_gid - else pathinfo.gid - ) - if chcon: - pathinfo.chcon(self.target_secontext) - except Exception: - # Likely to have been caused by external systems - # interacting with this directory tree, - # especially on NFS e.g snapshot dirs. - # Just ignore it and continue on to the next entry - continue - - def run(self): - LOG.info('Applying nova statedir ownership') - LOG.info('Target ownership for %s: %d:%d', - self.statedir, - self.target_uid, - self.target_gid) - - pathinfo = PathManager(self.statedir) - LOG.info("Checking %s", pathinfo) - pathinfo.chown(self.target_uid, self.target_gid) - chcon = self.target_secontext is not None - - if chcon: - pathinfo.chcon(self.target_secontext) - - self._walk(self.statedir, chcon) - - if self.upgrade: - LOG.info('Removing upgrade_marker %s', - self.upgrade_marker_path) - os.unlink(self.upgrade_marker_path) - - LOG.info('Nova statedir ownership complete') - - -def get_exclude_paths(): - exclude_paths = os.environ.get('NOVA_STATEDIR_OWNERSHIP_SKIP') - if exclude_paths is not None: - exclude_paths = exclude_paths.split(os.pathsep) - return exclude_paths - - -if __name__ == '__main__': - NovaStatedirOwnershipManager('/var/lib/nova', exclude_paths=get_exclude_paths()).run() diff --git a/container_config_scripts/nova_wait_for_api_service.py b/container_config_scripts/nova_wait_for_api_service.py deleted file mode 100755 index 17d2bf29e3..0000000000 --- a/container_config_scripts/nova_wait_for_api_service.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2018 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
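The nova_statedir_ownership.py script above reads NOVA_STATEDIR_OWNERSHIP_SKIP, splits it on os.pathsep, and anchors any relative entry under the statedir before excluding it from the ownership walk. A small sketch of that path handling, using made-up values:

    import os

    # Hypothetical skip list; relative entries are joined onto the statedir,
    # absolute ones are kept as-is, as in NovaStatedirOwnershipManager.__init__.
    skip = "triliovault-mounts" + os.pathsep + "/var/lib/nova/instances/snapshots"
    statedir = "/var/lib/nova"
    exclude_paths = []
    for p in skip.split(os.pathsep):
        if not p.startswith(os.path.sep):
            p = os.path.join(statedir, p)
        exclude_paths.append(p)
    print(exclude_paths)
    # ['/var/lib/nova/triliovault-mounts', '/var/lib/nova/instances/snapshots']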
-import argparse -from configparser import ConfigParser -import logging -import logging.handlers -import os -import sys -import time - - -from keystoneauth1 import loading -from keystoneauth1 import session - -from novaclient import client -from novaclient.exceptions import ClientException - - -config = ConfigParser(strict=False) - -debug = os.getenv('__OS_DEBUG', 'false') - -if debug.lower() == 'true': - loglevel = logging.DEBUG -else: - loglevel = logging.INFO - -LOG = logging.getLogger('nova_wait_for_api_service') -LOG_FORMAT = ('%(asctime)s.%(msecs)03d %(levelname)s ' - '%(name)s %(message)s') -DATE_FORMAT = '%Y-%m-%d %H:%M:%S' -LOG.handlers.clear() -LOG.setLevel(loglevel) -LOG.propagate = True -formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT) -stream_handler = logging.StreamHandler(sys.stdout) -stream_handler.setFormatter(formatter) -LOG.addHandler(stream_handler) - -iterations = 60 -timeout = 10 -nova_cfg = '/etc/nova/nova.conf' - -if __name__ == '__main__': - parser = argparse.ArgumentParser(usage='%(prog)s [options]') - parser.add_argument('-k', '--insecure', - action="store_false", - dest='insecure', - default=True, - help='Allow insecure connection when using SSL') - - args = parser.parse_args() - LOG.debug('Running with parameter insecure = %s', - args.insecure) - - if os.path.isfile(nova_cfg): - try: - config.read(nova_cfg) - except Exception: - LOG.exception('Error while reading nova.conf:') - else: - LOG.error('Nova configuration file %s does not exist', nova_cfg) - sys.exit(1) - - loader = loading.get_plugin_loader('password') - auth = loader.load_from_options( - auth_url=config.get('neutron', - 'auth_url'), - username=config.get('neutron', - 'username'), - password=config.get('neutron', - 'password'), - project_name=config.get('neutron', - 'project_name'), - project_domain_name=config.get('neutron', - 'project_domain_name'), - user_domain_name=config.get('neutron', - 'user_domain_name')) - sess = session.Session(auth=auth, verify=args.insecure) - - # Wait until this host is listed in the service list - for i in range(iterations): - try: - nova = client.Client('2.11', session=sess, endpoint_type='internal') - nova.versions.list() - LOG.info('Nova-api service active') - sys.exit(0) - except ClientException: - LOG.info('Waiting for nova-api service') - except Exception: - LOG.exception( - 'Error while waiting for nova-api service') - time.sleep(timeout) -sys.exit(1) - -# vim: set et ts=4 sw=4 : diff --git a/container_config_scripts/nova_wait_for_compute_service.py b/container_config_scripts/nova_wait_for_compute_service.py deleted file mode 100644 index 61e0d618a2..0000000000 --- a/container_config_scripts/nova_wait_for_compute_service.py +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2018 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import argparse -from configparser import ConfigParser -import logging -import logging.handlers -import os -import socket -import sys -import time - - -from keystoneauth1 import loading -from keystoneauth1 import session - -from novaclient import client - - -config = ConfigParser(strict=False) - -debug = os.getenv('__OS_DEBUG', 'false') - -if debug.lower() == 'true': - loglevel = logging.DEBUG -else: - loglevel = logging.INFO - -LOG = logging.getLogger('nova_wait_for_compute_service') -LOG_FORMAT = ('%(asctime)s.%(msecs)03d %(levelname)s ' - '%(name)s %(message)s') -DATE_FORMAT = '%Y-%m-%d %H:%M:%S' -LOG.handlers.clear() -LOG.setLevel(loglevel) -LOG.propagate = True -formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT) -stream_handler = logging.StreamHandler(sys.stdout) -stream_handler.setFormatter(formatter) -LOG.addHandler(stream_handler) - -iterations = 60 -timeout = 10 -nova_cfg = '/etc/nova/nova.conf' - -if __name__ == '__main__': - parser = argparse.ArgumentParser(usage='%(prog)s [options]') - parser.add_argument('-k', '--insecure', - action="store_false", - dest='insecure', - default=True, - help='Allow insecure connection when using SSL') - - args = parser.parse_args() - LOG.debug('Running with parameter insecure = %s', - args.insecure) - - if os.path.isfile(nova_cfg): - try: - config.read(nova_cfg) - except Exception: - LOG.exception('Error while reading nova.conf:') - else: - LOG.error('Nova configuration file %s does not exist', nova_cfg) - sys.exit(1) - - my_host = config.get('DEFAULT', 'host') - if not my_host: - # If host isn't set nova defaults to this - my_host = socket.gethostname() - - loader = loading.get_plugin_loader('password') - auth = loader.load_from_options( - auth_url=config.get('neutron', - 'auth_url'), - username=config.get('neutron', - 'username'), - password=config.get('neutron', - 'password'), - project_name=config.get('neutron', - 'project_name'), - project_domain_name=config.get('neutron', - 'project_domain_name'), - user_domain_name=config.get('neutron', - 'user_domain_name')) - sess = session.Session(auth=auth, verify=args.insecure) - nova = client.Client('2.11', session=sess, endpoint_type='internal', - region_name=config.get('neutron', 'region_name')) - - # Wait until this host is listed in the service list - for i in range(iterations): - try: - service_list = nova.services.list(binary='nova-compute') - for entry in service_list: - host = getattr(entry, 'host', '') - zone = getattr(entry, 'zone', '') - if host == my_host and zone != 'internal': - LOG.info('Nova-compute service registered') - sys.exit(0) - LOG.info('Waiting for nova-compute service to register') - except Exception: - LOG.exception( - 'Error while waiting for nova-compute service to register') - time.sleep(timeout) -sys.exit(1) - -# vim: set et ts=4 sw=4 : diff --git a/container_config_scripts/pacemaker_mutex_restart_bundle.sh b/container_config_scripts/pacemaker_mutex_restart_bundle.sh deleted file mode 100755 index 063b7e3c8f..0000000000 --- a/container_config_scripts/pacemaker_mutex_restart_bundle.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash - -# pacemaker_mutex_restart_bundle.sh --lock mysql galera galera-bundle Master _ -# pacemaker_mutex_restart_bundle.sh --lock ovn_dbs ovndb_servers ovn-dbs-bundle Slave Master - -set -u - -usage() { - echo "Restart a clustered resource in a coordinated way across the cluster" - echo "Usage:" - echo " $0 --lock " - echo -} - -log() { - echo "$(date -u): $1" -} - -error() { - echo "$(date -u): $1" 1>&2 - exit 1 -} - 
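Both nova_wait_for_api_service.py and nova_wait_for_compute_service.py above share the same shape: poll the API at most `iterations` times, sleeping `timeout` seconds between attempts, and exit on the first success. A condensed sketch of that loop and of the compute-registration predicate, with the novaclient calls replaced by placeholders:

    import time

    def wait_for(check, iterations=60, timeout=10):
        """Return True on the first successful check, False if we run out of tries."""
        for _ in range(iterations):
            try:
                if check():
                    return True
            except Exception:
                pass  # the real scripts log the error and keep retrying
            time.sleep(timeout)
        return False

    # Registration predicate used by the compute variant, against canned data.
    my_host = "compute-0.example.com"   # hypothetical nova.conf [DEFAULT]/host
    services = [{"host": "compute-0.example.com", "zone": "nova"}]
    registered = any(s["host"] == my_host and s["zone"] != "internal" for s in services)
    print(registered)  # True once nova-compute is listed outside the 'internal' zone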
-pacemaker_supports_promoted() { - # The Promoted token is only matched in recent pacemaker versions - grep -wq "Promoted" /usr/share/pacemaker/resources-*.rng -} - -ACTION=$1 -case $ACTION in - --help) usage; exit 0;; - --lock) ;; - *) error "Unknown action '$ACTION'";; -esac - -TRIPLEO_SERVICE=$2 -LOCK_NAME=${TRIPLEO_SERVICE}-restart-lock -LOCK_OWNER=$(crm_node -n 2>/dev/null) -rc=$? -if [ $rc -ne 0 ]; then - if [ $rc -eq 102 ]; then - log "Cluster is not running locally, no need to restart resource $TRIPLEO_SERVICE" - exit 0 - else - error "Unexpected error while connecting to the cluster (rc: $rc), bailing out" - fi -fi - -RESOURCE_NAME=$3 -BUNDLE_NAME=$4 -WAIT_TARGET_LOCAL=$5 -WAIT_TARGET_ANYWHERE=${6:-_} - -if pacemaker_supports_promoted; then - WAIT_TARGET_LOCAL=$(echo "$5" | sed -e 's/Master/Promoted/' -e 's/Slave/Unpromoted/') - WAIT_TARGET_ANYWHERE=$(echo "${6:-_}" | sed -e 's/Master/Promoted/' -e 's/Slave/Unpromoted/') - promoted_role="Promoted" -else - promoted_role="Master" -fi - -# The lock TTL should accommodate for the resource start/promote timeout -if [ "$RESOURCE_NAME" != "$BUNDLE_NAME" ]; then - if [ "$WAIT_TARGET_LOCAL" = "$promoted_role" ] || [ "$WAIT_TARGET_ANYWHERE" = "$promoted_role" ]; then - rsc_op="promote" - else - rsc_op="start" - fi - # - PCMK_TTL=$(cibadmin -Q | xmllint -xpath "string(//primitive[@id='${RESOURCE_NAME}']/operations/op[@name='${rsc_op}']/@timeout)" - | sed 's/s$//') - LOCK_TTL=$((PCMK_TTL + 30)) -else - # The podman RA's default start timeout - LOCK_TTL=90 -fi - -log "Acquire a ${LOCK_TTL}s restart lock for service $TRIPLEO_SERVICE before restarting it" -# Loop until we hold the lock. The lock has a TTL, so we're guaranteed to get it eventually -rc=1 -while [ $rc -ne 0 ]; do - /var/lib/container-config-scripts/pacemaker_resource_lock.sh --acquire $LOCK_NAME $LOCK_OWNER $LOCK_TTL - rc=$? - if [ $rc != 0 ]; then - if [ $rc -gt 1 ]; then - error "Could not acquire lock due to unrecoverable error (rc: $rc), bailing out" - else - log "Could not acquire lock, retrying" - sleep 10 - fi - fi -done - -log "Restart the service $TRIPLEO_SERVICE locally" -# Reuse the local restart script in t-h-t (driven by env var TRIPLEO_MINOR_UPDATE) -TRIPLEO_MINOR_UPDATE=true /var/lib/container-config-scripts/pacemaker_restart_bundle.sh $TRIPLEO_SERVICE $RESOURCE_NAME $BUNDLE_NAME $WAIT_TARGET_LOCAL $WAIT_TARGET_ANYWHERE - -# If we reached this point, always try to release the lock -log "Release the restart lock for service $TRIPLEO_SERVICE" -/var/lib/container-config-scripts/pacemaker_resource_lock.sh --release $LOCK_NAME $LOCK_OWNER -rc=$? -if [ $rc -ne 0 ] && [ $rc -ne 1 ]; then - error "Could not release held lock (rc: $rc)" -fi diff --git a/container_config_scripts/pacemaker_mutex_shutdown.sh b/container_config_scripts/pacemaker_mutex_shutdown.sh deleted file mode 100755 index 1f168a16c6..0000000000 --- a/container_config_scripts/pacemaker_mutex_shutdown.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/bin/bash - -# pacemaker_mutex_shutdown.sh --acquire -# pacemaker_mutex_shutdown.sh --release - -set -u - -usage() { - echo "Shutdown a cluster node in a coordinated way across the cluster" - echo "Usage:" - echo " $0 --acquire # prevent other node from shutting down until we hold the lock" - echo " $0 --release # release the lock, other node can compete for the shutdown lock" - echo -} - -log() { - echo "$(date -u): $1" -} - -error() { - echo "$(date -u): $1" 1>&2 - exit 1 -} - -# Loop until we hold the lock. 
The lock has a TTL, so we're guaranteed to get it eventually -shutdown_lock_acquire() { - local lockname=$1 - local requester=$2 - local ttl=$3 - local rc=1 - local current_owner - local owner_stopped - local owner_rc - - log "Acquiring the shutdown lock" - while [ $rc -ne 0 ]; do - /var/lib/container-config-scripts/pacemaker_resource_lock.sh --acquire-once $lockname $requester $ttl - rc=$? - if [ $rc -ne 0 ]; then - if [ $rc -eq 2 ]; then - error "Could not acquire the shutdown lock due to unrecoverable error (rc: $rc), bailing out" - else - # The lock is held by another node. - current_owner=$(/var/lib/container-config-scripts/pacemaker_resource_lock.sh --owner $lockname) - owner_rc=$? - if [ $owner_rc -eq 2 ]; then - error "Could not get the shutdown lock owner due to unrecoverable error (rc: $owner_rc), bailing out" - fi - if [ $owner_rc -eq 0 ]; then - # If the owner is marked as offline, that means it has shutdown and - # we can clean the lock preemptively and try to acquire it. - owner_stopped=$(crm_mon -1X | xmllint --xpath 'count(//nodes/node[@name="'${current_owner}'" and @online="false" and @unclean="false"])' -) - if [ "${owner_stopped}" = "1" ]; then - log "Shutdown lock held by stopped node '${current_owner}', lock can be released" - /var/lib/container-config-scripts/pacemaker_resource_lock.sh --release $lockname $current_owner - continue - fi - fi - log "Shutdown lock held by another node (rc: $rc), retrying" - sleep 10 - fi - fi - done - log "Shutdown lock acquired" - return 0 -} - - -# Release the lock if we still own it. Not owning it anymore is not fatal -shutdown_lock_release() { - local lockname=$1 - local requester=$2 - local rc - - log "Releasing the shutdown lock" - /var/lib/container-config-scripts/pacemaker_resource_lock.sh --release $lockname $requester - rc=$? - if [ $rc -ne 0 ]; then - if [ $rc -gt 1 ]; then - error "Could not release the shutdown lock due to unrecoverable error (rc: $rc), bailing out" - else - log "Shutdown lock no longer held, nothing to do" - fi - else - log "Shutdown lock released" - fi - return 0 -} - - -ACTION=$1 -if [ -z "$ACTION" ]; then - error "Action must be specified" -fi - -LOCK_NAME=tripleo-shutdown-lock -LOCK_OWNER=$(crm_node -n 2>/dev/null) -rc=$? -if [ $rc -ne 0 ]; then - if [ $rc -eq 102 ]; then - log "Cluster is not running locally, no need to acquire the shutdown lock" - exit 0 - else - error "Unexpected error while connecting to the cluster (rc: $rc), bailing out" - fi -fi - -# We start with a very high TTL, that long enough to accommodate a cluster stop. -# As soon as the node will get offline, the other competing node will be entitled -# to steal the lock, so they should never wait that long in practice. -LOCK_TTL=600 - - -case $ACTION in - --help) usage; exit 0;; - --acquire|-a) shutdown_lock_acquire ${LOCK_NAME} ${LOCK_OWNER} ${LOCK_TTL};; - --release|-r) shutdown_lock_release ${LOCK_NAME} ${LOCK_OWNER};; - *) error "Invalid action";; -esac -exit $? diff --git a/container_config_scripts/pacemaker_resource_lock.sh b/container_config_scripts/pacemaker_resource_lock.sh deleted file mode 100755 index fc4bf91250..0000000000 --- a/container_config_scripts/pacemaker_resource_lock.sh +++ /dev/null @@ -1,267 +0,0 @@ -#!/bin/bash - -MAX_RETRIES=10 -CIB_ENOTFOUND=105 - -usage() { - echo "Set a global property in the cluster with a validity timestamp." 
- echo "Usage:" - echo " $0 --acquire " - echo " $0 --release " - echo -} - -log() { - echo "$(date -u): $1" 1>&2 -} - -error() { - echo "$(date -u): $1" 1>&2 - exit 1 -} - -lock_create() { - local name=$1 - local data=$2 - # cibadmin won't overwrite a key if someone else succeeded to create it concurrently - cibadmin --sync-call --scope crm_config --create --xml-text "" &>/dev/null - return $? -} - -lock_update() { - local name=$1 - local expected_data=$2 - local new_data=$3 - # we only update the lock we expect to see, so we can't update someone else's lock - cibadmin --sync-call --scope crm_config --modify --xpath "//cluster_property_set/nvpair[@name='${name}' and @value='${expected_data}']/.." --xml-text "" &>/dev/null - return $? -} - -lock_delete() { - local name=$1 - local expected_data=$2 - # we only delete the lock we expect to see, so we can't delete someone else's lock - cibadmin --sync-call --scope crm_config --delete --xpath "//cluster_property_set/nvpair[@name='${name}' and @value='${expected_data}']/.." &>/dev/null - return $? -} - -lock_get() { - local lockname=$1 - local res - local rc - res=$(cibadmin --query --scope crm_config --xpath "//cluster_property_set/nvpair[@name='$lockname']" 2>/dev/null) - rc=$? - if [ $rc -eq 0 ]; then - echo "$res" | sed -n 's/.*value="\([^"]*\)".*/\1/p' - fi - return $rc -} - -lock_owner() { - local lock=$1 - echo "$lock" | cut -d':' -f1 -} - -lock_has_expired() { - local lock=$1 - local expiry=$(echo "$lock" | cut -d':' -f2) - local now=$(date +%s) - test $now -ge $expiry -} - - -# Perform a lock action and restart if the CIB has been modified before -# committing the lock action -try_action() { - local fun=$1 - local lock=$2 - local requester=$3 - local args=${4:-} - local tries=$MAX_RETRIES - local rc=1 - if [ "$fun" = "lock_acquire" ] || [ "$fun" = "lock_release" ]; then - log "Try running $fun" - else - return 2 - fi - while [ $rc -ne 0 ]; do - $fun $lock $requester $args - rc=$? - if [ $rc -eq 0 ]; then - log "Operation $1 succeeded" - return 0 - elif [ $rc -eq 3 ]; then - # rc == 3 -> CIB changed before push - if [ $tries -eq 0 ]; then - log "Failed to commit after $MAX_RETRIES retries. Bailing out." - return 2 - else - log "Failed to commit. Retrying operation." - tries=$(($tries - 1)) - fi - elif [ $rc -eq 2 ]; then - # rc == 2 -> unrecoverable cib error (e.g. pacemaker down) - log "Unexpected failure. Bailing out" - return $rc - else - # rc == 1 -> lock error (not owner, lock doesn't exists) - return $rc - fi - done -} - -# The lock mechanism uses cibadmin's atomic creation so cluster-wide -# state coherency is guaranteed by pacemaker -lock_acquire() { - local lockname=$1 - local requester=$2 - local ttl=$3 - local rc - local lock - local expiry - local owner - - log "Check whether the lock is already held in the CIB" - lock=$(lock_get $lockname) - rc=$? - if [ $rc -ne 0 ] && [ $rc -ne $CIB_ENOTFOUND ]; then - log "Could not retrieve info from the CIB" - return 2 - fi - - if [ -n "$lock" ]; then - lock_has_expired $lock - rc=$? - if [ $rc -eq 0 ]; then - log "Lock has expired, now available for being held" - else - # lock is still held. 
check whether we're the owner - owner=$(lock_owner $lock) - if [ "$owner" = "$requester" ];then - log "Requester already owns the lock, acquiring attempt will just reconfigure the TTL" - else - log "Lock is held by someone else ($owner)" - return 1 - fi - fi - else - log "Lock is not held yet" - fi - - # prepare the lock info - expiry=$(($(date +%s) + $ttl)) - - if [ -n "$lock" ]; then - log "Attempting to update the lock" - lock_update $lockname "$lock" "$requester:$expiry" - rc=$? - else - log "Attempting to acquire the lock" - lock_create $lockname "$requester:$expiry" - rc=$? - fi - - if [ $rc -eq 0 ]; then - log "Lock '$lockname' acquired by '$requester', valid until $(date -d @$expiry)" - return 0 - else - log "CIB changed, lock cannot be acquired" - return 3 - fi -} - - -# The lock mechanism uses the CIB's num_updates tag to implement -# a conditional store. Cluster-wide locking is guaranteed by pacemaker -lock_release() { - local lockname=$1 - local requester=$2 - local rc - local lock - local owner - - log "Check whether the lock is already held in the CIB" - lock=$(lock_get $lockname) - rc=$? - if [ $rc -ne 0 ] && [ $rc -ne $CIB_ENOTFOUND ]; then - log "Could not retrieve info from the CIB" - return 2 - fi - - if [ -z "$lock" ]; then - log "Lock doesn't exist. Nothing to release" - return 0 - else - log "Lock exists, check whether we're the owner" - owner=$(lock_owner $lock) - if [ "$owner" != "$requester" ];then - log "Lock is held by someone else ($owner), will not unlock" - return 1 - fi - fi - - lock_delete $lockname "$lock" - rc=$? - - if [ $rc -eq 0 ]; then - log "Lock '$lockname' released by '$requester'" - return 0 - else - log "CIB deletion error, lock cannot be released" - return 3 - fi -} - - -# Retrieve the owner of a lock from the CIB -# this is a read-only operation, so no need to log debug info -lock_get_owner() { - local lockname=$1 - local rc - local lock - local owner - - lock=$(lock_get $lockname) - rc=$? - if [ $rc -ne 0 ] && [ $rc -ne $CIB_ENOTFOUND ]; then - return 2 - fi - - if [ -z "$lock" ]; then - return 1 - else - lock_owner $lock - return 0 - fi -} - - -ACTION=$1 -LOCKNAME=$2 -REQUESTER=$3 -TTL=${4:-60} - -if [ -z "$ACTION" ]; then - error "Action must be specified" -fi - -if [ $ACTION != "--help" ]; then - if [ -z "$LOCKNAME" ]; then - error "You must specific a lock name" - fi - if [ $ACTION != "--owner" ] && [ $ACTION != "-o" ]; then - if [ -z "$REQUESTER" ]; then - error "You must specific a lock requester" - fi - fi -fi - -case $ACTION in - --help) usage; exit 0;; - --acquire|-a) try_action lock_acquire $LOCKNAME $REQUESTER $TTL;; - --release|-r) try_action lock_release $LOCKNAME $REQUESTER;; - --acquire-once|-A) lock_acquire $LOCKNAME $REQUESTER $TTL;; - --owner|-o) lock_get_owner $LOCKNAME;; - *) error "Invalid action";; -esac -exit $? 
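For reference, the lock this helper manages is just an "owner:expiry" string stored as a crm_config nvpair keyed by the lock name. The following is a minimal sketch of that encoding, not the exact payload used above: the property-set and nvpair ids in the XML are assumptions for illustration, and the lock name and TTL are placeholders.

#!/bin/bash
# Sketch only: write, read back and expiry-check a CIB-backed lock in the
# style of the helper above. The ids inside the XML payload are illustrative.
set -u

LOCKNAME=demo-lock                       # placeholder lock name
OWNER=$(crm_node -n)                     # this node's cluster name
TTL=60
EXPIRY=$(( $(date +%s) + TTL ))

# Atomic creation: cibadmin will not overwrite an existing property set, so
# only one node can win the creation race.
cibadmin --sync-call --scope crm_config --create --xml-text \
  "<cluster_property_set id='${LOCKNAME}'><nvpair id='${LOCKNAME}' name='${LOCKNAME}' value='${OWNER}:${EXPIRY}'/></cluster_property_set>"

# Read the value back and split it, as lock_get/lock_owner/lock_has_expired do.
value=$(cibadmin --query --scope crm_config \
          --xpath "//cluster_property_set/nvpair[@name='${LOCKNAME}']" 2>/dev/null \
        | sed -n 's/.*value="\([^"]*\)".*/\1/p')
owner=$(echo "$value" | cut -d':' -f1)
expiry=$(echo "$value" | cut -d':' -f2)
if [ "$(date +%s)" -ge "$expiry" ]; then
    echo "lock has expired and may be stolen"
else
    echo "lock held by ${owner} until $(date -d @"$expiry")"
fi

In the wrapper scripts above, a typical sequence is therefore something like "pacemaker_resource_lock.sh --acquire mysql-restart-lock $(crm_node -n) 120", then "--owner mysql-restart-lock" to inspect it, and finally "--release mysql-restart-lock $(crm_node -n)" once the critical section is done (the lock name shown is illustrative).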
diff --git a/container_config_scripts/pacemaker_restart_bundle.sh b/container_config_scripts/pacemaker_restart_bundle.sh deleted file mode 100755 index 5c12541cba..0000000000 --- a/container_config_scripts/pacemaker_restart_bundle.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash - -set -u - -# ./pacemaker_restart_bundle.sh mysql galera galera-bundle Master _ -# ./pacemaker_restart_bundle.sh redis redis redis-bundle Slave Master -# ./pacemaker_restart_bundle.sh ovn_dbs ovndb_servers ovn-dbs-bundle Slave Master -RESTART_SCRIPTS_DIR=$(dirname $0) -TRIPLEO_SERVICE=$1 -RESOURCE_NAME=$2 -BUNDLE_NAME=$3 -WAIT_TARGET_LOCAL=$4 -WAIT_TARGET_ANYWHERE=${5:-_} -TRIPLEO_MINOR_UPDATE="${TRIPLEO_MINOR_UPDATE:-false}" -TRIPLEO_HA_WRAPPER_RESOURCE_EXISTS="${TRIPLEO_HA_WRAPPER_RESOURCE_EXISTS:-false}" - -bundle_can_be_restarted() { - local bundle=$1 - # As long as the resource bundle is managed by pacemaker and is - # not meant to stay stopped, no matter the state of any inner - # pcmk_remote or ocf resource, we should restart it to give it a - # chance to read the new config. - [ "$(crm_resource --meta -r $1 -g is-managed 2>/dev/null)" != "false" ] && \ - [ "$(crm_resource --meta -r $1 -g target-role 2>/dev/null)" != "Stopped" ] -} - -log() { - local msg=$1 - logger -t pcmkrestart "$1" -} - -HOSTNAME=$(/bin/hostname -s) -if [ x"${TRIPLEO_MINOR_UPDATE,,}" != x"true" ]; then - if [ x"${TRIPLEO_HA_WRAPPER_RESOURCE_EXISTS,,}" = x"false" ]; then - # Do not restart during initial deployment, as the resource - # has just been created. - SERVICE_NODEID=$(/bin/hiera -c /etc/puppet/hiera.yaml "${TRIPLEO_SERVICE}_short_bootstrap_node_name") - if [[ "${HOSTNAME,,}" == "${SERVICE_NODEID,,}" ]]; then - log "Initial deployment, skipping the restart of ${BUNDLE_NAME}" - fi - exit 0 - else - # During a stack update, this script is called in parallel on - # every node the resource runs on, after the service's configs - # have been updated on all nodes. So we need to run pcs only - # once (e.g. on the service's boostrap node). - if bundle_can_be_restarted ${BUNDLE_NAME}; then - SERVICE_NODEID=$(/bin/hiera -c /etc/puppet/hiera.yaml "${TRIPLEO_SERVICE}_short_bootstrap_node_name") - if [[ "${HOSTNAME,,}" == "${SERVICE_NODEID,,}" ]]; then - replicas_running=$(crm_resource -Q -r $BUNDLE_NAME --locate 2>&1 | wc -l) - if [ "$replicas_running" != "0" ]; then - log "Restarting ${BUNDLE_NAME} globally. Stopping:" - /sbin/pcs resource disable --wait=__PCMKTIMEOUT__ $BUNDLE_NAME - log "Restarting ${BUNDLE_NAME} globally. Starting:" - /sbin/pcs resource enable --wait=__PCMKTIMEOUT__ $BUNDLE_NAME - else - log "${BUNDLE_NAME} is not running anywhere," \ - "cleaning up to restart it globally if necessary" - /sbin/pcs resource cleanup $BUNDLE_NAME - fi - else - log "Skipping global restart of ${BUNDLE_NAME} on ${HOSTNAME} it will be restarted by node ${SERVICE_NODEID}" - fi - - else - log "No global restart needed for ${BUNDLE_NAME}." - fi - fi -else - # During a minor update workflow however, a host gets fully - # updated before updating the next one. So unlike stack - # update, at the time this script is called, the service's - # configs aren't updated on all nodes yet. So only restart the - # resource locally, where it's guaranteed that the config is - # up to date. 
- HOST=$(facter hostname) - - if bundle_can_be_restarted ${BUNDLE_NAME}; then - # if the resource is running locally, restart it - if crm_resource -r $BUNDLE_NAME --locate 2>&1 | grep -w -q "${HOST}"; then - log "Restarting ${BUNDLE_NAME} locally on '${HOST}'" - /sbin/pcs resource restart $BUNDLE_NAME "${HOST}" - - else - # At this point, if no resource is running locally, it's - # either because a) it has failed previously, or b) because - # it's an A/P resource running elsewhere. - # By cleaning up resource, we ensure that a) it will try to - # restart, or b) it won't do anything if the resource is - # already running elsewhere. - log "${BUNDLE_NAME} is currently not running on '${HOST}'," \ - "cleaning up its state to restart it if necessary" - /sbin/pcs resource cleanup $BUNDLE_NAME node="${HOST}" - fi - - # Wait until the resource is in the expected target state - $RESTART_SCRIPTS_DIR/pacemaker_wait_bundle.sh \ - $RESOURCE_NAME $BUNDLE_NAME \ - "$WAIT_TARGET_LOCAL" "$WAIT_TARGET_ANYWHERE" \ - "${HOST}" __PCMKTIMEOUT__ - else - log "No restart needed for ${BUNDLE_NAME}." - fi -fi diff --git a/container_config_scripts/pacemaker_wait_bundle.sh b/container_config_scripts/pacemaker_wait_bundle.sh deleted file mode 100755 index 93335aabdf..0000000000 --- a/container_config_scripts/pacemaker_wait_bundle.sh +++ /dev/null @@ -1,333 +0,0 @@ -#!/bin/bash - -# ---- -# Wait for an OCF resource or a bundle to be restarted -# ---- -# e.g.: -# M/S OCF: $0 galera galera-bundle Master -# clone OCF: $0 rabbitmq rabbitmq-bundle Started -# A/P M/S OCF: $0 redis redis-bundle Slave Master -# A/P bundle: $0 openstack-cinder-volume openstack-cinder-volume _ Started -# clone bundle: $0 haproxy-bundle haproxy-bundle Started - -# design note 1: -# - this script is called during a minor update; it is called -# once per node that hosts a service replica. -# - the purpose of this script is to ensure that restarting the -# service replica locally won't disrupt the service availability -# for the end user. To reach that goal, the script waits until the -# service is restarted locally or globally and reaches a given -# target state (i.e. Started, Slave or Master). -# design note 2: -# - we don't want to track restart error: our only job is to ensure -# service restart synchronization, not service health. -# - In particular, we don't want to error out in case the resource -# cannot be restarted locally, because that would make the minor -# update fail, even if potentially other replicas still provide -# the service. -# design note 3: -# - we can bail out early if we determine that the resource can't -# be restarted automatically by pacemaker (e.g. its "blocked", -# unmanaged or disabled). 
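A compact illustration of the bail-out conditions from design note 3, using the same crm_resource and crm_mon probes that appear further down (the resource name is a placeholder):

# Sketch: the three "will never reach the target role" situations the wait
# loop gives up on early. RSC is a placeholder resource name.
RSC=rabbitmq

if [ "$(crm_resource --meta -r $RSC -g is-managed 2>/dev/null)" = "false" ]; then
    echo "$RSC is unmanaged: pacemaker will not restart it"
elif [ "$(crm_resource --meta -r $RSC -g target-role 2>/dev/null)" = "Stopped" ]; then
    echo "$RSC is disabled: it is meant to stay stopped"
elif crm_mon -r --as-xml | xmllint --xpath "//resource[@id='$RSC']" - 2>/dev/null \
        | grep -q -w 'blocked="true"'; then
    echo "$RSC is blocked: pacemaker cannot act on it until the failure is cleared"
fi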
- -log() { - local msg=$1 - echo "$(date -u): $1" -} - -usage() { - echo 2>&1 "Usage: $0 NAME BUNDLE_NAME ROLE_LOCAL [ROLE_ANYWHERE] [HOST] [TIMEOUT]" - exit 1 -} - -pacemaker_supports_promoted() { - # The Promoted token is only matched in recent pacemaker versions - grep -wq "Promoted" /usr/share/pacemaker/resources-*.rng -} - -# -# Utility functions to detect stuck resources -# - -bundle_failures_locally() { - local engine=$BUNDLE_CONTAINER_ENGINE - local replicas=$BUNDLE_REPLICAS - local last=$(($replicas - 1)) - local replica_name - for i in $(seq 0 $last); do - replica_name=${BUNDLE_NAME}-${engine}-${i} - crm_failcount -q -G -r $replica_name -N $HOST - done -} - -bundle_failures_globally() { - local engine=$BUNDLE_CONTAINER_ENGINE - local replicas=$BUNDLE_REPLICAS - local last=$(($replicas - 1)) - for i in $(seq 0 $last); do - crm_failcount -q -G -r ${BUNDLE_NAME}-${engine}-${i} - done -} - -bundle_running_globally() { - local engine=$BUNDLE_CONTAINER_ENGINE - # return the number of running bundles replica, i.e. the number of - # docker/podman resource replicas currently running in the cluster - crm_mon --as-xml | xmllint --xpath "count(//resources/bundle[@id='${BUNDLE_NAME}']/replica/resource[@resource_agent='${OCF}:heartbeat:${engine}']/node)" - -} - -ocf_failures_globally() { - local replicas=$BUNDLE_REPLICAS - local last=$(($replicas - 1)) - local bundle_node - for i in $(seq 0 $last); do - bundle_node=${BUNDLE_NAME}-${i} - crm_failcount -q -G -r $NAME -N $bundle_node - done -} - -did_resource_failed_locally() { - local failures - local running - local remotehost - if [ "${NAME}" != "${BUNDLE_NAME}" ]; then - # if we're dealing with an ocf resource, it is running on a - # pacemaker_remote rather that on the real host, and the - # failcounts are thus associated to the pcmk remote. Replace - # the host's name with the pcmk remote's name. - remotehost=$(crm_mon --as-xml | xmllint --xpath "string(//resources/bundle[@id='${BUNDLE_NAME}']/replica/resource/node[@name='${HOST}']/../../resource[@resource_agent='${OCF}:pacemaker:remote']/@id)" -) - if [ -n "${remotehost}" ]; then - crm_failcount -q -G -r $NAME -N $remotehost | grep -q -w INFINITY - return $? - fi - # If no pcmk remote is currently running, the failcount from - # the ocf resource is useless, compute the failcount from the - # bundle case instead (computed below). - fi - - # for bundles, pacemaker can run any bundle replica locally - # (e.g. galera-bundle-docker-{0,1,2}), and a failure happens when - # there are no more replica to try. - # That is, when _at least_ one replica failed locally, and all the - # others either failed or are currently running elsewhere. 
- failures=$(bundle_failures_locally $HOST | grep -c -w INFINITY) - running=$(bundle_running_globally) - test $failures -gt 0 && \ - test $(( $failures + $running )) -ge $BUNDLE_REPLICAS -} - -did_resource_failed_globally() { - local remotecount - local failures - if [ "${NAME}" != "${BUNDLE_NAME}" ]; then - # we check the state of an ocf resource only if the - # pcmkremotes are started - remotecount=$(crm_mon --as-xml | xmllint --xpath "count(//resources/bundle[@id='${BUNDLE_NAME}']/replica/resource[@resource_agent='${OCF}:pacemaker:remote']/node)" -) - if [ "${remotecount}" = "0" ]; then - # no pcmkremote is running, so check the bundle state - # instead of checking the ocf resource - # bundle failed if all ${BUNDLE_REPLICAS} replicas failed - failures=$(bundle_failures_globally | grep -c -w INFINITY) - test $failures -eq $BUNDLE_REPLICAS - else - # ocf resource failed if it failed to start on - # all $BUNDLE_REPLICAS bundle nodes - failures=$(ocf_failures_globally | grep -c -w INFINITY) - test $failures -eq $BUNDLE_REPLICAS - fi - else - # bundle failed if all ${BUNDLE_REPLICAS} replicas failed - failures=$(bundle_failures_globally | grep -c -w INFINITY) - test $failures -eq $BUNDLE_REPLICAS - fi -} - - -# Input validation -# - -NAME=$1 -if [ -z "${NAME}" ]; then - echo 2>&1 "Error: argument NAME must not be empty" - exit 1 -fi - -BUNDLE_NAME=$2 -if [ -z "${BUNDLE_NAME}" ]; then - echo 2>&1 "Error: argument BUNDLE_NAME must not be empty" - exit 1 -fi - -ROLE_LOCAL=$3 -if [ "${ROLE_LOCAL}" = "_" ]; then - ROLE_LOCAL="" -fi - -ROLE_ANYWHERE=$4 -if [ "${ROLE_ANYWHERE}" = "_" ]; then - ROLE_ANYWHERE="" -fi - -if [ -z "${ROLE_LOCAL}" ]; then - if [ -z "${ROLE_ANYWHERE}" ]; then - echo 2>&1 "Error: either ROLE_LOCAL or ROLE_ANYWHERE must be non empty" - exit 1 - fi -else - if !(echo "${ROLE_LOCAL}" | grep -q -x -E "(Started|Slave|Master|Unpromoted|Promoted)"); then - echo 2>&1 "Error: argument ROLE_LOCAL must be either 'Started' 'Slave' 'Master' 'Unpromoted' or 'Promoted'" - exit 1 - fi -fi - -if [ -n "${ROLE_ANYWHERE}" ] && !(echo "${ROLE_ANYWHERE}" | grep -q -x -E "(Started|Slave|Master|Unpromoted|Promoted)"); then - echo 2>&1 "Error: argument ROLE_ANYWHERE must be either 'Started' 'Slave' 'Master' 'Unpromoted' or 'Promoted'" - exit 1 -fi - -# Ensure compatibility with pacemaker 2.1 -if pacemaker_supports_promoted; then - ROLE_LOCAL=$(echo "$ROLE_LOCAL" | sed -e 's/Master/Promoted/' -e 's/Slave/Unpromoted/') - ROLE_ANYWHERE=$(echo "$ROLE_ANYWHERE" | sed -e 's/Master/Promoted/' -e 's/Slave/Unpromoted/') - OCF="ocf" -else - OCF="ocf:" -fi - -HOST=${5:-$(facter hostname)} -TIMEOUT=${6:-__PCMKTIMEOUT__} - - -# Configure the search -# ---- -# Note: we can't use crm_resource in all searches because we can't -# easily extract the host the OCF resources run on (crm_resource -# returns the pcmk-remote nodes rather than the hosts) -# So instead, we implement various searches with XPath directly. 
- -if [ "${BUNDLE_NAME}" != "${NAME}" ]; then - # ocf resource - local_resource_xpath="//bundle/replica/resource[@resource_agent='${OCF}:pacemaker:remote']/node[@name='${HOST}']/../../resource[@id='${NAME}']" - any_resource_xpath="//bundle//resource[@id='${NAME}']" - replicas_xpath="//bundle/primitive[@id='${BUNDLE_NAME}']/../*[boolean(@image) and boolean(@replicas)]" -else - # bundle resource - local_resource_xpath="//bundle[@id='${NAME}']/replica/resource/node[@name='${HOST}']/../../resource" - any_resource_xpath="//bundle[@id='${NAME}']//resource" - replicas_xpath="//bundle[@id='${BUNDLE_NAME}']/*[boolean(@image) and boolean(@replicas)]" -fi - -bundle_def_xpath="//bundle[@id='${BUNDLE_NAME}']/*[boolean(@image) and boolean(@replicas)]" -BUNDLE_CONTAINER_ENGINE=$(cibadmin -Q | xmllint --xpath "name(${bundle_def_xpath})" -) -BUNDLE_REPLICAS=$(cibadmin -Q | xmllint --xpath "string(${bundle_def_xpath}/@replicas)" -) - - -# The wait algorithm follows a two-stage approach -# 1. Depending on how the script is called, we first check whether -# the resource is restarted locally. An A/P resource may be -# restarted elsewhere in the cluster. -# 2. If needed, check whether the A/P resource has restarted -# elsewhere. For A/P M/S resources, in case the resource is -# restarted as Slave locally, ensure a Master is available. - -success=1 -bailout=1 -timeout=$TIMEOUT -role="" - -# Stage 1: local check -if [ -n "$ROLE_LOCAL" ]; then - log "Waiting until ${NAME} has restarted on ${HOST} and is in state ${ROLE_LOCAL}" - log "Will probe resource state with the following XPath pattern: ${local_resource_xpath}" - - while [ $timeout -gt 0 ] && [ $bailout -ne 0 ] && [ $success -ne 0 ]; do - resource=$(crm_mon -r --as-xml | xmllint --xpath "${local_resource_xpath}" - 2>/dev/null) - role=$(echo "${resource}" | sed -ne 's/.*\Wrole="\([^"]*\)".*/\1/p') - - if [ "$(crm_resource --meta -r ${NAME} -g is-managed 2>/dev/null)" = "false" ]; then - log "${NAME} is unmanaged, will never reach target role. Bailing out" - bailout=0 - continue - elif [ "$(crm_resource --meta -r ${NAME} -g target-role 2>/dev/null)" = "Stopped" ]; then - log "${NAME} is disabled, will never reach target role. Bailing out" - bailout=0 - continue - elif echo "${resource}" | grep -q -w "\Wblocked=\"true\""; then - log "${NAME} is blocked, will never reach target role. Bailing out" - bailout=0 - continue - elif did_resource_failed_locally; then - log "${NAME} is in failed state, will never reach target role. 
Bailing out" - bailout=0 - continue - elif [ "$role" = "$ROLE_LOCAL" ]; then - success=0 - continue - elif [ -n "$ROLE_ANYWHERE" ] && [ "$role" = "$ROLE_ANYWHERE" ]; then - # A/P: we are restarted in the expected state - success=0 - continue - else - log "Waiting for ${NAME} to transition to role ${ROLE_LOCAL} on ${HOST}" - fi - - if [ $bailout -ne 0 ] && [ $success -ne 0 ]; then - sleep 4 - timeout=$((timeout-4)) - fi - done -fi - -# Stage 2: global check -if [ $timeout -gt 0 ] && [ -n "$ROLE_ANYWHERE" ] && [ "$role" != "$ROLE_ANYWHERE" ]; then - log "Wait until ${NAME} is restarted anywhere in the cluster in state ${ROLE_ANYWHERE}" - log "Will probe resource state with the following XPath pattern: ${any_resource_xpath}" - - success=1 - bailout=1 - while [ $timeout -gt 0 ] && [ $bailout -ne 0 ] && [ $success -ne 0 ]; do - resources=$(crm_mon -r --as-xml | xmllint --xpath "${any_resource_xpath}" - 2>/dev/null) - if [ "$(crm_resource --meta -r ${NAME} -g is-managed 2>/dev/null)" = "false" ]; then - log "${NAME} is unmanaged, will never reach target role. Bailing out" - bailout=0 - continue - elif [ "$(crm_resource --meta -r ${NAME} -g target-role 2>/dev/null)" = "Stopped" ]; then - log "${NAME} is disabled, will never reach target role. Bailing out" - bailout=0 - continue - elif ! (echo "${resources}" | grep -q -w "\Wblocked=\"false\""); then - log "${NAME} blocked, will never reach target role. Bailing out" - bailout=0 - continue - elif did_resource_failed_globally; then - log "${NAME} is in failed state, will never reach target role. Bailing out" - bailout=0 - continue - elif echo "${resources}" | grep -q -w "\Wrole=\"${ROLE_ANYWHERE}\""; then - success=0 - continue - else - log "Waiting for ${NAME} to transition to role ${ROLE_ANYWHERE} anywhere in the cluster" - fi - - if [ $bailout -ne 0 ] && [ $success -ne 0 ]; then - sleep 4 - timeout=$((timeout-4)) - fi - done -fi - -if [ $timeout -le 0 ]; then - log "Timeout reached after ${TIMEOUT}s while waiting for ${NAME} to be restarted" -elif [ $bailout -le 0 ]; then - log "Restart monitoring for ${NAME} cancelled" -fi - -if [ $success -eq 0 ]; then - log "${NAME} successfully restarted" -else - log "${NAME} was not restarted properly" -fi - -# Don't block minor update or stack update if the wait was unsuccessful -exit 0 diff --git a/container_config_scripts/placement_wait_for_service.py b/container_config_scripts/placement_wait_for_service.py deleted file mode 100755 index da6286eeb9..0000000000 --- a/container_config_scripts/placement_wait_for_service.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2018 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# shell script to check if placement API is up after X attempts. -# Default max is 60 iterations with 10s (default) timeout in between. 
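The check itself is simple: poll the placement root URL until its version document lists a CURRENT API version. A hedged curl equivalent of that loop is sketched here, with a placeholder URL standing in for the internal endpoint the script looks up in Keystone.

# Sketch: poll the placement version document until it advertises a CURRENT
# version, mirroring the Python below. The endpoint URL is a placeholder; the
# real script discovers it from the Keystone service catalog.
PLACEMENT_URL=http://192.0.2.10:8778/placement
for i in $(seq 1 60); do
    if curl -sk "${PLACEMENT_URL}/" | grep -q CURRENT; then
        echo "placement API is up"
        exit 0
    fi
    sleep 10
done
echo "placement API did not come up in time" >&2
exit 1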
- -from configparser import ConfigParser -import logging -import logging.handlers -import os -import re -import sys -import time - -from keystoneauth1.identity import v3 -from keystoneauth1 import session -from keystoneclient.v3 import client -import requests - - -config = ConfigParser(strict=False) - -debug = os.getenv('__OS_DEBUG', 'false') - -if debug.lower() == 'true': - loglevel = logging.DEBUG -else: - loglevel = logging.INFO - -LOG = logging.getLogger('placement_wait_for_service') -LOG_FORMAT = ('%(asctime)s.%(msecs)03d %(levelname)s ' - '%(name)s %(message)s') -DATE_FORMAT = '%Y-%m-%d %H:%M:%S' -LOG.handlers.clear() -LOG.setLevel(loglevel) -LOG.propagate = True -formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT) -stream_handler = logging.StreamHandler(sys.stdout) -stream_handler.setFormatter(formatter) -LOG.addHandler(stream_handler) - -iterations = 60 -timeout = 10 -placement_cfg = '/etc/placement/placement.conf' - -if __name__ == '__main__': - if os.path.isfile(placement_cfg): - try: - config.read(placement_cfg) - except Exception: - LOG.exception('Error while reading placement.conf:') - else: - LOG.error('Placement configuration file %s does not exist', - placement_cfg) - sys.exit(1) - - # get keystone client with details from [keystone_authtoken] section - auth = v3.Password( - user_domain_name=config.get('keystone_authtoken', 'user_domain_name'), - username=config.get('keystone_authtoken', 'username'), - password=config.get('keystone_authtoken', 'password'), - project_name=config.get('keystone_authtoken', 'project_name'), - project_domain_name=config.get('keystone_authtoken', - 'project_domain_name'), - auth_url=config.get('keystone_authtoken', 'auth_url') + '/v3') - sess = session.Session(auth=auth, verify=False) - keystone = client.Client(session=sess, interface='internal') - - iterations_endpoint = iterations - placement_endpoint_url = None - while iterations_endpoint > 1: - iterations_endpoint -= 1 - try: - # get placement service id - placement_service_id = keystone.services.list( - name='placement')[0].id - - # get placement endpoint - # Note: puppet-placement does not support setting the interface - # until we have https://review.opendev.org/688862. - # Lets hard code 'internal' for now. - placement_endpoint_url = keystone.endpoints.list( - service=placement_service_id, - region=config.get('keystone_authtoken', 'region_name'), - interface='internal')[0].url - if not placement_endpoint_url: - LOG.error('Failed to get placement service endpoint!') - else: - break - except Exception: - LOG.exception('Retry - Failed to get placement service endpoint:') - time.sleep(timeout) - - if not placement_endpoint_url: - LOG.error('Failed to get placement service endpoint!') - sys.exit(1) - - # we should have CURRENT in the request response from placement: - # {"versions": [{"status": "CURRENT", "min_version": "1.0", "max_version": - # "1.29", "id": "v1.0", "links": [{"href": "", "rel": "self"}]}]} - response_reg = re.compile('.*CURRENT,*') - - while iterations > 1: - iterations -= 1 - try: - r = requests.get(placement_endpoint_url + '/', verify=False) - if r.status_code == 200 and response_reg.match(r.text): - LOG.info('Placement service up! 
- %s', r.text) - sys.exit(0) - else: - LOG.info('response - %r', r) - LOG.info('Placement service not up - %s, %s', - r.status_code, - r.text) - except Exception: - LOG.exception('Error querying the placement endpoint:') - time.sleep(timeout) - - sys.exit(1) - -# vim: set et ts=4 sw=4 : diff --git a/container_config_scripts/pyshim.sh b/container_config_scripts/pyshim.sh deleted file mode 100755 index e6077fc666..0000000000 --- a/container_config_scripts/pyshim.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -# Copyright 2018 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Usage: pyshim.sh