From c2ff9b8f4900fcc95698368960f001da4d25d43d Mon Sep 17 00:00:00 2001
From: Ghanshyam Mann
Date: Sat, 24 Feb 2024 11:32:57 -0800
Subject: [PATCH] Retire TripleO: remove repo content

The TripleO project is retiring:
https://review.opendev.org/c/openstack/governance/+/905145

This commit removes the content of this project's repository.

Change-Id: I7f1dc7801a025d9bb28110f74340846b99870b79
---
 .ansible-lint | 25 - .gitignore | 191 - .stestr.conf | 3 - LICENSE | 202 - README.rst | 140 +- babel.cfg | 1 - baremetal-samples/baremetal_deployment.yaml | 122 - bindep.txt | 11 - ci/README.rst | 11 - ci/common/ironic_standalone_post.yaml | 17 - ci/common/vbmc_setup.yaml | 64 - ci/custom_ci_roles_data.yaml | 254 - ci/custom_names_network_data.yaml | 60 - ci/custom_names_vip_data.yaml | 6 - ci/environments/README.rst | 4 - ci/environments/ceph-min-osds.yaml | 2 - ci/environments/multinode-containers.yaml | 52 - ci/environments/multinode-core.yaml | 34 - .../network/multiple-nics-ipv6/README | 23 - .../network-environment.yaml | 18 - .../network-isolation-absolute.yaml | 29 - .../multiple-nics-ipv6/network-isolation.yaml | 28 - .../multiple-nics-ipv6/ui-settings.pickle | 732 -- ci/environments/network/multiple-nics/README | 23 - .../custom-network-environment.yaml | 27 - .../multiple-nics/network-environment.yaml | 19 - .../network-isolation-absolute.yaml | 32 - .../multiple-nics/network-isolation.yaml | 27 - .../network/multiple-nics/ui-settings.pickle | 716 -- ci/environments/network/public-bond/README | 23 - .../public-bond/network-environment.yaml | 19 - .../network-isolation-absolute.yaml | 26 - .../public-bond/network-isolation.yaml | 26 - .../network/public-bond/ui-settings.pickle | 699 -- ci/environments/neutron_dns_domain.yaml | 2 - ci/environments/neutron_l3_qos.yaml | 2 - ci/environments/neutron_sec_group_quota.yaml | 2 - ci/environments/octavia-kvm.yaml | 7 - ci/environments/ovb-ha.yaml | 10 - .../scenario000-multinode-containers.yaml | 27 - ci/environments/scenario000-standalone.yaml | 164 - .../scenario001-multinode-containers.yaml | 168 - ci/environments/scenario001-standalone.yaml | 222 - ci/environments/scenario002-standalone.yaml | 68 - ci/environments/scenario003-standalone.yaml | 48 - ci/environments/scenario004-standalone.yaml | 87 - .../scenario007-multinode-containers.yaml | 114 - ci/environments/scenario007-standalone.yaml | 51 - .../scenario010-multinode-containers.yaml | 104 - ci/environments/scenario010-standalone.yaml | 70 - ci/environments/scenario012-standalone.yaml | 29 - ci/environments/scenario013-standalone.yaml | 30 - ci/environments/standalone-ipa.yaml | 22 - ci/network_data.yaml | 50 - ci/network_data_v6.yaml | 54 - ci/vip_data.yaml | 6 - ci/vip_data_v6.yaml | 6 - common/common-container-config-scripts.yaml | 25 - common/common-container-setup-tasks.yaml | 104 - common/container-puppet.sh | 180 - common/deploy-steps-playbooks-common.yaml | 101 - common/deploy-steps-tasks-step-0.yaml | 46 - common/deploy-steps-tasks-step-1.yaml | 196 - common/deploy-steps-tasks.yaml | 140 - common/deploy-steps.j2 | 1330 --- common/generate-config-tasks.yaml | 54 - common/hiera-steps-tasks.yaml | 31 - common/host-container-puppet-tasks.yaml | 33 - common/post.j2.yaml | 1 - common/services/role.role.j2.yaml | 443 - config-download-software.yaml | 74 - config-download-structured.yaml | 74 - container_config_scripts/__init__.py | 0 .../monitoring/collectd_check_health.py | 95 - container_config_scripts/mysql_upgrade_db.sh | 15 - .../nova_api_ensure_default_cells.py | 161 - 
.../nova_libvirt_init_secret.sh | 70 - .../nova_statedir_ownership.py | 247 - .../nova_wait_for_api_service.py | 109 - .../nova_wait_for_compute_service.py | 118 - .../pacemaker_mutex_restart_bundle.sh | 103 - .../pacemaker_mutex_shutdown.sh | 120 - .../pacemaker_resource_lock.sh | 267 - .../pacemaker_restart_bundle.sh | 104 - .../pacemaker_wait_bundle.sh | 333 - .../placement_wait_for_service.py | 134 - container_config_scripts/pyshim.sh | 45 - container_config_scripts/tests/__init__.py | 0 .../test_nova_api_ensure_default_cells.py | 168 - .../tests/test_nova_statedir_ownership.py | 426 - container_config_scripts/wait-port-and-run.sh | 18 - deployed-server/README.rst | 73 - deployed-server/deployed-neutron-port.yaml | 105 - deployed-server/deployed-server.yaml | 79 - deployed-server/scripts/enable-ssh-admin.sh | 26 - deployment/README.rst | 305 - deployment/aide/aide-baremetal-ansible.yaml | 83 - .../aodh/aodh-api-container-puppet.yaml | 392 - deployment/aodh/aodh-base.yaml | 107 - .../aodh/aodh-evaluator-container-puppet.yaml | 156 - .../aodh/aodh-listener-container-puppet.yaml | 155 - .../aodh/aodh-notifier-container-puppet.yaml | 156 - .../apache/apache-baremetal-ansible.j2.yaml | 211 - .../apache/apache-baremetal-puppet.j2.yaml | 174 - .../auditd/auditd-baremetal-ansible.yaml | 51 - .../barbican-api-container-puppet.yaml | 858 -- .../barbican-backend-dogtag-puppet.yaml | 62 - .../barbican-backend-kmip-puppet.yaml | 59 - ...barbican-backend-pkcs11-crypto-puppet.yaml | 122 - ...barbican-backend-simple-crypto-puppet.yaml | 47 - .../barbican/barbican-client-puppet.yaml | 67 - ...ometer-agent-central-container-puppet.yaml | 206 - ...ometer-agent-compute-container-puppet.yaml | 146 - ...eilometer-agent-ipmi-container-puppet.yaml | 153 - ...r-agent-notification-container-puppet.yaml | 240 - .../ceilometer-base-container-puppet.yaml | 125 - deployment/cephadm/ceph-base.yaml | 636 -- deployment/cephadm/ceph-client.yaml | 155 - deployment/cephadm/ceph-external.yaml | 65 - deployment/cephadm/ceph-grafana.yaml | 203 - deployment/cephadm/ceph-ingress.yaml | 72 - deployment/cephadm/ceph-mds.yaml | 52 - deployment/cephadm/ceph-mgr.yaml | 177 - deployment/cephadm/ceph-mon.yaml | 78 - deployment/cephadm/ceph-nfs.yaml | 139 - deployment/cephadm/ceph-osd.yaml | 80 - deployment/cephadm/ceph-rbdmirror.yaml | 87 - deployment/cephadm/ceph-rgw.yaml | 213 - .../certs/ca-certs-baremetal-ansible.yaml | 46 - .../cinder/cinder-api-container-puppet.yaml | 457 - ...nder-backend-dellemc-powerflex-puppet.yaml | 144 - ...inder-backend-dellemc-powermax-puppet.yaml | 105 - ...der-backend-dellemc-powerstore-puppet.yaml | 97 - .../cinder-backend-dellemc-sc-puppet.yaml | 141 - .../cinder-backend-dellemc-unity-puppet.yaml | 99 - .../cinder-backend-dellemc-vnx-puppet.yaml | 120 - ...cinder-backend-dellemc-xtremio-puppet.yaml | 120 - .../cinder/cinder-backend-ibm-svf-puppet.yaml | 96 - .../cinder/cinder-backend-netapp-puppet.yaml | 162 - .../cinder/cinder-backend-nfs-puppet.yaml | 109 - .../cinder/cinder-backend-nvmeof-puppet.yaml | 94 - .../cinder/cinder-backend-pure-puppet.yaml | 156 - .../cinder/cinder-backend-rbd-puppet.yaml | 117 - .../cinder-backup-container-puppet.yaml | 336 - .../cinder-backup-pacemaker-puppet.yaml | 369 - deployment/cinder/cinder-base.yaml | 172 - .../cinder-common-container-puppet.yaml | 428 - .../cinder-scheduler-container-puppet.yaml | 183 - .../cinder-volume-container-puppet.yaml | 221 - .../cinder-volume-pacemaker-puppet.yaml | 366 - .../openstack-clients-baremetal-ansible.yaml | 37 - 
...er-image-prepare-baremetal-ansible.j2.yaml | 134 - deployment/containers-common.yaml | 177 - deployment/database/mysql-base.yaml | 191 - deployment/database/mysql-client.yaml | 54 - .../database/mysql-container-puppet.yaml | 340 - .../database/mysql-pacemaker-puppet.yaml | 676 -- deployment/database/redis-base-puppet.yaml | 132 - .../database/redis-container-puppet.yaml | 321 - .../database/redis-pacemaker-puppet.yaml | 474 - deployment/deprecated/README.rst | 6 - .../deprecated/etcd-container-puppet.yaml | 367 - .../deprecated/multipathd-container.yaml | 178 - .../neutron-bgpvpn-api-container-puppet.yaml | 71 - ...utron-bgpvpn-bagpipe-baremetal-puppet.yaml | 68 - .../neutron-l2gw-agent-baremetal-puppet.yaml | 101 - .../neutron-l2gw-api-container-puppet.yaml | 89 - ...on-linuxbridge-agent-baremetal-puppet.yaml | 88 - .../nova/nova-libvirt-container-puppet.yaml | 960 -- .../snmp/snmp-baremetal-puppet.yaml | 113 - .../designate-api-container-puppet.yaml | 242 - deployment/designate/designate-base.yaml | 73 - .../designate/designate-bind-container.yaml | 254 - .../designate-central-container-puppet.yaml | 315 - .../designate-mdns-container-puppet.yaml | 247 - .../designate-producer-container-puppet.yaml | 157 - .../designate-sink-container-puppet.yaml | 152 - .../designate-worker-container-puppet.yaml | 197 - deployment/etcd/etcd-container-ansible.yaml | 334 - deployment/experimental/README.rst | 6 - deployment/frr/frr-container-ansible.yaml | 485 - .../glance/glance-api-container-puppet.yaml | 898 -- .../glance-api-edge-container-puppet.yaml | 88 - .../glance-api-internal-container-puppet.yaml | 183 - .../gnocchi/gnocchi-api-container-puppet.yaml | 491 - deployment/gnocchi/gnocchi-base.yaml | 182 - .../gnocchi-metricd-container-puppet.yaml | 240 - .../gnocchi-statsd-container-puppet.yaml | 227 - .../haproxy/haproxy-container-puppet.yaml | 385 - .../haproxy-edge-container-puppet.yaml | 133 - .../haproxy-internal-tls-certmonger.j2.yaml | 176 - .../haproxy/haproxy-pacemaker-puppet.yaml | 590 - .../haproxy-public-tls-certmonger.yaml | 196 - .../haproxy/haproxy-public-tls-inject.yaml | 190 - .../heat/heat-api-cfn-container-puppet.yaml | 254 - .../heat/heat-api-container-puppet.yaml | 328 - deployment/heat/heat-base-puppet.yaml | 239 - .../heat/heat-engine-container-puppet.yaml | 327 - .../heat-ephemeral-container-ansible.yaml | 196 - .../horizon/horizon-container-puppet.yaml | 424 - .../image-serve-baremetal-ansible.yaml | 93 - .../ipa/ipaservices-baremetal-ansible.yaml | 225 - deployment/ipsec/ipsec-baremetal-ansible.yaml | 118 - .../ironic/ironic-api-container-puppet.yaml | 428 - deployment/ironic/ironic-base-puppet.yaml | 114 - .../ironic-conductor-container-puppet.yaml | 757 -- .../ironic-inspector-container-puppet.yaml | 627 -- ...ironic-neutron-agent-container-puppet.yaml | 163 - .../ironic/ironic-pxe-container-puppet.yaml | 227 - .../iscsid/iscsid-container-puppet.yaml | 147 - .../kernel/kernel-baremetal-ansible.yaml | 206 - .../kernel-boot-params-baremetal-ansible.yaml | 158 - .../keystone/keystone-container-puppet.yaml | 933 -- deployment/logging/files/barbican-api.yaml | 64 - deployment/logging/files/glance-api.yaml | 63 - deployment/logging/files/haproxy.yaml | 23 - deployment/logging/files/heat-api-cfn.yaml | 30 - deployment/logging/files/heat-api.yaml | 30 - deployment/logging/files/heat-engine.yaml | 64 - deployment/logging/files/keystone.yaml | 59 - deployment/logging/files/neutron-api.yaml | 73 - deployment/logging/files/neutron-common.yaml | 40 - 
deployment/logging/files/nova-api.yaml | 73 - deployment/logging/files/nova-common.yaml | 89 - deployment/logging/files/nova-libvirt.yaml | 170 - deployment/logging/files/nova-metadata.yaml | 62 - deployment/logging/files/placement-api.yaml | 62 - .../logging/rsyslog-baremetal-ansible.yaml | 50 - .../logging/rsyslog-container-puppet.yaml | 333 - .../rsyslog-sidecar-container-puppet.yaml | 77 - deployment/logging/stdout/barbican-api.yaml | 84 - deployment/logging/stdout/glance-api.yaml | 21 - deployment/logging/stdout/haproxy.yaml | 57 - deployment/logging/stdout/heat-api-cfn.yaml | 80 - deployment/logging/stdout/heat-api.yaml | 80 - deployment/logging/stdout/heat-engine.yaml | 22 - deployment/logging/stdout/keystone.yaml | 84 - deployment/logging/stdout/neutron-common.yaml | 28 - deployment/logging/stdout/nova-api.yaml | 80 - deployment/logging/stdout/nova-common.yaml | 32 - deployment/logging/stdout/nova-libvirt.yaml | 161 - deployment/logging/stdout/nova-metadata.yaml | 80 - deployment/logging/stdout/placement-api.yaml | 80 - .../login-defs-baremetal-ansible.yaml | 64 - .../logrotate-crond-container-puppet.yaml | 198 - deployment/logrotate/tmpwatch-install.yaml | 40 - .../manila/manila-api-container-puppet.yaml | 415 - deployment/manila/manila-backend-cephfs.yaml | 102 - .../manila-backend-flashblade-puppet.yaml | 77 - deployment/manila/manila-backend-isilon.yaml | 76 - deployment/manila/manila-backend-netapp.yaml | 128 - .../manila/manila-backend-powermax.yaml | 80 - deployment/manila/manila-backend-unity.yaml | 94 - deployment/manila/manila-backend-vnx.yaml | 90 - deployment/manila/manila-base.yaml | 106 - .../manila-scheduler-container-puppet.yaml | 159 - deployment/manila/manila-share-common.yaml | 77 - .../manila/manila-share-container-puppet.yaml | 243 - .../manila/manila-share-pacemaker-puppet.yaml | 382 - ...masquerade-networks-baremetal-ansible.yaml | 48 - .../memcached/memcached-container-puppet.yaml | 392 - .../rpc-qdrouterd-container-puppet.yaml | 269 - .../metrics/collectd-container-ansible.yaml | 464 - .../metrics/collectd-container-puppet.yaml | 661 -- deployment/metrics/qdr-container-ansible.yaml | 376 - deployment/metrics/qdr-container-puppet.yaml | 392 - .../multipathd-container-ansible.yaml | 142 - .../derive_pci_passthrough_whitelist.py | 558 - deployment/neutron/kill-script | 75 - ...ron-agents-ib-config-container-puppet.yaml | 96 - .../neutron/neutron-api-container-puppet.yaml | 667 -- deployment/neutron/neutron-base.yaml | 167 - deployment/neutron/neutron-cleanup | 27 - deployment/neutron/neutron-cleanup.service | 12 - .../neutron-dhcp-container-puppet.yaml | 436 - .../neutron/neutron-l3-container-puppet.yaml | 384 - .../neutron-metadata-container-puppet.yaml | 213 - .../neutron-mlnx-agent-container-puppet.yaml | 192 - ...tron-ovn-dpdk-config-container-puppet.yaml | 86 - .../neutron-ovs-agent-container-puppet.yaml | 405 - ...utron-ovs-dpdk-agent-container-puppet.yaml | 182 - ...plugin-ml2-cisco-vts-container-puppet.yaml | 110 - .../neutron-plugin-ml2-container-puppet.yaml | 82 - ...-ml2-mlnx-sdn-assist-container-puppet.yaml | 122 - .../neutron/neutron-plugin-ml2-ovn.yaml | 183 - deployment/neutron/neutron-plugin-ml2.yaml | 187 - .../neutron-sfc-api-container-puppet.yaml | 74 - .../neutron-sriov-agent-container-puppet.yaml | 226 - .../nova/nova-api-container-puppet.yaml | 648 -- deployment/nova/nova-apidb-client-puppet.yaml | 67 - deployment/nova/nova-az-config.yaml | 86 - deployment/nova/nova-base-puppet.yaml | 303 - .../nova-compute-common-container-puppet.yaml | 
88 - .../nova/nova-compute-container-puppet.yaml | 1755 --- .../nova/nova-conductor-container-puppet.yaml | 275 - deployment/nova/nova-db-client-puppet.yaml | 67 - .../nova/nova-ironic-container-puppet.yaml | 251 - deployment/nova/nova-libvirt-common.yaml | 176 - .../nova-libvirt-guests-container-puppet.yaml | 79 - .../nova/nova-manager-container-puppet.yaml | 113 - .../nova/nova-metadata-container-puppet.yaml | 315 - ...ova-migration-target-container-puppet.yaml | 237 - ...nova-modular-libvirt-container-puppet.yaml | 1019 -- .../nova/nova-scheduler-container-puppet.yaml | 342 - .../nova/nova-vnc-proxy-container-puppet.yaml | 432 - .../octavia/octavia-api-container-puppet.yaml | 511 - deployment/octavia/octavia-base.yaml | 371 - .../octavia/octavia-deployment-config.j2.yaml | 370 - ...tavia-health-manager-container-puppet.yaml | 256 - ...octavia-housekeeping-container-puppet.yaml | 186 - .../octavia-worker-container-puppet.yaml | 189 - .../providers/ovn-provider-config.yaml | 151 - .../openvswitch-dpdk-baremetal-ansible.yaml | 266 - ...ch-dpdk-netcontrold-container-ansible.yaml | 92 - .../ovn/ovn-controller-container-puppet.yaml | 642 -- deployment/ovn/ovn-dbs-cluster-ansible.yaml | 356 - deployment/ovn/ovn-dbs-container-puppet.yaml | 289 - deployment/ovn/ovn-dbs-pacemaker-puppet.yaml | 480 - .../ovn/ovn-metadata-container-puppet.yaml | 360 - .../clustercheck-container-puppet.yaml | 148 - .../compute-instanceha-baremetal-puppet.yaml | 45 - .../pacemaker/pacemaker-baremetal-puppet.yaml | 385 - .../pacemaker-remote-baremetal-puppet.yaml | 173 - .../placement-api-container-puppet.yaml | 356 - .../podman/podman-baremetal-ansible.yaml | 263 - .../qdr/qdrouterd-container-puppet.yaml | 163 - .../rabbitmq/rabbitmq-container-puppet.yaml | 459 - ...tmq-messaging-notify-container-puppet.yaml | 351 - ...tmq-messaging-notify-pacemaker-puppet.yaml | 467 - ...bbitmq-messaging-notify-shared-puppet.yaml | 63 - .../rabbitmq-messaging-pacemaker-puppet.yaml | 441 - ...bbitmq-messaging-rpc-container-puppet.yaml | 351 - ...bbitmq-messaging-rpc-pacemaker-puppet.yaml | 475 - deployment/rhsm/rhsm-baremetal-ansible.yaml | 85 - .../securetty-baremetal-ansible.yaml | 47 - deployment/snmp/snmpd-disabled-puppet.yaml | 85 - deployment/sshd/sshd-baremetal-ansible.yaml | 130 - ...external-swift-proxy-baremetal-puppet.yaml | 76 - deployment/swift/swift-base.yaml | 49 - .../swift-dispersion-baremetal-puppet.yaml | 47 - .../swift/swift-proxy-container-puppet.yaml | 517 - .../swift-ringbuilder-container-puppet.yaml | 153 - .../swift/swift-storage-container-puppet.yaml | 802 -- deployment/tests/test-container-volume.yaml | 84 - deployment/time/ptp-baremetal-ansible.yaml | 111 - .../time/timezone-baremetal-ansible.yaml | 45 - .../timemaster-baremetal-ansible.yaml | 169 - .../timesync/chrony-baremetal-ansible.yaml | 160 - .../tls/undercloud-remove-novajoin.yaml | 57 - deployment/tls/undercloud-tls.yaml | 94 - .../tripleo-firewall-baremetal-ansible.yaml | 110 - .../tripleo-packages-baremetal-puppet.yaml | 421 - deployment/tuned/tuned-baremetal-ansible.yaml | 73 - .../unbound/unbound-container-ansible.yaml | 214 - deployment/undercloud/undercloud-upgrade.yaml | 178 - ...tripleo-validations-baremetal-ansible.yaml | 42 - doc/requirements.txt | 3 - environments/README.md | 13 - environments/auditd.yaml | 116 - environments/barbican-backend-dogtag.yaml | 17 - environments/barbican-backend-kmip.yaml | 15 - .../barbican-backend-pkcs11-atos.yaml | 39 - .../barbican-backend-pkcs11-lunasa.yaml | 49 - .../barbican-backend-pkcs11-thales.yaml 
| 54 - environments/barbican-backend-pkcs11.yaml | 18 - .../barbican-backend-simple-crypto.yaml | 13 - environments/cadf.yaml | 2 - environments/cephadm/ceph-dashboard.yaml | 5 - environments/cephadm/ceph-mds.yaml | 2 - environments/cephadm/ceph-rbdmirror.yaml | 2 - environments/cephadm/cephadm-rbd-only.yaml | 22 - environments/cephadm/cephadm.yaml | 26 - environments/cinder-backup-active-active.yaml | 2 - .../cinder-backup-active-passive.yaml | 2 - environments/cinder-backup.yaml | 3 - .../cinder-dellemc-powerflex-config.yaml | 35 - .../cinder-dellemc-powermax-config.yaml | 29 - .../cinder-dellemc-powerstore-config.yaml | 27 - environments/cinder-dellemc-sc-config.yaml | 39 - environments/cinder-dellemc-unity-config.yaml | 28 - environments/cinder-dellemc-vnx-config.yaml | 33 - .../cinder-dellemc-xtremio-config.yaml | 29 - environments/cinder-dellsc-config.yaml | 27 - environments/cinder-ibm-svf-config.yaml | 31 - environments/cinder-iser.yaml | 19 - environments/cinder-netapp-config.yaml | 42 - environments/cinder-nfs-config.yaml | 21 - environments/cinder-nvmeof-config.yaml | 22 - environments/cinder-pure-config.yaml | 39 - environments/cinder-volume-active-active.yaml | 8 - .../composable-roles/monolithic-ha.yaml | 46 - .../composable-roles/monolithic-nonha.yaml | 46 - environments/composable-roles/standalone.yaml | 59 - environments/compute-instanceha.yaml | 10 - environments/compute-real-time-example.yaml | 37 - environments/computealt.yaml | 14 - environments/config-debug.yaml | 6 - .../container-image-prepare-debug.yaml | 5 - environments/dcn-storage.yaml | 47 - environments/dcn.yaml | 27 - environments/debug.yaml | 5 - .../deployed-network-environment.j2.yaml | 74 - environments/deployed-networks.yaml | 2 - environments/deployed-ports.j2.yaml | 24 - ...eployed-server-deployed-neutron-ports.yaml | 9 - environments/disable-heat.yaml | 4 - environments/disable-neutron.yaml | 22 - environments/disable-paunch.yaml | 4 - environments/disable-swift.yaml | 6 - environments/docker-ha.yaml | 22 - environments/enable-designate.yaml | 24 - environments/enable-federation-openidc.yaml | 87 - environments/enable-legacy-telemetry.yaml | 28 - environments/enable-secure-rbac.yaml | 4042 ------- environments/enable-stf.yaml | 79 - environments/enable-swap-partition.yaml | 3 - environments/enable-swap.yaml | 3 - environments/external-ceph.yaml | 30 - .../external-loadbalancer-vip-v6-all.yaml | 42 - .../external-loadbalancer-vip-v6.yaml | 40 - environments/external-loadbalancer-vip.yaml | 41 - environments/external-network-vip.yaml | 5 - environments/fips.yaml | 12 - environments/firewall.yaml | 23 - environments/fixed-ip-vips-v6.yaml | 21 - environments/fixed-ip-vips.yaml | 21 - environments/ha-redis.yaml | 6 - environments/horizon_password_validation.yaml | 5 - environments/ips-from-pool-all.yaml | 55 - environments/ips-from-pool-ctlplane.yaml | 24 - environments/ips-from-pool.yaml | 13 - environments/ipsec.yaml | 2 - .../lifecycle/undercloud-upgrade-prepare.yaml | 8 - environments/lifecycle/update-prepare.yaml | 10 - environments/lifecycle/upgrade-prepare.yaml | 17 - environments/logging-environment-rsyslog.yaml | 5 - environments/login-defs.yaml | 9 - environments/low-memory-usage.yaml | 29 - environments/manila-cephfsganesha-config.yaml | 33 - environments/manila-cephfsnative-config.yaml | 19 - environments/manila-flashblade-config.yaml | 15 - environments/manila-isilon-config.yaml | 16 - environments/manila-netapp-config.yaml | 29 - environments/manila-powermax-config.yaml | 20 - 
environments/manila-unity-config.yaml | 19 - environments/manila-vnx-config.yaml | 18 - environments/memcached-use-ips.yaml | 6 - .../rpc-qdrouterd-notify-rabbitmq-hybrid.yaml | 22 - .../rpc-rabbitmq-notify-rabbitmq-shared.yaml | 18 - .../metrics/ceilometer-write-qdr.yaml | 15 - .../metrics/collectd-read-rabbitmq.yaml | 15 - environments/metrics/collectd-write-qdr.yaml | 9 - .../metrics/qdr-edge-only-ansible.yaml | 20 - environments/metrics/qdr-edge-only.yaml | 20 - .../qdr-form-controller-mesh-ansible.yaml | 26 - .../metrics/qdr-form-controller-mesh.yaml | 21 - environments/multipathd.yaml | 6 - .../net-2-linux-bonds-with-vlans.j2.yaml | 17 - .../net-bond-with-vlans-no-external.j2.yaml | 28 - environments/net-bond-with-vlans.j2.yaml | 24 - environments/net-dpdkbond-with-vlans.j2.yaml | 20 - environments/net-multiple-nics-vlans.j2.yaml | 18 - environments/net-multiple-nics.j2.yaml | 24 - environments/net-noop.j2.yaml | 2 - ...single-nic-linux-bridge-with-vlans.j2.yaml | 16 - ...-single-nic-with-vlans-no-external.j2.yaml | 24 - .../net-single-nic-with-vlans.j2.yaml | 22 - .../network-environment-v6-all.j2.yaml | 40 - environments/network-environment-v6.j2.yaml | 87 - environments/network-environment.j2.yaml | 85 - environments/network-management-v6.yaml | 29 - environments/network-management.yaml | 29 - environments/networks-disable.j2.yaml | 12 - environments/neutron-bgpvpn-bagpipe.yaml | 26 - environments/neutron-bgpvpn.yaml | 13 - environments/neutron-l2gw.yaml | 26 - environments/neutron-linuxbridge.yaml | 9 - environments/neutron-ml2-cisco-vts.yaml | 29 - environments/neutron-ml2-mlnx-sdn.yaml | 14 - environments/neutron-ovs-dvr.yaml | 67 - environments/neutron-sfc.yaml | 6 - environments/nonha-arch.yaml | 10 - environments/noop-deploy-steps.yaml | 5 - environments/nova-api-policy.yaml | 10 - environments/nova-az-config.yaml | 2 - environments/nova-cell.yaml | 19 - .../nova-hw-machine-type-upgrade.yaml | 10 - environments/overcloud-baremetal.j2.yaml | 10 - environments/overcloud-steps.yaml | 10 - environments/ovs-hw-offload.yaml | 27 - environments/podman-ha.yaml | 21 - environments/podman.yaml | 7 - .../predictable-placement/custom-domain.yaml | 35 - .../custom-hostnames.yaml | 33 - environments/public-tls-undercloud.yaml | 7 - environments/puppet-tenant-vlan.yaml | 4 - ...cated_ceph_env_files-e71ea73eefe8bfad.yaml | 7 - environments/rhsm.yaml | 46 - environments/securetty.yaml | 12 - environments/services-baremetal/barbican.yaml | 7 - .../services-baremetal/cinder-backup.yaml | 4 - environments/services-baremetal/collectd.yaml | 2 - environments/services-baremetal/etcd.yaml | 2 - environments/services-baremetal/manila.yaml | 4 - .../neutron-ml2-cisco-vts.yaml | 38 - .../neutron-ovn-dvr-ha.yaml | 28 - .../services-baremetal/neutron-ovn-ha.yaml | 33 - .../services-baremetal/neutron-sriov.yaml | 15 - environments/services-baremetal/octavia.yaml | 16 - environments/services/barbican-edge.yaml | 4 - environments/services/barbican.yaml | 7 - environments/services/collectd.yaml | 2 - environments/services/designate.yaml | 20 - environments/services/etcd.yaml | 2 - environments/services/frr.yaml | 13 - .../haproxy-internal-tls-certmonger.yaml | 4 - .../haproxy-public-tls-certmonger.yaml | 7 - environments/services/heat-api.yaml | 2 - environments/services/heat-engine.yaml | 2 - environments/services/ironic-conductor.yaml | 2 - environments/services/ironic-inspector.yaml | 6 - environments/services/ironic-overcloud.yaml | 11 - environments/services/ironic.yaml | 8 - 
...keystone_domain_specific_ldap_backend.yaml | 15 - environments/services/manila.yaml | 4 - .../services/masquerade-networks.yaml | 4 - environments/services/netcontrold.yaml | 2 - .../services/neutron-ml2-cisco-vts.yaml | 38 - environments/services/neutron-mlnx-agent.yaml | 33 - environments/services/neutron-ovn-dpdk.yaml | 10 - environments/services/neutron-ovn-dvr-ha.yaml | 30 - environments/services/neutron-ovn-ha.yaml | 32 - environments/services/neutron-ovn-sriov.yaml | 24 - .../services/neutron-ovn-standalone.yaml | 32 - environments/services/neutron-ovs-dpdk.yaml | 15 - environments/services/neutron-ovs-dvr.yaml | 69 - environments/services/neutron-ovs.yaml | 32 - environments/services/neutron-sfc-ovn.yaml | 8 - environments/services/neutron-sriov.yaml | 15 - environments/services/octavia.yaml | 16 - environments/services/ptp.yaml | 16 - environments/services/qdr.yaml | 2 - environments/services/undercloud-haproxy.yaml | 4 - .../services/undercloud-neutron-ovn-dhcp.yaml | 31 - environments/services/undercloud-tls.yaml | 4 - environments/services/update-odl.yaml | 11 - ...t-stack-consistent-hostname-format.j2.yaml | 5 - environments/sshd-banner.yaml | 13 - environments/ssl/enable-internal-tls.j2.yaml | 42 - environments/ssl/enable-memcached-tls.yaml | 7 - environments/ssl/enable-tls.yaml | 45 - .../ssl/inject-trust-anchor-hiera.yaml | 22 - environments/ssl/inject-trust-anchor.yaml | 20 - .../ssl/no-tls-endpoints-public-ip.yaml | 14 - .../ssl/tls-endpoints-public-dns.yaml | 41 - environments/ssl/tls-endpoints-public-ip.yaml | 41 - .../ssl/tls-everywhere-endpoints-dns.yaml | 82 - .../standalone/standalone-overcloud.yaml | 91 - .../standalone/standalone-tripleo.yaml | 105 - environments/stdout-logging.yaml | 17 - .../storage/cinder-netapp-config.yaml | 107 - environments/storage/cinder-nfs.yaml | 27 - environments/storage/glance-nfs.yaml | 58 - environments/storage/nova-nfs.yaml | 34 - environments/swift-external.yaml | 13 - environments/tripleo-validations.yaml | 4 - environments/undercloud.yaml | 140 - environments/updates/README.md | 15 - .../updates/update-from-192_0_2-subnet.yaml | 3 - .../update-from-ml2-ovs-from-rocky.yaml | 7 - ...date-from-overcloud-compute-hostnames.yaml | 2 - .../update-from-publicvip-on-ctlplane.yaml | 2 - environments/use-dns-for-vips.yaml | 5 - extraconfig/all_nodes/swap-partition.j2.yaml | 49 - extraconfig/all_nodes/swap.j2.yaml | 53 - .../krb-service-principals/role.role.j2.yaml | 106 - extraconfig/post_deploy/README | 2 - extraconfig/post_deploy/default.yaml | 10 - extraconfig/post_deploy/example.yaml | 28 - .../post_deploy/example_run_on_update.yaml | 42 - .../undercloud_ctlplane_network.py | 361 - extraconfig/post_deploy/undercloud_post.py | 106 - extraconfig/post_deploy/undercloud_post.sh | 53 - extraconfig/post_deploy/undercloud_post.yaml | 163 - firstboot/.gitkeep | 0 j2_excludes.yaml | 8 - .../default-network-isolation-ipv6.yaml | 61 - .../default-network-isolation.yaml | 56 - network-data-samples/ganesha-ipv6.yaml | 23 - network-data-samples/ganesha.yaml | 23 - .../legacy-routed-networks-ipv6.yaml | 99 - .../legacy-routed-networks.yaml | 98 - network-data-samples/management-ipv6.yaml | 13 - network-data-samples/management.yaml | 12 - network-data-samples/no-networks.yaml | 6 - .../routed-networks-ipv6.yaml | 93 - network-data-samples/routed-networks.yaml | 88 - .../vip-data-default-network-isolation.yaml | 39 - network-data-samples/vip-data-fixed-ip.yaml | 35 - .../vip-data-routed-networks.yaml | 40 - network/deployed_networks.yaml | 59 - 
network/endpoints/endpoint_map.yaml | 9890 ----------------- network/networks.j2.yaml | 55 - network/ports/ctlplane_vip.yaml | 105 - network/ports/deployed_port.j2 | 87 - network/ports/deployed_port.network.j2.yaml | 1 - network/ports/deployed_vip_ctlplane.yaml | 40 - network/ports/deployed_vip_port.j2 | 67 - .../ports/deployed_vip_port.network.j2.yaml | 1 - network/ports/from_service.yaml | 60 - network/ports/from_service_v6.yaml | 61 - network/ports/net_ip_list_map.j2.yaml | 98 - network/ports/net_ip_map.j2.yaml | 79 - network/ports/net_vip_map_external.j2.yaml | 56 - network/ports/net_vip_map_external_v6.j2.yaml | 56 - network/ports/noop.yaml | 94 - network/ports/port_from_pool.j2 | 100 - network/ports/port_from_pool.network.j2.yaml | 1 - .../ports/port_from_pool_v6.network.j2.yaml | 2 - network/service_net_map.j2.yaml | 92 - network_data_default.yaml | 7 - network_data_undercloud.yaml | 3 - overcloud-resource-registry-puppet.j2.yaml | 512 - overcloud.j2.yaml | 1300 --- plan-samples/README.rst | 18 - .../plan-environment-derived-params.yaml | 36 - puppet/extraconfig/pre_deploy/README | 12 - .../pre_deploy/controller/multiple.yaml | 18 - puppet/extraconfig/pre_deploy/default.yaml | 8 - puppet/extraconfig/pre_deploy/per_node.yaml | 85 - puppet/extraconfig/tls/ca-inject.yaml | 62 - puppet/role.role.j2.yaml | 443 - .../notes/6.0.0-b52a14a71fc62788.yaml | 127 - ...ePublicTLS-parameter-b3fcd01af6f3c101.yaml | 9 - ...LS-CA-File-parameter-c24ee13daaa11dfc.yaml | 6 - ...nd-forwarder-support-8f29e47d4275a798.yaml | 5 - .../Add-Unbound-service-ba72830f9c75ecc3.yaml | 4 - ...t-for-ibm_svf-driver-342ca73422326f62.yaml | 9 - ...-specific-hiera-file-7a41a23017a545b9.yaml | 5 - ...e-zaqar-profile-path-7b00c68c0812fb3d.yaml | 6 - ...ntainerLogStdoutPath-20cbce05a1710d8a.yaml | 5 - ...NovaLibvirtPidsLimit-cdad2166b6c0195f.yaml | 6 - .../notes/DdpPackage-581e67f08908ed51.yaml | 5 - ...a_catalog_admin_info-006ebda240f730a2.yaml | 5 - ...e-mongodb-by-default-cce37d3254a77d27.yaml | 8 - ...able-TLS-for-libvirt-0aab48cd8339da0f.yaml | 6 - ...-snooping-for-ml2ovs-d794ed4eab7c098c.yaml | 3 - ...FernetKeys-parameter-2478cf5fc5e64256.yaml | 6 - ...UponFirstUse-boolean-a3b35fa933da1d6b.yaml | 5 - ...terface-configurable-2b634793c4f13950.yaml | 4 - ...nt-type-configurable-718be8645b7ab3a0.yaml | 5 - ...ate-memcached-debug.-41cfa69d4537cbd7.yaml | 5 - ...ovaReservedHugePages-35a13e828bfc92e9.yaml | 10 - ...-healthcheck-new-API-dae1a7ffce51e359.yaml | 6 - .../ODL_log_mechanism-99f008362d2392de.yaml | 4 - .../OVNAvailabilityZone-bc3d44a7e11d83e5.yaml | 5 - .../notes/OVNEncapTos-d3312e58600050b3.yaml | 7 - ...capType-option-added-85296d2fe1d94f35.yaml | 6 - .../notes/OvsDisableEMC-ab29e5c08856d439.yaml | 3 - ...irewallDriver-option-f4289b404abcc0b3.yaml | 12 - ...m-KeystoneUrl-output-fe4ce6f1a45849d3.yaml | 4 - ...olume_mode-parameter-5553a9b39718a749.yaml | 9 - ...n-provider-to-fernet-2542fccb5a588852.yaml | 6 - ...FernetKeys-parameter-bd635a106bb8e00f.yaml | 10 - ...calConnect-parameter-8831aad928235458.yaml | 6 - ...net_vip_map_external-c2c83431feaf7f35.yaml | 10 - ...alConfigExtraMapData-793757a2b767abe3.yaml | 5 - ...ageBackend-parameter-9dd87e751b576007.yaml | 9 - ...-hosts-to-hostsentry-20a8ee8a1a210ce2.yaml | 9 - ...eters-neutron-ironic-3d7330a7dc82f43d.yaml | 7 - ...r-snmp-readonly-user-c90c9e5a12c92893.yaml | 6 - ...bican-client-for-dcn-7182e8bab41fce21.yaml | 13 - ...dd-barbican-hsm-code-2ceffb2e1c3f6b67.yaml | 10 - .../add-bgpvpn-support-f60c5a9cee0bb393.yaml | 4 - 
...add-cadf-environment-0ce0078348c5333f.yaml | 7 - ...eilometer-agent-ipmi-2c86726d0373d354.yaml | 3 - ...inder-backend-nvmeof-023d967980fcf7b8.yaml | 4 - ...r-backup-nfs-backend-0108fba91a3058ea.yaml | 7 - ...as-secure-parameters-53f9d6a6e9bc129b.yaml | 5 - ...nfs-snapshot-support-16664aa46a67a5ad.yaml | 13 - ...bd-flatten-parameter-3951b341262488fe.yaml | 7 - ...torage-az-parameters-cddf410bc3464193.yaml | 11 - .../add-cisco_vts_ml2-fa96d8edb117c416.yaml | 6 - ...ry-limit-heat-config-14239eada092811e.yaml | 4 - ...ompute-sriov-ib-role-15baefb0dadfcbac.yaml | 5 - ...add-computevdpa-role-1f7c5df94d9900b6.yaml | 11 - ...on-logging-parameter-e51f59175c8da204.yaml | 3 - ...pher-protocol-params-d1f50f2c540c3f2b.yaml | 6 - ...ainer-registry-login-08d6a87586c84a99.yaml | 10 - ...-interface-parameter-f07c1fe24c7cb543.yaml | 8 - ...d-default-ntp-server-696b8568e09be497.yaml | 6 - ...iftdatamap-parameter-351ee63800016e4d.yaml | 6 - .../add-docker-sfc-bda84d08b119a250.yaml | 4 - ...disable-neutron-heat-6f031e2a4058a581.yaml | 5 - ...to-distributed-roles-9fe55e4d05f46874.yaml | 5 - ...point-base-parameter-852554398b9f3a19.yaml | 7 - ...able-network-subnets-7076e3b7a466d5fd.yaml | 15 - ...t-for-host-parameter-02e3d48de1f69765.yaml | 9 - ...outes-in-nic-configs-1628993fcaf6095c.yaml | 8 - ...-ipv6-diable-options-9aaee219bb87ac6a.yaml | 7 - ...c-inspector-tls-port-4e59d0c2b2922f68.yaml | 5 - ...k-rewrap-to-barbican-ef930dda85fe1e1d.yaml | 7 - ...el-package-parameter-f3ad68ed4b72b0f5.yaml | 6 - .../add-l2gw-agent-1a2f14a6ceefe362.yaml | 3 - ...add-l2gw-api-support-2206d3d14f409088.yaml | 3 - .../add-ldap-backend-0bda702fb0aa24bf.yaml | 5 - ...lanox-sdn-ml2-docker-58d242b5a8c40ade.yaml | 5 - ...-glance-tls-internal-5d8e46650b174626.yaml | 6 - ...-discovery-parameter-a113a6de8a220408.yaml | 7 - ...mistral-event-engine-3a33700c20c88e86.yaml | 4 - ...add-mistral-to-roles-52525fe9689e60b1.yaml | 4 - ...istral-volumes-param-a97418faf7a09022.yaml | 7 - ...nsible-containerized-de68f6c2fd6e05fc.yaml | 3 - .../add-networking-sfc-72cd16bb34075150.yaml | 4 - ...duler-worker-support-0ab66160b936a0c0.yaml | 4 - ...novamkisofscmd-param-6dbb64e4497a8ce6.yaml | 7 - ...-affinity-parameters-fe9222f17b16ee1f.yaml | 4 - ...a-cert-key-variables-48133267832ee196.yaml | 4 - ...driver-agent-support-470fa6fd769c8428.yaml | 6 - ...g-offload-parameters-dc645daf909a85c2.yaml | 3 - ...log-offload-protocol-dfc5d8387c3d83bc.yaml | 5 - ...octavia-provider-ovn-e3780665300e7c58.yaml | 5 - ...octavia-tls-settings-7583df511c53b27a.yaml | 4 - .../add-odl-sriov-env-e31982064c2bf646.yaml | 4 - .../add-opendaylight-ha-e46ef46e29689dde.yaml | 5 - ...ctivity-probe-config-a89f6dcd204192a8.yaml | 6 - ...-packages-on-upgrade-5a469428a6948148.yaml | 12 - ...d-ovn-db-endpointmap-2f75dea0b3aa4513.yaml | 15 - ...-dbs-cluster-support-6193cba5be432865.yaml | 14 - .../add-panko-expirer-bac4243971fc23e5.yaml | 4 - ...r-baremetal-disabled-4bc4e648da33f4da.yaml | 5 - ...ovsdb-probe-interval-243e66290a72eb3c.yaml | 6 - ...ovsdb-probe-interval-7ec6aa7f68b9a133.yaml | 8 - ...ernelargsdeferreboot-5b8e8b2f64c64a91.yaml | 7 - ...heat-apis-over-httpd-df83ab04d9f9ebb2.yaml | 6 - ...ctlscafile-parameter-0fd9c19dcd20be0b.yaml | 6 - ...dd-purge-nova-tables-e0706cdcffa0f42e.yaml | 6 - .../notes/add-qdr-99a27dffef42c13e.yaml | 8 - ...d-rabbit-fips-option-ddf5a0d7e37d8e5d.yaml | 5 - ...emoval-policies-mode-6869362fbeed2cd2.yaml | 6 - .../notes/add-s3-driver-da8a31a3c646892e.yaml | 4 - ...-collect-config-data-eeea2f57b3a82654.yaml | 6 - 
...e-auth-configuration-cb6537998a37f785.yaml | 4 - ...-for-IPv6-deployment-4c5b577cfb38c416.yaml | 4 - ...bican-pkcs11-options-a2ec14369518b40e.yaml | 9 - ...port-for-pure-cinder-1a595f1940d5a06f.yaml | 3 - ...nila-cephnfs-backend-fb35ecc995705256.yaml | 7 - ...g-ovn-metadata-agent-3bfecfbabd6d9628.yaml | 8 - ...-more-octavia-params-1e4a32f910e5f1fc.yaml | 6 - ...n_transparent-config-5623f8cffc8b41f0.yaml | 5 - ...os_interface_support-740a666a4ec629fc.yaml | 5 - ...cisco_vts_ml2_docker-0d7cfafe4647074d.yaml | 5 - ...d_compute_manage_ksm-86fcbd1dc9a193ca.yaml | 7 - .../add_db_sync_timeout-c9b2f401cca0b37d.yaml | 3 - ..._no_ntp_to_ipaclient-048fdfccf0cb7835.yaml | 7 - ...ltiple_cinder_stores-74eea265ee795660.yaml | 5 - ...age_cache_prefetcher-288120ffa6ee2a13.yaml | 6 - ...image_cache_settings-50af5ff56a7d7f75.yaml | 10 - ...sk_devices_to_attach-e219429d61a64bb0.yaml | 7 - ...ts_plugin_to_default-8acb69b112d4b31c.yaml | 5 - ..._to_configure_ulimit-2359aa058da58054.yaml | 15 - ...dd_site_id_cisco_ml2-60cfa450637d4fe0.yaml | 5 - .../add_token_auth-fb9f0dfe8e70a4c1.yaml | 10 - ...ributed_image_import-97458c56a1ad95d9.yaml | 7 - ...ys-set-cka-sensitive-7a9dc31290899cac.yaml | 5 - .../adding-rear-service-5fac71fa6fbd9c9e.yaml | 5 - ...cachedmaxconnections-b591c0fa39e821f5.yaml | 6 - ...s-post_upgrade_tasks-eba0656012c861a1.yaml | 12 - .../agent_down_time-d6aebe530ca31b9e.yaml | 8 - releasenotes/notes/aide-50fc91178430f1a5.yaml | 12 - ...al-puppet-deprecated-25388c875e1d9884.yaml | 5 - ...-via-undercloud.yaml-8766b43a20a4270f.yaml | 14 - ...nodes-extra-map-data-b8c8829dfa7f1c26.yaml | 6 - .../allocation_ratio-4a8ecf4cdf5fb7e2.yaml | 13 - ...e-image-member-quota-59e228bc62ec7792.yaml | 6 - ...-network-calculation-536c70391497256d.yaml | 8 - ...-resize-to-same-host-62f05a5370993425.yaml | 5 - .../notes/aodh-expirer-6631733d237a872e.yaml | 10 - .../apache-timeout-d61a3910ce7dc162.yaml | 5 - .../notes/api-policy-4ca739519537f6f4.yaml | 13 - .../auditd-to-ansible-f39bd119bf25320e.yaml | 13 - .../automated-lvmfilter-3bee670c0108585a.yaml | 23 - .../barbican-thales-ha-581fbe9b5ef4dc87.yaml | 11 - ...baremetal-cell-hosts-cd5cf5aa8a33643c.yaml | 6 - .../baremetal-role-34cb48cc30d7bdb4.yaml | 5 - .../big-switch-agent-4c743a2112251234.yaml | 5 - ...stnames-stack-output-02c9f93f9d1ce7d8.yaml | 4 - ...uestore_disks_config-f5553b0540237c4c.yaml | 4 - ...in-neutron-container-1e7835e4e7292492.yaml | 4 - .../notes/bug-1741452-6f73b550d94b8f6f.yaml | 5 - .../notes/bug-1741464-78403f89e9dc5d75.yaml | 6 - ...a-share-typo-upgrade-82d9b3edea77b94a.yaml | 4 - .../notes/bug-1782123-7b08ab6503033dd3.yaml | 6 - ...ync-overcloud-deploy-f323d85509ca81ec.yaml | 7 - .../notes/bug-1794268-0f875aa640b4246e.yaml | 6 - .../notes/bug-1797130-b40e402a9bffe2f0.yaml | 13 - .../notes/bug-1823274-ca992c1055035c7b.yaml | 7 - ...-fix-netapp-env-file-e5aeb0c2bf417d2b.yaml | 6 - ...nila-share-protocols-6ea6bcbbe21b25ee.yaml | 7 - ...-name-lower-override-7ea2635877b753c7.yaml | 8 - .../notes/bug-1895899-8d675670a0d05c15.yaml | 12 - .../notes/bug-1904482-dbc5162c8245a9b3.yaml | 21 - .../notes/bug-1907214-df2f07cbacbe8a24.yaml | 13 - ...3672-haproxy-rsyslog-6f8e386f8909a253.yaml | 4 - .../notes/bug-1984222-bd32dbd29741ad51.yaml | 4 - .../ca-certs-ansible-7a06114f0571d7f3.yaml | 5 - ...ystone-notifications-f1e463d0ec939b22.yaml | 5 - ...scovery_off_computes-2b977c6b9a01cde2.yaml | 13 - ...le-blacklist-support-164b5726cc4532b7.yaml | 5 - ...eph-ansible-workflow-70f7d52faf4cd419.yaml | 14 - ...h-hci-osd-count-type-241614543ef7cc58.yaml | 6 - 
...ls-with-ceph-ansible-f82425e585f90ef6.yaml | 17 - ...d_composable_service-f72dc4b1cd007734.yaml | 9 - ...tion_to_ceph_ansible-fd1a8cb659ac2401.yaml | 9 - ...xternal_multi_config-80d707e5bf75e886.yaml | 5 - .../notes/cephadm-28185ca8ac814567.yaml | 17 - ...tificiate-validation-1b08ab8cf40b7cad.yaml | 11 - ...ange-db-sync-timeout-57abe3e48d741842.yaml | 6 - ...-ironic-tftp-service-21cc25629b15992f.yaml | 7 - ...ystone-admin-network-a29499018c323cea.yaml | 4 - ...-ntp-default-servers-5c4cb51c10ffe60e.yaml | 10 - ...hange-panko-api-port-fb8967420cd036b1.yaml | 4 - ...-mode-policy-default-6c6cd7f02181f0e0.yaml | 11 - ...wift-worker-defaults-b98f12fb2f677bf1.yaml | 6 - ...tronPluginExtensions-b31ed1c44fec4568.yaml | 5 - ...ault_docker0_address-6a017b7078825996.yaml | 7 - ...lble-network-in-role-7860d8d5cd1df4b0.yaml | 34 - ...old-style-nic-config-4624a60e3303411b.yaml | 5 - ...onfigured-by-default-859e44745491027a.yaml | 7 - ...d-timeout-parameters-54550a6e1c11c0b9.yaml | 6 - ...backup-active-active-2eb8f8cf612a7989.yaml | 8 - ...ompression-algorithm-337a6708264cb84a.yaml | 7 - ...r-backup-concurrency-dc7627c617d36133.yaml | 8 - ...ckup-gcs-s3-backends-7dc04376150164fc.yaml | 5 - ...p-other-ceph-cluster-36852bf2edfd11a7.yaml | 8 - ...-default-volume-type-c77e7a7ddafdf172.yaml | 12 - ...-default-volume-type-cc21a256202eb476.yaml | 7 - ...llemc_unity-backends-96c6d2764209e317.yaml | 7 - ...dellemc_vnx-backends-930c8e9309a6ccf5.yaml | 7 - ...iple-netapp-backends-71ed7fef3008e654.yaml | 7 - ...ultiple-nfs-backends-a6b7a107be066596.yaml | 12 - ...ltiple-pure-backends-4537532f428315e1.yaml | 7 - ...r-nfs-conversion-dir-ba9b8dce515808ce.yaml | 8 - ...hot-support-disabled-2d2e08c97537bc94.yaml | 14 - .../cinder-nvmeof-fixes-d3f53887e0dde454.yaml | 8 - ...nder-rbd-multiconfig-dff6b46a0b20331a.yaml | 6 - ...inder-remove-dell-ps-fce96d05f529d0da.yaml | 5 - ...one-project-personas-b71c35c4c1a8bbd8.yaml | 9 - .../cinder-v1-cleanup-7154ca07652804cf.yaml | 11 - .../cinder-v3-endpoints-498e00f96e77f3c6.yaml | 5 - ...volume-active-active-976f2bc33ab52c94.yaml | 8 - ...efault_switch_to_A-A-4adb439183be78f0.yaml | 11 - ...backend_new_template-21243ef33346ed9a.yaml | 9 - ...backend_new_template-b25b017fdd7c90c1.yaml | 9 - ...-heat-api-cloudwatch-f12315e86c7f12d4.yaml | 5 - .../cleanup-nova-params-cc7c501dcb9879d0.yaml | 7 - ...eanup-odl-clustering-93a3ec132f3c2343.yaml | 6 - .../cleanup-xinetd-e2f59fda914b32d9.yaml | 6 - .../collectd-amqp1-a1d8773e8e7ff80e.yaml | 5 - ...td-overcloud-gnocchi-049a63bbd196a9bb.yaml | 5 - .../collectd-polling-4aac123faaebd1bc.yaml | 6 - ...esses-plugin-default-1d07c9ddfde0c2f7.yaml | 6 - .../collectd-write-qdr-f790703e3f0c3245.yaml | 16 - .../notes/composable-ha-37e2d7e1f57f5c10.yaml | 12 - ...able-network-subnets-fbfcb6283a54ace7.yaml | 48 - ...posable-network-vips-4d7b28fa3769d38b.yaml | 4 - .../composable-upgrades-d9ec7c634365e8e0.yaml | 14 - ...as-hyperscale-driver-e7f0a35d7d9a8df1.yaml | 3 - ...mpute-cpu-shared-set-be9da772ac8f7582.yaml | 16 - ...s-state-on-host-boot-090507db3ffad0d4.yaml | 12 - ...ompute-tuned-profile-cbe4e22b1e185c5e.yaml | 7 - ...te_deprecated_params-a2d69efd75f7c50f.yaml | 9 - .../computeovsdpdk-role-67d53a405ce4174b.yaml | 4 - ...load-default-to-true-2331debd56c396eb.yaml | 10 - ...urable-snmpd-options-3954c5858e2c7656.yaml | 6 - ...configure-ip-forward-268c165708cbd203.yaml | 5 - ...configure-multipathd-d3fb4683b9ffbb4b.yaml | 6 - ...tipathd-with-ansible-f32f3ea627815191.yaml | 20 - ...snat-in-opendaylight-d5ed4d62275e1876.yaml | 5 - 
.../congress_removal-95dd16335fbb8bce.yaml | 5 - ...-post-deploy-configs-bc251a5446e5615d.yaml | 5 - ...ontainarise-barbican-1253606411d497ff.yaml | 4 - ...ainer-images-prepare-c446ed0b04c587f7.yaml | 14 - ...ner-logging-novajoin-90398bce0bc1a7b9.yaml | 7 - .../notes/container_cli-64a217e8abb508c4.yaml | 5 - ..._image_prepare_debug-52fcb324633d2cf0.yaml | 6 - ...lbaas-service-plugin-20562487d6631c88.yaml | 5 - ...tainerized-ceph-role-1e02fd11551e92ac.yaml | 17 - ...erized-services-logs-0dc652513870f46d.yaml | 11 - ...ized-tempest-support-0ceaaf6427ce36e9.yaml | 6 - ...ontainers-as-default-37bbe8afa0a60c2b.yaml | 16 - ...ners-selinux-context-13b720cc1d5ec6f2.yaml | 16 - ...es-and-dpdk-enabling-0233a06e23259660.yaml | 9 - ...er_deprecated_params-7f009de6d17c05a4.yaml | 8 - ...verge-var_run-to-run-20286a74e780e999.yaml | 4 - ...ource-name-to-number-80ada6c825554f56.yaml | 5 - ...rosync_token_timeout-d674c873c505cfe6.yaml | 6 - .../notes/cpu-flags-5b027db3eb2b86c2.yaml | 7 - .../notes/cpu-resources-f8b511d39c6e0cfe.yaml | 13 - ...te-br-ex-for-compute-b967312323d45d9b.yaml | 9 - ...oud-conf-host-routes-00e981d1f00405d8.yaml | 12 - .../ctlplane_fixed_ip-81d14db5a01fa531.yaml | 8 - ...custom-network-names-c62a57827d98eea5.yaml | 8 - .../dcn-environments-96379042a22f895e.yaml | 4 - ...n-hci-storage-rename-0b1c17dd50f4cc9a.yaml | 8 - .../debug_per_service-54a260917c4a7e3a.yaml | 9 - ..._workers_neutron_api-eb5820d6bcedb53b.yaml | 6 - ...-bootstrap-server-id-eff49ef40bfde414.yaml | 5 - ...h-pub-key-to-keypair-70377d43bf76a407.yaml | 3 - ...-ovn-to-raft-cluster-9a1fb213d28d0650.yaml | 16 - ...llemc-driver-cleanup-4a1febdc51ec674c.yaml | 14 - ...deprecate-old-driver-f428e372280c44e6.yaml | 5 - .../notes/dellsc-driver-d7cd300a24a64b01.yaml | 6 - .../notes/dellsc_cinder_a5572898724a11e7.yaml | 5 - .../deploy_steps_tasks-bb2279bcda7cd04c.yaml | 5 - ...r-environment-output-d838c782f76823b7.yaml | 6 - ...erver-firewall-purge-9d9fe73faf925056.yaml | 6 - ...config-static-bridge-c15bf767d3a28759.yaml | 7 - .../deployed-servers-fd47f18204cea105.yaml | 8 - ...deprecate-DnsServers-a65541c2dceced08.yaml | 15 - ...te-IronicIPXEEnabled-6cb68b0bfee5ac4d.yaml | 5 - ...sqlIncreaseFileLimit-20548c5f7234d14c.yaml | 5 - ...xternalNetworkBridge-7d42f1a0718da327.yaml | 10 - ...viceAddressesMapping-b4af9f1a48b66854.yaml | 6 - ...sOverrides-parameter-40820bd943e20018.yaml | 5 - ...e-ceilometer-expirer-83b193a07631d89d.yaml | 11 - ...e-ceilometer-workers-72b01f4bc6423f48.yaml | 3 - .../deprecate-collector-a16e5d58ae00806d.yaml | 14 - ...oyed-server-port-map-99d4d6a01f630d04.yaml | 7 - ...eprecate-docker-yaml-fa75b14e93fdf763.yaml | 7 - ..._migration_sshd_port-b99c1ba0ac012276.yaml | 5 - ...andles_share_servers-d6af4e7c25c34c6e.yaml | 8 - ...e-duplicate-nfv-envs-615d5b97bae6f9a9.yaml | 5 - ...-etcd-tls-workaround-de5dd1fc19dae5b2.yaml | 14 - .../deprecate-fluentd-049bce4cb556962f.yaml | 6 - ...te-ironic-ip-version-27e24f03da6ddd59.yaml | 4 - ...-kubernetes-services-69ca5ec733d24644.yaml | 4 - ...cate-management-envs-bbc7fddb0ca871af.yaml | 5 - ...phfs-enable-snapshot-7857604802378720.yaml | 6 - ...hfsnative-parameters-e24978c8b344573b.yaml | 11 - ...ecate-misspelt-param-34fee48da69fc480.yaml | 12 - ...e-networking-ansible-11004424696a71a4.yaml | 5 - ...eutron-host-services-93a7187fdebe0302.yaml | 9 - ...utron-server-mapping-af17af3d8e09ade4.yaml | 10 - ...e-neutronsriovnumvfs-96c9de5c6f071aeb.yaml | 11 - ...g-download-templates-3971fbf85b93846d.yaml | 7 - ...-numa-live-migration-5e0601c7d26a8f3c.yaml | 5 - 
...uler_default_filters-122a7448d808dd82.yaml | 9 - .../deprecate-novajoin-ef06b1ca33a2b80c.yaml | 28 - ...opendaylight-service-64b960923324edc4.yaml | 5 - .../deprecate-panko-b2bdce647d2b9a6d.yaml | 5 - ...uppet-selinux-config-cc8d2788c534d628.yaml | 5 - ...moved-netapp-systems-15c58d7e807ba33a.yaml | 18 - .../deprecate-sahara-8f0bc905e3d21af2.yaml | 5 - .../deprecate-sensu-dd2e8933cb147166.yaml | 4 - ...e-sevice-ipv6-params-e301590647a0c8f5.yaml | 7 - .../deprecate-snmp-084d9e625f4f4e56.yaml | 5 - .../notes/deprecate-vpp-2911807152cd4b62.yaml | 7 - ...-xinetd-service.yaml-d7594bf8a7b714e2.yaml | 7 - .../deprecate-zaqar-622f409474813783.yaml | 4 - ...e_auth_uri_parameter-bdebdc6614ce8b7e.yaml | 4 - ...e_ceph_pg_num_in_tht-91fe6303cc5ba07e.yaml | 13 - ...deprecate_docker_all-40eb568c9234a3d8.yaml | 5 - .../deprecate_ec2_api-811ac17575077b2a.yaml | 5 - .../deprecate_mistral-9136fd5e41b37e0d.yaml | 4 - ...ed-server-roles-data-464b3147ae7434d3.yaml | 6 - ...ated-vn-ha-env-files-f0d967d173dcdb16.yaml | 7 - .../deprecated_services-172a1ae6348e6c52.yaml | 4 - .../deprecation-573102-e63aeba5930adf28.yaml | 7 - ...rams-custom-plan-env-3a810ff58a68e0ad.yaml | 4 - ...-enable-bind-logging-642e77541645519b.yaml | 6 - ...ate-environment-file-bd08eef69758a996.yaml | 6 - ...e-redis-coordination-b4afdcc8855cc0ca.yaml | 4 - .../designate-support-b66127d9f4995db2.yaml | 5 - ...ct-deploy-by-default-bc78a63f0a0c6e15.yaml | 12 - .../disable-ceilo-api-dfe5d0947563bbe0.yaml | 4 - ...ble-ceilo-middleware-6853cb92e3e08161.yaml | 5 - ...-for-setuid-programs-e83a2a5da908b9c3.yaml | 12 - ...-heat-api-cloudwatch-d5a471da22472bde.yaml | 15 - ...on-lifecycle-actions-d551fe4551d71770.yaml | 10 - ...r-for-icmp-redirects-f325f91d71b58b5f.yaml | 19 - ...snapshots-by-default-d5320a05d9b501cf.yaml | 5 - ...rized-swift-services-4f7edd98203d749b.yaml | 5 - ...-notification-driver-a888d4e9b8eed1dc.yaml | 6 - ...default_apache_vhost-f41d11fe07605f7f.yaml | 6 - .../distributed-roles-a3a5a5d9eb25e6b0.yaml | 8 - ...nf-streams-interface-c7191173efb7b139.yaml | 8 - .../notes/docker-bip-9a334c8f31a59b96.yaml | 10 - ...puppet-config-volume-5ad50b90dc24672b.yaml | 10 - ...er-service-all-roles-5c22a018caeafcf0.yaml | 5 - .../docker_puppet_tasks-e74637224ee66f66.yaml | 5 - ...unregister-on-delete-9708f7cbc73a0d2f.yaml | 8 - .../dpdk_driver_remove-76d61711c19cd099.yaml | 7 - .../drop-baremetal-aodh-22cb1dfc59204427.yaml | 4 - ...rop-baremetal-glance-5836be7624e63133.yaml | 3 - ...op-baremetal-haproxy-5e2f0f3c9b8da664.yaml | 4 - ...rop-baremetal-ironic-ab7af5d2b75c9cbf.yaml | 3 - ...baremetal-keepalived-c48cde2007b372a4.yaml | 4 - ...p-baremetal-keystone-000a4babb7f8ef60.yaml | 4 - ...-baremetal-memcached-6cbdfd80bef14a41.yaml | 4 - ...remetal-mysql-server-a36a2f39f88c8181.yaml | 4 - ...drop-baremetal-redis-2e2f221b9ee6f9cc.yaml | 4 - ...rop-baremetal-sahara-f2922322511047de.yaml | 4 - ...drop-baremetal-zaqar-abe0f9cae9c87879.yaml | 3 - ...ric-driver-templates-b33e8966c263a1fd.yaml | 7 - ...dundant-metric-param-ddea256079fbbbe0.yaml | 6 - ...with-dots-deprecated-52d4c77e09821e87.yaml | 10 - .../enable-arp_accept-6296b0113bc56b10.yaml | 9 - .../notes/enable-cache-293c39b3b6f55c80.yaml | 6 - ...deep_compare-fencing-698cec642ecd54a4.yaml | 6 - ...extension-by-default-4a620563ff5954d6.yaml | 5 - .../enable-dpdk-on-boot-f5b098b10152b436.yaml | 8 - ...e-glance-image-cache-01e54adaa952f90b.yaml | 7 - ...g-suspicious-packets-d5545586f917d2ca.yaml | 9 - ...-with-ironic-service-8c2909023e3896a5.yaml | 6 - ...on-lbaas-integration-b72126f2c7e71cee.yaml 
| 4 - ...on-lbaas-integration-fa999ccd548ee6b6.yaml | 4 - .../enable-ntp-iburst-efbc24a43a72daae.yaml | 7 - ...ia-flavor-by-default-d356fbb265508f76.yaml | 7 - ...orwarding-in-neutron-956cb21a3310e881.yaml | 8 - ...ure-rbac-for-neutron-842bd41339a48f26.yaml | 8 - ...no-resource-registry-db6ee6319964ab7f.yaml | 20 - ...external-swift-proxy-941917f8bcc63a5d.yaml | 5 - ...nable-vfio-for-sriov-62b7bd67df250840.yaml | 8 - ..._sparse_image_upload-fe9e594f91cf57f1.yaml | 7 - ...onversion_parameters-740e5282385444c7.yaml | 7 - ..._aggregate_filtering-2aec5a693bf79852.yaml | 12 - ...re_rbac_for_keystone-62685484ef589726.yaml | 9 - ...c_support_for_glance-167d53c491cd326c.yaml | 8 - ...-metadata-properties-72cdc946748e9b1b.yaml | 7 - .../notes/endpointmap-8825fcd5fa5a2ba2.yaml | 7 - ...debug-params-boolean-b1256f282e414b98.yaml | 6 - ...ure-debug-is-boolean-aecc8fbb6c8fd368.yaml | 6 - ...erl-sbwtdcpu-sbwtdio-b26506a0430480dc.yaml | 9 - ...t-deployment-outputs-1377270acbc5bc7e.yaml | 5 - .../notes/etcdtoken-4c46bdfac940acda.yaml | 6 - .../notes/example-roles-d27c748090f6a154.yaml | 6 - ...ric-processing-delay-0c098d7ec0af0728.yaml | 3 - ...y-default-on-compute-f3ff6bf46ab80640.yaml | 15 - ...ernal-update-upgrade-2d7bd96959ace08d.yaml | 11 - ...nal_ceph_environment-05a1405bce969060.yaml | 15 - ...al_post_deploy_tasks-1b6423b73ae083cc.yaml | 6 - ...-notification-topics-8be1d37afd90b910.yaml | 9 - .../extra_group_vars-aafa71945882442f.yaml | 7 - ...extraconfig_services-18ca28841974b8dd.yaml | 5 - ...orward-upgrade-tasks-a8b5d64831a71e24.yaml | 8 - ...cript-to-switch-repo-a65db91760b46ec2.yaml | 30 - ...figureDelegatedRoles-d16ddb5eb037c4c8.yaml | 10 - ...-filter-empty-string-b024ce1bce9754ef.yaml | 11 - ...-StorageNFS-net.yaml-bd77be924e8b7056.yaml | 20 - ...etwork-resource-name-41bbf4258a0174eb.yaml | 5 - .../notes/fix-bridge-nf-call-defaults.rst | 6 - ...nder-nfs-share-usage-0968f88eff7ffb99.yaml | 6 - ...alived-needs-restart-6d7efbb9788e0f95.yaml | 7 - ...r-and-tls-everywhere-1f2300f9a2ba4d98.yaml | 7 - ...x-default-masquerade-f91bfbcfe8a2a87d.yaml | 7 - ...power-state-interval-90778dbcb0104130.yaml | 6 - ...hen-network-disabled-156190243ff239ea.yaml | 6 - ...mic-network-disabled-9f700a9e900221b6.yaml | 5 - ...multiple-nics-config-43b8dc459b5694b2.yaml | 7 - ...role-count-greater-1-10ce2010556e5b76.yaml | 5 - ...x-glance-api-network-4f9d7c20475a5994.yaml | 3 - ...ndition-for-rhel-reg-311a3dce76cc0ec1.yaml | 9 - ...tftp-folder-creation-36e73d187334dbbe.yaml | 7 - ...ironic-pxe-tftp-ipv6-a315184667e262b3.yaml | 8 - ...lter-on-vip-in-jinja-c8f996ffed94d3cd.yaml | 7 - ...vice-net-map-replace-463dd1296766cc47.yaml | 6 - ...ng-roles-for-octavia-ca024308bf15f0cf.yaml | 4 - ...sing-tacker-password-c2ce555cdd52c102.yaml | 4 - ...-cert-key-perms.yaml-efcc17f188798cc4.yaml | 5 - ...fix-neutron-dhcp-tls-d62383530419f0bd.yaml | 6 - ...eutron-dpdk-firewall-436aee39a0d7ed65.yaml | 5 - ...utron_admin_auth_url-c88224251d8eb807.yaml | 4 - ...ent-failed-on-reboot-373a31d28ea72587.yaml | 5 - ...anager-firewall-rule-cdffe31d580ecf4b.yaml | 4 - ...log-facility-default-7b6d0670a51fe845.yaml | 5 - .../fix-odl-gui-feature-6525b8c6807fb784.yaml | 5 - ...ix-odl-karaf-logging-eca10973e57caa3a.yaml | 12 - ...l-missing-etc-config-87c33bc05f692f44.yaml | 5 - ...llowed-network-types-d196d6d40fadb1bc.yaml | 6 - ...dl-ovs-vhostusermode-7bc2b64fd2676ca2.yaml | 10 - ...ovider-mapping-hiera-5b3472184be490e2.yaml | 4 - .../fix-odl-tls-docker-67b8453a6be7fa58.yaml | 4 - ...update-upgrade-cache-83004abe108971c1.yaml | 6 - 
...l-websocket-firewall-9e2f78ebaa39313f.yaml | 6 - ...rom-pool-host-routes-7fcc4d00cb11603d.yaml | 8 - ...blic-cert-generation-41c75be0b07a48fe.yaml | 5 - ...deploy-artifact-urls-03d5694073ad159d.yaml | 4 - ...ice-auth-url-octavia-90f19c835cb1cc0a.yaml | 4 - ...place-hostname-issue-393fb6954f12f7e6.yaml | 10 - ...tenant-net-name-type-94a9c50c86529001.yaml | 5 - ...x-tls-neutron-agents-c40d5fc779d53bfa.yaml | 6 - ...bnet-defaults-merged-5a6061b29618f5cf.yaml | 9 - .../notes/fix_nova_host-0b82c88597703353.yaml | 9 - ...io-max.nr-on-compute-06447122b8e12c71.yaml | 7 - .../notes/frr-support-21648d0660a810ac.yaml | 15 - ...lera-sst-mariabackup-5a667eed1787353f.yaml | 17 - ...ather-facts-variable-d7f1d74d1dc68ee9.yaml | 4 - ...certs_key_passphrase-229a677df1b7f6e0.yaml | 6 - ...-sample-environments-8b523f55f36e940c.yaml | 21 - ...nfig-local-connector-5bbec3f591a9f311.yaml | 10 - .../glance-db-purge-bb185353a45880c7.yaml | 6 - ...nce-internal-service-86274f56712ffaac.yaml | 26 - .../glance-keystonev3-d35182ba9a3778eb.yaml | 4 - .../glance-multistore-82d4fc260acfb355.yaml | 12 - .../glance_image_copy-34a3e62c48eaee0e.yaml | 13 - ...e-policy-deprecation-09c086e9a0f9af86.yaml | 5 - .../gnocchi-keystonev3-d288ba40226545c9.yaml | 4 - .../gnocchi-nfs-backend-90febc9f87e7df08.yaml | 9 - .../notes/gnocchi-tmp-b5b48708c5dd17df.yaml | 8 - ...ame-in-ServiceNetMap-efffd0583bab827f.yaml | 4 - ...oxy-frontend-backend-b253d26eeadbfb6b.yaml | 8 - .../notes/haproxy-log-2805e3697cbadf49.yaml | 4 - ...healthcheck_disabled-7e67b45cda9afb17.yaml | 5 - ...api-wsgi-timeout-600-640058f1ae18232c.yaml | 5 - .../hiera_net_ip_map-ff866b443a28bdc4.yaml | 9 - .../notes/horizon-hsts-43ac1c7b602a4381.yaml | 6 - ...izon-session-timeout-ba4a62b5f38cbd11.yaml | 7 - ...izon_keystone_member-838d4b65cc0b0d11.yaml | 5 - ...horizon_logger_debug-cd70c45c1b695e4b.yaml | 8 - ..._prep_config_removal-f579718021db5385.yaml | 6 - .../hw-machine-type-6a0bd7bc3973c15a.yaml | 7 - ...reserved-host-memory-80434e8484a29680.yaml | 4 - .../innodb-tuning-param-e71d2fd727c450ec.yaml | 6 - ...certs_key_passphrase-908471f31d09f088.yaml | 5 - ...ll-openstack-selinux-d14b2e26feb6d04e.yaml | 6 - .../notes/instance-ha-18e59ab0e9697ef3.yaml | 6 - .../integrate-skydive-900756d8870b3876.yaml | 5 - ...ducing-qemutlsverify-af590e0243fe6b08.yaml | 9 - .../notes/ipa-mkhomedir-c126291bcbdd0111.yaml | 5 - .../notes/ipsec-8e24afb777de48e3.yaml | 11 - .../notes/ipv6_defaults-7dbb62113f4e5084.yaml | 10 - .../notes/ipxe_timeout-5824c87e849b1b50.yaml | 5 - .../ironic-boot-mode-07df530490bead50.yaml | 6 - .../ironic-boot-option-3f3036aa5e82ec7e.yaml | 12 - ...nic-cleaning-network-1e06881df0402221.yaml | 10 - ...nic-conductor-groups-577543f8ca612f06.yaml | 13 - ...ateful-address-count-ca568a32f07aec53.yaml | 7 - ...ronic-hardware-types-fe5140549d3bb792.yaml | 9 - .../ironic-inspector-43441782bdf0f84e.yaml | 5 - ...-inspection-ip-range-f399a938d50daadd.yaml | 25 - ...e-dnsmasq_ip_subnets-abba77307e761b96.yaml | 8 - ...networking-baremetal-29d9ad465565bb87.yaml | 4 - ...-neutron-integration-76c4f9e0d10785e4.yaml | 9 - .../notes/ironic-rescue-cb1edecce357fc0b.yaml | 5 - ...r-group-and-networks-9c5c409253ebe3c8.yaml | 14 - ...ironic-rpc-transport-b637fd2a3b99fee7.yaml | 11 - .../ironic-workers-e752707221a86c36.yaml | 5 - ..._cleanup_config_data-1d4ae909c0869a90.yaml | 15 - ..._power_notifications-f9977e3179b68868.yaml | 15 - .../notes/isilon_manila_e9677898724a11e7.yaml | 4 - ...epalived_depcrecated-12ac4e1d59d29e1d.yaml | 8 - ...eepalived_deprecated-e0b20da2d51714b7.yaml | 6 
- .../keepalived_removed-04c52519d7b33acb.yaml | 6 - .../notes/kernel-extra-aa48704056be72cd.yaml | 6 - .../kernel_sysctl_role-d4f6a50d08b7a388.yaml | 7 - ...-uuid-token-provider-a4c4827c1a05556b.yaml | 7 - ...-notification-driver-0c71165430eb57ef.yaml | 7 - .../keystone_authtoken-655da476bbf82e1c.yaml | 3 - .../keystone_internal-53cc7b24ebdd9df4.yaml | 9 - .../l2gw-driver-change-1f5b11d5676c5015.yaml | 3 - .../l3_agent_radvd_user-1814df18745101d6.yaml | 6 - ...tellite-repo-enabled-8b60528bd5450c7b.yaml | 6 - .../notes/libvirt-debug-0bf95db421329ff6.yaml | 9 - ...ibvirt-virtlogd-logs-377ff4083a58cde4.yaml | 8 - .../libvirt_swtpm_logs-a3ba8c5dcbd53c8c.yaml | 5 - .../notes/libvirtd-tls-6de6fb35e0ac0ab1.yaml | 6 - ...ind_mounts_for_certs-64cb88f78538a64b.yaml | 13 - ..._addr_all_transports-2fc9cd74d435a367.yaml | 9 - ...migration_port_range-54c28faf0a67a3fc.yaml | 11 - ...on_wait_for_vif_plug-6d16da261a138fb8.yaml | 8 - .../logging-paunch-7fa8570b380a7ebd.yaml | 4 - ...ate-containers-purge-a5587253fe6cbb28.yaml | 15 - .../maint-cleaning-153aa746d92a5292.yaml | 11 - ...ntain-lists-of-cidrs-19bc98dc9bc6b819.yaml | 29 - .../make-panko-default-8d0e824fc91cef56.yaml | 4 - .../manage-networks-9b1e5be19180deff.yaml | 10 - .../manila-db-purge-811512391617216d.yaml | 6 - ...la-with-managed-ceph-e5178fd06127624f.yaml | 11 - ...ith-NeutronEnableDVR-fe8aac6c4ce52bce.yaml | 6 - ...x-active-fernet-keys-f960f08838a75eee.yaml | 5 - .../notes/mdev_types-84da20d1791afbd2.yaml | 4 - ...he_use_advanced_pool-41ca18221e60c05a.yaml | 7 - ...memcached-max-memory-ef6834d17953fca6.yaml | 7 - ...mcached-verbose-logs-84ca938d76c16429.yaml | 6 - .../memcached_hardening-2529734099da27f4.yaml | 4 - .../memcached_logging-f69ade3665a70a5c.yaml | 5 - ...e_keys_from_services-cd17425d58b49840.yaml | 21 - .../notes/metrics-qdr-97c00cc8059963fa.yaml | 9 - .../midonet_removal-9b91c6bb610b139e.yaml | 5 - .../migration_over_ssh-003e2a92f5f5374d.yaml | 14 - .../minor-update-env-20657417094d4aeb.yaml | 7 - .../mistral-mod-wsgi-24d41a6f427237ff.yaml | 7 - .../mistral-removal-df87b8dd8bf6bd4e.yaml | 5 - .../mistral_container-23c4432aad469f30.yaml | 6 - .../notes/mistral_execs-5e1c363c9293504d.yaml | 12 - ...2-networking-ansible-0330b1203f0fc75c.yaml | 3 - .../notes/mod_ssl-e7fd4db71189242e.yaml | 5 - ...ular-libvirt-daemons-b5ad6043d6510077.yaml | 31 - .../notes/mongodb_drop-02daffbfe4975cb9.yaml | 6 - ...itor_interval_ovndbs-b14c886737965300.yaml | 9 - ..._to_configure_ulimit-82057bf64d7173a8.yaml | 5 - ...l-to-host-prep-tasks-934c6e0a9f75f15b.yaml | 8 - ...task_to_common_place-d284378fb16f180a.yaml | 9 - ...e-cinder-rbd-backend-9b015f99887e9241.yaml | 9 - .../mysql-auth-ed25519-28aaea4e69fbfdf7.yaml | 7 - ..._bundle_gcomm_cipher-df7655775d20f4d1.yaml | 6 - .../notes/n1kv_removal-5ff92235ee758cb3.yaml | 5 - ...-tasks-start-at-task-28e4ff92ceec85d2.yaml | 6 - ...ternal-id-properties-20d4aec2e557e980.yaml | 9 - ...rces-deletion-policy-a1aee566e650468c.yaml | 8 - ...emplates-mtu-setting-a76fe47cfc97b36f.yaml | 58 - .../network-update-var-355dbbb4daee7f46.yaml | 14 - ...etal-report-interval-d08a44a147a1846e.yaml | 10 - ...sq_enable_addr6_list-ead32a7739431607.yaml | 6 - ...on-az-config-service-3085b2c296df06c9.yaml | 24 - ...ron-cleanup-services-3a8579cd03fac953.yaml | 4 - .../neutron-kill-script-4d8b6a0130f6fcdd.yaml | 6 - ...plugin-support-added-b2e1292d49e087c3.yaml | 12 - ...2-overlay-ip-version-4f14932355847aa0.yaml | 5 - ...permitted-ethertypes-80dc7f2154786881.yaml | 5 - .../neutron-placement-6ea6de89bd30b592.yaml | 8 - 
...port-predictable-ips-857b8369ca81aa4c.yaml | 23 - ...ed-provider-networks-2af1220125612d6f.yaml | 8 - ...ic_type_prohibitlist-21e23e511ca0e922.yaml | 5 - .../neutron_db_rename-bbfbce1c58cadc84.yaml | 5 - .../new-pmem-params-18fb9c25808a7fe6.yaml | 14 - .../notes/nftables-13caf0261a170667.yaml | 21 - .../notes/nfv-rt-roles-9ceac2b10d565971.yaml | 3 - ...-config-ansible-vars-1943a9f2dcab5477.yaml | 8 - ...pt-handle-blank-line-f86553d7589826ed.yaml | 5 - ...with-ansible-default-cbcaedd0a5ad49b0.yaml | 19 - .../no-classic-drivers-9c59b696d8b50692.yaml | 8 - .../notes/no-iscsi-df52429ef64f4093.yaml | 13 - ...ookup_string_to_json-69362e93d862bd87.yaml | 7 - ...fication-driver-noop-e322ca6704a5bc50.yaml | 10 - ...-config-service.yaml-ed7f3846398b2291.yaml | 9 - ...ute-image-parameters-eb3a11bf0fd4691b.yaml | 11 - ...ompute-startup-delay-fdb1f229840bd0e6.yaml | 8 - .../nova-enable_vtpm-b24db74522c8a2eb.yaml | 5 - ...instance_usage_audit-dfb17d89446352ae.yaml | 10 - ...ostcopy-autoconverge-ca1719fd2abed45f.yaml | 8 - ...type-default-changed-27244a925f6d6200.yaml | 17 - ...ax_concurrent_builds-f900d84f35704452.yaml | 6 - ...ova-memlock-increase-066ed22764ed3ce1.yaml | 9 - ...-parms-role-specific-527915c6e99ceb89.yaml | 7 - .../nova-placement-43fd353e6870461c.yaml | 5 - .../nova-purge-maxdelay-fadeea1dc59e8086.yaml | 7 - ...d-pool-role-specific-010f6072d641d84f.yaml | 6 - ...a-scheduler-tunables-8c1dbab10b289480.yaml | 8 - ...d_nfs_vers_parameter-62b9e9d6150358d1.yaml | 8 - ...ive_migration_policy-04fe402b554085b9.yaml | 10 - ...pi_max_limit-support-43fe9792eca63599.yaml | 5 - ...api_show_host_status-f0dfaf4c2b0c536f.yaml | 19 - ...ost_use_internal_api-1bebb3e9c6e69113.yaml | 7 - ...to_internal_endpoint-aaf0e550750335eb.yaml | 7 - ...ute_default_cpu_mode-cda2bb3e56463b3a.yaml | 11 - ...a_compute_el8_legacy-273fb004d090191a.yaml | 10 - ..._fix_log_permissions-e866f91848d647fb.yaml | 9 - .../nova_compute_ksm-444f1cc51ceafb66.yaml | 8 - ...ute_nfs_default_vers-e61d0bf78794be28.yaml | 6 - .../nova_compute_nofile-0427e49cc8ae70a6.yaml | 6 - ...ve_delete_parameters-19288fa689965c58.yaml | 22 - ...eleted_rows-task_log-efbf0aac77aab128.yaml | 6 - ..._libvirt-guests_unit-7ac2c4b5511ca549.yaml | 16 - ...eprecate_consoleauth-ffb93ffa5393b630.yaml | 13 - ..._glance_rbd_download-e945933da26f10f0.yaml | 17 - ...dir_support_for_qemu-accfda4919b3d9dd.yaml | 18 - ...nova_image_cache_ttl-824f241363b9dd4e.yaml | 8 - ...a_libvirt_max_queues-8024fc63105bd25d.yaml | 6 - ...stats_period_seconds-b9b606232629cb38.yaml | 8 - ...bvirt_num_pcie_ports-f904bf0fb9a7b19e.yaml | 12 - ...queue_size_parameter-b4409a91c553ce1b.yaml | 6 - ..._cert_simplification-dbee541be9f55ce5.yaml | 30 - ...libvirtd_log_filters-63e9e6501d779dd9.yaml | 8 - ...a_libvirtd_log_level-b1393b582360ad4f.yaml | 10 - ...libvirtd_log_outputs-bf4091c0c7c5a968.yaml | 9 - ...ibvirtd_tls_priority-d0129f804d7ca847.yaml | 5 - ...al_metadata_per_cell-c374bd46a825c07e.yaml | 11 - ...a_http_cert_metadata-274e7e8a66727983.yaml | 9 - .../nova_metadata_wsgi-bfb240bc84194d05.yaml | 7 - ...gration_limit_access-20be8d69686ca95c.yaml | 8 - ...arget_libvirt_socket-585fa579e5b2704d.yaml | 9 - ...al_post_deploy_tasks-e978560ee59b8b56.yaml | 12 - ...fs_default_secontext-5ad33675aaf0b521.yaml | 13 - ...a_nfs_enabled_podman-a92ea12cd4cd92c8.yaml | 8 - .../nova_novnc_network-83a1479bf227f867.yaml | 10 - ...novnc_proxy_per_cell-f2d0c50d3c21c46c.yaml | 7 - ...nc_proxy_ssl_support-edc7bc13d0cd6d2b.yaml | 8 - ...ment_custom_provider-21203c3ff54c878c.yaml | 7 - 
...a_qemu_enable_config-c89016b4d93802d6.yaml | 7 - ...d_for_disk_migration-2e16003c4764a399.yaml | 12 - ...tadata_port_from_api-cdfaf8fb607f6427.yaml | 5 - ...ove_nova-consoleauth-227cbeb44e9ab5ef.yaml | 15 - ...nova_rpc_healthcheck-adbe5307dc04eeac.yaml | 9 - ...iscovery_on_each_run-11dbb6096ebbf51b.yaml | 7 - ...hown_on_every_deploy-c366af9898ecaeed.yaml | 9 - ..._placement_aggregate-9889b0a73dd1d042.yaml | 9 - ...required_for_tenants-6c7d90fd01bcc88d.yaml | 11 - ...or_availability_zone-ffd415710a9cb903.yaml | 9 - ...a_statedir_ownership-54c75dfe8ad64b4f.yaml | 7 - ...plated_cells_db_urls-2eb151090c49c51d.yaml | 10 - ..._cells_transport_url-275f0b707d9227ab.yaml | 10 - ...api_db_for_placement-eea44dd48c768f04.yaml | 12 - ...ova_virtlogd_wrapper-120fcfcfa0787b2b.yaml | 17 - ..._disable_cert_verify-45f532d7a924df86.yaml | 9 - .../notes/nsx-support-1254839718d8df8c.yaml | 3 - .../ntp-poll-options-3019fe517e012300.yaml | 10 - .../num-storage-sacks-f640be5fcd374a6b.yaml | 3 - ...numa-aware-vswitches-cd8df5dd9b725230.yaml | 5 - ...ge_deprecated_params-f7642b6541a0d09c.yaml | 8 - .../octavia-1687026-c01313aab53f55a4.yaml | 5 - ...phora-image-defaults-0d9efe1a0222b76d.yaml | 9 - ...amphora-image-format-f2f3f494e6fbe82c.yaml | 6 - .../octavia-amphora-ssh-245a21a35598440a.yaml | 3 - ...eamer_driver-default-e5152c28713e7707.yaml | 8 - ...g-offload-by-default-53faec2d64688109.yaml | 6 - .../octavia-log-offload-d1617e767f688da1.yaml | 4 - ...t-amphora-images-dir-31da44a544628687.yaml | 14 - ...-service-integration-03bd3eb6cfe1efaf.yaml | 4 - .../octavia-standalone-f1f1121ba77981c3.yaml | 8 - ...avia_service_workers-70887b5882ce0bb0.yaml | 9 - ...nt-mount-data-folder-c993df185af375a8.yaml | 9 - .../odl-enable-tls-377fbbfff6f67230.yaml | 10 - .../odl-ovs-hw-offload-54b662c633e9eda8.yaml | 5 - .../odl-port-binding-d420cac81f714778.yaml | 9 - .../notes/odl-qos-48b70c804755e3a5.yaml | 4 - .../notes/odl-user-a4c58ac0c3a64d90.yaml | 5 - ...l_delete_data_folder-b8c2f9a9382fd692.yaml | 3 - ...karaf_folder_on_host-b81465f62fe422d6.yaml | 9 - ..._marking_inheritance-41e41ff5f0c281d4.yaml | 9 - .../notes/odl_endpoint-74b00c6ed38e9a98.yaml | 7 - ...and_alone_log_config-5358ec26f7e7a970.yaml | 9 - .../notes/odl_upgrade-f5540d242b9a6b52.yaml | 16 - ...ion-external-upgrade-5093de6bd8993b5c.yaml | 16 - ...aylight-manage-repos-9eaf900c08e8d96f.yaml | 5 - .../openshift-removal-d7efdc1696a1999e.yaml | 11 - ...stack-client-service-86d28dab98f1763f.yaml | 11 - ...ings-move-to-ansible-940a48711870ac58.yaml | 14 - ...ts-output-hostroutes-3c710bd9f5641101.yaml | 5 - ...ng-separate-backends-2d2221066f88f479.yaml | 13 - ...rideable-ssh-options-1bd4a78408eb6c2c.yaml | 4 - ...vn-container-support-3ab333fff6e90dc4.yaml | 5 - ...x-minor-update-issue-3a1206549e3b75aa.yaml | 7 - .../notes/ovn-dpdk-15e8747068682f91.yaml | 3 - ...o-frag-config-option-33fe0ee3f6ac0372.yaml | 11 - releasenotes/notes/ovn-fcd4b0168e6745a8.yaml | 6 - .../notes/ovn-ha-c0139ac519680872.yaml | 3 - .../ovn-monitor-all-2fefb215c6f7166c.yaml | 8 - ...ron-metadata-workers-7623588cca0a6948.yaml | 9 - .../ovn-port-forwarding-1c954c1c57e136d0.yaml | 6 - ...move-nonha-env-files-7b68a1e656b9f753.yaml | 8 - ...curity-group-logging-0542b777ea58b5f6.yaml | 6 - .../ovn-separate-vip-ed28ffa3ff77f865.yaml | 9 - ...ervers-config-option-e8f91ad2a05df9a9.yaml | 6 - ...gmp_snooping_support-eccdecde74f4b9c8.yaml | 5 - ...t_support_debug_mode-9d0656ce156a3c91.yaml | 4 - ...emote_probe_interval-b90c7eea0c3fb2f6.yaml | 9 - ...nflow_probe_interval-fd99301d95aac62e.yaml | 8 - 
...emote_probe_interval-023b3fa671f88101.yaml | 9 - ...vn_tunnel_encap_type-04df21d622874c27.yaml | 7 - ...2-7-support-for-dpdk-fe665cf9c6b0a750.yaml | 5 - ...-upgrades-workaround-73f4e56127c910b4.yaml | 12 - ...ermission-workaround-20aaebcc8d6009ec.yaml | 6 - ...ovs-dpdk-permissions-50c5b33334ff4711.yaml | 9 - ...dpdk-perms-deprecate-cfddb148fce9656d.yaml | 7 - .../ovs-dpdk-pmd-params-55df11e67acb6736.yaml | 7 - ...ve-deprecated-params-9b4e46eb336301b2.yaml | 11 - .../notes/ovs-dpdk-tso-f96406621ec69bd1.yaml | 4 - .../ovs-hw-offload-a6bf0fa9c39a8204.yaml | 6 - ...handler-threads.yaml-f5a12d1066b042f1.yaml | 3 - .../notes/ovs_dpdk_pmd-74f4cfa0ef280cc0.yaml | 5 - .../ovsdpdk_netcontrold-0a1d4f3e26cea0c6.yaml | 5 - ...pacemaker-by-default-c5d28ee8dc897c62.yaml | 6 - ...r-cluster-common-tag-45c4e8a6e7b08735.yaml | 12 - ...er-common-tag-podman-f9a71344af5c73d6.yaml | 7 - ...e-cluster-common-tag-fe03c47bbcbba13c.yaml | 6 - ...ble-cluster-full-tag-f868a6f0b1f16018.yaml | 10 - .../pacemaker-rabbitmq-b1b5fc2b47cb84e4.yaml | 8 - ...trategy-endpoint-map-83b8321c72421fd6.yaml | 11 - .../paunch_retiring-9aab8248a0b2973b.yaml | 5 - .../pcmktlspriorities-4315010185adf45a.yaml | 7 - ...y-before-controllers-e029e81961dbaee8.yaml | 6 - .../placement-policies-db2704ea63d0bdad.yaml | 4 - ...server-type-per-role-314f38f8e5d4c84e.yaml | 8 - ...ort-physnet-cidr-map-7032fec5a1905314.yaml | 8 - .../notes/power_state-457f12af30b9e341.yaml | 8 - .../powerflex-driver-abd8e372280c44e7.yaml | 3 - .../powermax-driver-d428e372280c44e6.yaml | 6 - .../powerstore-driver-e528e372280c44e6.yaml | 6 - ...config-role-specific-b36cc4bd6383e493.yaml | 11 - ...setup_tasks-addition-63a9e4dfccc2132a.yaml | 6 - ...pgrade_rolling_tasks-6345e98e8283a907.yaml | 9 - .../ps-san_private_key-5aa111e7907ba600.yaml | 4 - releasenotes/notes/ptp-a1bf70fbfddd1830.yaml | 7 - .../notes/puppet-auditd-6504295e8c6c7a3b.yaml | 9 - ...ker-env-file-removed-1e39f85ff7a54be8.yaml | 15 - ...add_iscsi_cider_list-4032429b7dc2f1cb.yaml | 5 - .../notes/pure_add_nvme-9ec98cd889128270.yaml | 8 - ...torage_update_params-4de801b1ed2d0744.yaml | 6 - ...restorage_flashblade-75cbbdae48f233e0.yaml | 4 - ...ement_for_image_type-0bfbef90bb8c3871.yaml | 9 - ...d_network_aggregates-b23a7279643c6a70.yaml | 15 - ...it-ignore-partitions-0a0ae8c636c256ea.yaml | 6 - .../rabbit-net_ticktime-519f904e52218c2d.yaml | 8 - ...le-management-plugin-94b27747e4f5e685.yaml | 6 - .../rabbitmq-erl-args-9029cf4605d63dd9.yaml | 8 - ...itmq-nr-mirrorqueues-7d0451756a67eab4.yaml | 10 - ...rbd-disk-cache-modes-284a73271741ea62.yaml | 5 - .../notes/redfish-9203af1f7bf02bc5.yaml | 8 - ..._disabled_by_default-9992b2bae9b149cd.yaml | 7 - .../notes/refactor-dpdk-dd37ccf14f711bb1.yaml | 23 - ...efactor-service-vips-a48739c1b2fab207.yaml | 54 - ...roduce-keystone_corn-85290afe6bf8b019.yaml | 21 - ...ure_rabbitmq_backlog-68da3a07ef506f57.yaml | 7 - ...pport_for_timemaster-a8dc3e4d5db4e8b3.yaml | 7 - ...ervice_check_for_ffu-19a91d20e146056c.yaml | 6 - .../remove-AdminEmail-70cf5b218f916734.yaml | 5 - ...tronCorePluginML2OVN-8641be21f069c1bd.yaml | 6 - ...remove-NovaPlacement-c33569f37b788eaf.yaml | 5 - .../remove-ValidateNtp-15724eaa8345aa4f.yaml | 8 - ...lability-zone-filter-24a0f480bb2a345b.yaml | 5 - .../notes/remove-cavium-2d8cbe8a5085517a.yaml | 11 - ...move-ceilometer-cron-85362e197ba245a0.yaml | 5 - ...owerstore_appliances-0a4905808b2b66f1.yaml | 5 - ...t-password-interface-55a4e85ef0ccef2f.yaml | 5 - ...config-params-script-d670279038411978.yaml | 8 - 
...r-bootstrap-resource-7de0596b90f1b15a.yaml | 8 - ...loyed-server-outputs-d990bf75c7d8ef05.yaml | 5 - ...rver-roles-data.yaml-090c4f589ca493fd.yaml | 6 - ...eployment-swift-data-11afcb4e9925d7b2.yaml | 5 - ...-ceilometer-services-edc0c260d7009414.yaml | 6 - ...e-deprecated-dcn-hci-1aca499135c31e17.yaml | 5 - ...yed-server-bootstrap-07590a3cf4688cc9.yaml | 11 - ...eph-pgnum-parameters-b7bb9c95e30467d5.yaml | 7 - ...download-environment-04f61210d0a304a1.yaml | 8 - ...bled-glance-registry-2738b41a2e200d95.yaml | 5 - .../notes/remove-docker-ef1f16744ff03979.yaml | 6 - ...c2metadata-parameter-36e75f70d2322e55.yaml | 6 - ...-heat-api-cloudwatch-577417e2b2db3b15.yaml | 5 - ...-heat-installed-rear-feae3174fb303416.yaml | 6 - .../remove-hpe-lefthand-23c6beaa777e3dfb.yaml | 8 - ...or-iscsi-module-load-1e7d3c72a222196a.yaml | 6 - ...scsi-from-undercloud-b7424ffcdc9c02ea.yaml | 5 - ...ve-legacy-networking-e9b7f996722bb93b.yaml | 11 - ...emove-logging-groups-b6cee238a235bbc9.yaml | 5 - ...move-logging-sources-e573f5281798a069.yaml | 5 - .../remove-manila-vmax-61c1559fd3e58642.yaml | 8 - ...emove-misspelt-param-eeeea6fab711932e.yaml | 5 - ...e-networking-ansible-c3fca3209b864aa0.yaml | 4 - ...networking-bigswitch-9ae5c280990284a2.yaml | 5 - ...nal-bridge-parameter-a0c7e7ff8d937541.yaml | 6 - ...tron-fujitsu-plugins-6414a5d6962e3260.yaml | 4 - ...remove-neutron-lbaas-e72025b67de3563b.yaml | 11 - ...utron-server-mapping-211ca9751dec268d.yaml | 5 - ...ownload-deprecations-a40fb47c0c0a49f1.yaml | 6 - .../remove-novajoin-3ccef190c99c419b.yaml | 8 - .../remove-nsx-support-e118640eebf0ce23.yaml | 6 - .../notes/remove-ntp-20905abec5281f54.yaml | 5 - ...-from-neutron-config-b923cca547d9dd52.yaml | 4 - ...a_post_workflow_name-84083c092cf79c8d.yaml | 4 - .../notes/remove-odl-735ca4129c2a450e.yaml | 5 - .../remove-odl-dlux-gui-4728de06c973cd53.yaml | 6 - ...one-environment-file-ee8b07d913b4fcca.yaml | 5 - .../remove-old-tls-envs-137cf19b55526a81.yaml | 9 - ...-packages-on-upgrade-99bfd428dd52b4c9.yaml | 4 - .../remove-ovn-vif_type-1c09bf29d1bd38da.yaml | 5 - ...ndbinternal-endpoint-295f452c938aa06c.yaml | 8 - ...words-default-values-dd0cfdf7922ecf90.yaml | 7 - ...ve-puppet-masquerade-28ae63cfa7f613d6.yaml | 9 - .../notes/remove-sahara-ffec9acf94d72905.yaml | 4 - ...ack-action-parameter-285044bb49ad1f5b.yaml | 5 - ...port-for-puppet-ceph-bdafca24a59e7075.yaml | 9 - .../notes/remove-tacker-1375cfa1a2ac8fef.yaml | 5 - ...st-container-support-8950767b0047c9af.yaml | 8 - ...emove-uc-nova-params-edff776d30992aa3.yaml | 20 - ...ve-undercloud-minion-198ce8ea01c4e366.yaml | 7 - ...ud-specific-services-23046e607565d36d.yaml | 6 - ...unused-glance-params-ae24869fef23fa06.yaml | 6 - ...ve-unused-nova-ports-a9f4d9b0ac3e21c9.yaml | 4 - ...e-veritas-hyperscale-a0b5da7d882c853f.yaml | 12 - .../notes/remove-vpp-d950a159ad7b3371.yaml | 4 - .../notes/remove-xinetd-859f2a31631774d1.yaml | 8 - ..._bootstrap_nodeid_ip-d97ec1da4f73599c.yaml | 7 - ...templates-and-params-a2707785af9f617b.yaml | 13 - ..._deprecated_nfv_envs-30f6b3552d34d489.yaml | 10 - ...s_osd_type_osd_count-6fe114130d1e4b2e.yaml | 5 - ...rade_deployment_flag-872df40d7ff171b8.yaml | 7 - .../remove_ffwd_tasks-d1ab630d96a66a59.yaml | 6 - .../remove_pre_network-fb38d9c2095e0597.yaml | 5 - .../rename-admin-token-ae1b694f063035c8.yaml | 5 - ...nces-to-old-ctlplane-0df7f2ae8910559c.yaml | 20 - ...obe_interval_ovn_dbs-4aab423180fb6847.yaml | 7 - .../restart-certmonger-244416f537859bac.yaml | 5 - ...er-disable-templates-82ed8b7d33fc3bda.yaml | 6 - 
...ernel-message-buffer-809160674b92a073.yaml | 11 - ...trict-mongodb-memory-de7bf6754d7234d9.yaml | 3 - .../rhel-registration-2752c4e0a14cec98.yaml | 6 - .../notes/rhsm-service-fbec46930264b355.yaml | 5 - .../rhsm_proxy_verify-548f104c97cf5f90.yaml | 5 - ...l-manila-docker-envs-5a376db667ddfd1c.yaml | 7 - .../role-image-prepare-602ee8bf86463a97.yaml | 6 - ...de-to-dvr-containers-bc876f82f3e9f139.yaml | 7 - .../notes/role-tags-16ac2e9e8fcab218.yaml | 18 - ...oles-data-validation-7845702b5ed85366.yaml | 8 - ...es_deprecated_params-50b4bbe8b9e4abc7.yaml | 21 - .../rpc_reponse_timeout-6feb5f62819b2eaf.yaml | 15 - .../notes/rsyslog-5dc93db5eb8fc953.yaml | 5 - ...octavia-under-apache-94afa32e4f1ae3e1.yaml | 11 - .../sahara_auth_v3-65bd276b39b4e284.yaml | 4 - .../notes/sat-tools-0d0f0c53de9d34a5.yaml | 5 - .../notes/sat_capsule-bb59fad44c17f97f.yaml | 7 - .../notes/sc-driver-a428e372280c44e6.yaml | 6 - .../notes/scale_tasks-2042b294d074b37a.yaml | 8 - ...deprecate-old-driver-ab28e372280c44e6.yaml | 5 - .../security-compliance-f4f7ae077b148af1.yaml | 10 - ...er-blacklist-support-370c1a1f15a28a41.yaml | 6 - .../service-role-name-0b8609d314564885.yaml | 4 - ...-be-false-by-default-4fe7acaa60d2a95f.yaml | 6 - ...set-amphora-timezone-d62de55c993c5d70.yaml | 5 - ...ceilometer-auth-flag-382f68ddb2cbcb6b.yaml | 5 - ...n-global-physnet-mtu-5e3515a97e8e0367.yaml | 10 - ...va_opts_from_tripleo-d969b1151ec244a0.yaml | 4 - ...tup_timeouts_ovn_dbs-141be475dd2cd7ae.yaml | 11 - ...man-purge-undercloud-a3a30b5ac3a0951b.yaml | 9 - .../notes/snmp_firewall-ab17f60ba1ec71d2.yaml | 7 - .../notes/snmp_listen-2364188f73d43b14.yaml | 7 - ...t-stack-environments-1f817e24b5d90959.yaml | 7 - ...iov-agent-extentions-08e1d9fb89c2efbf.yaml | 4 - ...riov-pci-passthrough-8f28719b889bdaf7.yaml | 4 - .../notes/sriov-role-1ef30615048239c7.yaml | 14 - ...iov-vf-param-removed-fd6f4519b4eeb05a.yaml | 6 - ...er-options-overrides-f677913bfd65efe1.yaml | 6 - .../ssh_known_hosts-287563590632d1aa.yaml | 4 - .../notes/ssh_pass_auth-8cab3ca5a50d2a5a.yaml | 7 - ...d-service-extensions-0c4d0879942a2052.yaml | 5 - .../stack-name-input-73f4d4d052f1377e.yaml | 5 - ...rc-baremetal-version-309809c01105095f.yaml | 7 - ...t-openstack-services-4ca49c6795bf8ccc.yaml | 11 - ...elinux-configuration-39a0c7285d8e4c66.yaml | 4 - ...uence-1-deploy-steps-59043a5ea87a83f8.yaml | 4 - ...apping-into-services-999a2c5a90b85709.yaml | 6 - ...-glance-at-dcn-sites-6163b8f5333e31a7.yaml | 8 - ...ent-server-blacklist-532221db0e7d2bef.yaml | 5 - ..._for_tenant_networks-30938bfdde547969.yaml | 8 - ...-to-container_file_t-f4914561f6e9e4c7.yaml | 5 - ...onfig-and-postconfig-debd5f28bc578d51.yaml | 6 - ...itional-healthchecks-c286f7b7116e6543.yaml | 5 - ...-barbican-key-id-fix-108f8b58a5092d0a.yaml | 5 - ...ft-container-sharder-b96c2fa43aa66aac.yaml | 6 - ...ate-d1-containerized-8eda040d9088be7d.yaml | 7 - ...ift-create-local-dir-7671f7967620e261.yaml | 6 - ...-not-chown-recursive-1086879873cd9fa6.yaml | 7 - .../swift-fix-ring-sync-7bf3ddbb1ea1e342.yaml | 6 - .../swift-mount-by-uuid-7744fe7696db4b85.yaml | 6 - ...wift-mount-var-cache-7e95199532b542c4.yaml | 7 - ...t-prevent-relabeling-b9721aa5a1abda6e.yaml | 5 - ...verable-node-timeout-1fcd7a83f983e61b.yaml | 11 - ...roxy-use-hash-suffix-a0641435337a4b05.yaml | 7 - .../swift-recon-cron-8a3023ab693486bd.yaml | 6 - .../swift-refresh-rings-cc327f998490b0df.yaml | 6 - .../swift-ring-keeper-c04b440d7d5ce13f.yaml | 9 - ...ngle-replica-cleanup-fdf72b9e462185c7.yaml | 6 - .../swift_external-d9870450f191b89a.yaml | 6 - 
...ift_hash_path_prefix-5ecc32ed5d78158b.yaml | 6 - ...to-httpd-support-tls-9b995fe4113b2412.yaml | 6 - ...o-default-ovn-driver-ab4ae9d348158d61.yaml | 16 - .../tag-common-tasks-4a78275787655fdd.yaml | 6 - .../tag-step-plays-b1b1ea7584f1665d.yaml | 5 - ...emetry-role-services-7dba4996aff89027.yaml | 5 - .../timezone-ansible-c7ffcd118f881b82.yaml | 8 - .../notes/tls-inject-86ef6706e68f5740.yaml | 13 - ...en-flush-twice-a-day-d4b00a2953a6b383.yaml | 7 - ...leo-firewall-ansible-3928f04478a09668.yaml | 15 - ...leo-kernel-hugepages-424c19a4b1579af8.yaml | 23 - .../tripleo-nova-nfs-ead2827338aa9519.yaml | 5 - ...pleo-ssh-known-hosts-5c64b1a90d61d7f2.yaml | 9 - .../tripleo-validations-b231184816a9ab64.yaml | 5 - .../notes/tripleo_ui-1923e35ee139f777.yaml | 4 - ...al-puppet-deprecated-3e97347917905254.yaml | 5 - .../notes/tuned-service-650c0eec1cf12a4d.yaml | 4 - ...tuned_custom_profile-25d1f4a2bc217216.yaml | 15 - .../notes/ucsm_removal-929fe9971ed4acc8.yaml | 5 - .../uefi-boot-mode-a8b1b416a0e9cdc4.yaml | 7 - ...i_ipxe_bootfile_name-f2c9cc8971dc1ed8.yaml | 9 - ...d-networks-on-update-294acc2136c0fe47.yaml | 7 - .../undercloud-selinux-37fc6f69a9753109.yaml | 6 - ...rcloud-tripleo-admin-7043cc0b2e4bfb8a.yaml | 5 - ...grade-ephemeral-heat-c838a9c61fc742a3.yaml | 5 - .../notes/unity_cinder_e9872898724a11e7.yaml | 4 - .../notes/unity_manila_1967789872aa11e7.yaml | 3 - ...eph-default-min-size-0297620ed99dab5b.yaml | 12 - ...er-netapp-parameters-cf6da846e72007df.yaml | 11 - ...ellemc-vnx-templates-74a0596fac39b488.yaml | 11 - ...-templates-to-queens-dec638e7b34bed09.yaml | 4 - ...mt-subnet-to-class-b-1cd832ef08a30c85.yaml | 11 - ...anage-event-pipeline-0a21e9ca92cbff4f.yaml | 6 - ...metric-delay-default-963d073026e2cc15.yaml | 4 - ...on-rhel-registration-afbef3ead983b08f.yaml | 6 - ...-resource-by-default-ed54100721f55a30.yaml | 8 - ...ate-plan-environment-4e164b57a801e2cb.yaml | 3 - ..._manila_unity_driver-43aeb041029c4e7f.yaml | 5 - ...te_manila_vnx_driver-678b22c4fcd81fcf.yaml | 4 - .../notes/update_odl-cb997ce5c136ebb7.yaml | 19 - .../notes/update_serial-785ff794ff88fb2e.yaml | 9 - ...upgrade-stack-action-94598796a9d3511f.yaml | 9 - ...pgrade_tasks_cleanup-c913f3f30f4a1db6.yaml | 13 - ...remove_tags_add_when-99f18cdb3cf58f64.yaml | 9 - ...pgradeleapp-per-role-ad2e84e317ec1291.yaml | 6 - ...le-for-hosts-entries-b4905552515e17ff.yaml | 6 - ...ttr-interface-routes-dfbe74adce15da1d.yaml | 7 - ...eploy-steps-playbook-01decb18d895879f.yaml | 6 - ...r-log-format-default-3e9bb40c678388fc.yaml | 6 - ...ig-with-deprecations-2688f34fbc6de74a.yaml | 6 - ...ging-service-net-map-0c65b5e421ce05bd.yaml | 12 - ...host-key-checking-no-766c6d9814243de3.yaml | 7 - ...erface-default-route-2578a4d292901024.yaml | 9 - ...-attrs-ctlplane-cidr-a02e14a251733726.yaml | 37 - ...lplane-default-route-625ef5e414a65f2e.yaml | 25 - ...net-attrs-dnsservers-f751ec1125a9f787.yaml | 24 - ...-attrs-ec2metadataip-aa28f3c030f13c9c.yaml | 25 - ...instead-of-role-name-3c15f9faaa59362e.yaml | 22 - ...-config-outputs-used-8abcb673da6d373f.yaml | 6 - ...ar_lib_docker_deprec-0c48311c01605228.yaml | 7 - ...vgpu-devices-mapping-63dd870f3a00a98a.yaml | 5 - .../vhost_default_dir-cac327a0ac05df90.yaml | 6 - .../notes/vipmap-output-4a9ce99930960346.yaml | 5 - .../notes/vmax_cinder_a6672898724a11e7.yaml | 4 - .../notes/vmax_manila_2967789872aa11e8.yaml | 4 - ...vmx-deprecate-driver-e428e372280c44e6.yaml | 5 - .../notes/vnc_tls-b3707d0134697cc7.yaml | 8 - .../notes/vnx_cinder_b5572898724a11e7.yaml | 4 - .../notes/vnx_manila_2967789872aa11e7.yaml | 4 - 
...ltipath-for-libvirt--c8e93a0bb83e0bc8.yaml | 7 - releasenotes/notes/vpp-84d35e51ff62a58c.yaml | 6 - .../notes/vpp-ml2-8e115f7763510531.yaml | 3 - ...xos-deprecate-driver-cd38e372280c44e6.yaml | 5 - .../vxflexos-driver-bec8e372280c44e6.yaml | 4 - ...xlan-support-for-ovn-1320be8046aca9c6.yaml | 4 - .../wait_for_nova_api-7af0c6db1b607216.yaml | 9 - ...et-fqdn-for-rhel-reg-be9c4620146096be.yaml | 8 - .../workflow_tasks-4da5830821b7154b.yaml | 8 - ...per-containers-debug-844c13348c83de93.yaml | 16 - ...mio-add-ports-option-8991f7c8acc1aadb.yaml | 5 - ...deprecate-old-config-d428e372280c44e6.yaml | 5 - .../xtremio-driver-a428f372280c44e6.yaml | 7 - ...si-remove-deprecated-68a8830be3d4f2b8.yaml | 5 - .../xtremio_cinder_c5572898724a11e7.yaml | 4 - .../notes/zaqar-httpd-e7d91bf396da28d0.yaml | 3 - .../notes/zaqar-removal-6cdb31276d5a26dc.yaml | 6 - ...use-redis-by-default-930f542dda895a31.yaml | 5 - .../zaqar-ws-upgrade-8dda7caea7e6bc1e.yaml | 5 - .../zaqar_backends-ccf8adfd24a17bf5.yaml | 5 - ..._undercloud_backends-f63224a2a3aa684e.yaml | 5 - ...ar_ws_timeout_tunnel-d5d1e900dce79b34.yaml | 7 - releasenotes/source/_static/.placeholder | 0 releasenotes/source/conf.py | 269 - releasenotes/source/index.rst | 28 - releasenotes/source/ocata.rst | 6 - releasenotes/source/pike.rst | 6 - releasenotes/source/queens.rst | 6 - releasenotes/source/rocky.rst | 6 - releasenotes/source/stein.rst | 6 - releasenotes/source/train.rst | 6 - releasenotes/source/unreleased.rst | 5 - releasenotes/source/ussuri.rst | 6 - releasenotes/source/victoria.rst | 6 - releasenotes/source/wallaby.rst | 6 - releasenotes/source/zed.rst | 6 - requirements.txt | 7 - roles/BlockStorage.yaml | 46 - roles/CellController.yaml | 63 - roles/CephAll.yaml | 51 - roles/CephFile.yaml | 47 - roles/CephObject.yaml | 47 - roles/CephStorage.yaml | 46 - roles/Compute.yaml | 76 - roles/ComputeAlt.yaml | 56 - roles/ComputeDVR.yaml | 66 - roles/ComputeHCI.yaml | 68 - roles/ComputeHCIOvsDpdk.yaml | 71 - roles/ComputeHCISriov.yaml | 63 - roles/ComputeInstanceHA.yaml | 67 - roles/ComputeLocalEphemeral.yaml | 68 - roles/ComputeOvsDpdk.yaml | 67 - roles/ComputeOvsDpdkRT.yaml | 70 - roles/ComputeOvsDpdkSriov.yaml | 66 - roles/ComputeOvsDpdkSriovRT.yaml | 69 - roles/ComputePPC64LE.yaml | 66 - roles/ComputeRBDEphemeral.yaml | 68 - roles/ComputeRealTime.yaml | 75 - roles/ComputeSriov.yaml | 63 - roles/ComputeSriovIB.yaml | 63 - roles/ComputeSriovRT.yaml | 66 - roles/ComputeVdpa.yaml | 68 - roles/Controller.yaml | 178 - roles/ControllerAllNovaStandalone.yaml | 125 - roles/ControllerNoCeph.yaml | 166 - roles/ControllerNovaStandalone.yaml | 162 - roles/ControllerOpenstack.yaml | 138 - roles/ControllerSriov.yaml | 176 - roles/ControllerStorageDashboard.yaml | 177 - roles/ControllerStorageNfs.yaml | 177 - roles/Database.yaml | 38 - roles/DesignateBind.yaml | 42 - roles/DistributedCompute.yaml | 68 - roles/DistributedComputeHCI.yaml | 77 - roles/DistributedComputeHCIDashboard.yaml | 79 - roles/DistributedComputeHCIScaleOut.yaml | 69 - roles/DistributedComputeScaleOut.yaml | 66 - roles/HciCephAll.yaml | 74 - roles/HciCephFile.yaml | 68 - roles/HciCephMon.yaml | 69 - roles/HciCephObject.yaml | 68 - roles/IronicConductor.yaml | 38 - roles/Messaging.yaml | 36 - roles/Minimal.yaml | 29 - roles/Networker.yaml | 52 - roles/NetworkerSriov.yaml | 54 - roles/NovaManager.yaml | 37 - roles/Novacontrol.yaml | 43 - roles/ObjectStorage.yaml | 52 - roles/README.rst | 225 - roles/Standalone.yaml | 181 - roles/Telemetry.yaml | 52 - roles/Undercloud.yaml | 63 - roles_data.yaml | 
402 - roles_data_standalone.yaml | 185 - roles_data_undercloud.yaml | 67 - sample-env-generator/README.rst | 162 - sample-env-generator/composable-roles.yaml | 139 - sample-env-generator/dcn.yaml | 53 - sample-env-generator/messaging.yaml | 35 - sample-env-generator/nova-cell.yaml | 16 - sample-env-generator/openidc.yaml | 47 - .../predictable-placement.yaml | 32 - sample-env-generator/ssl.yaml | 280 - sample-env-generator/standalone.yaml | 246 - sample-env-generator/storage.yaml | 102 - scripts/check-run-nova-compute | 168 - scripts/delay-nova-compute | 45 - scripts/undercloud-upgrade-ephemeral-heat.py | 443 - setup.cfg | 16 - setup.py | 22 - test-ansible-requirements.txt | 12 - test-requirements.txt | 18 - tools/__init__.py | 0 tools/check-up-to-date.sh | 28 - .../convert_heat_nic_config_to_ansible_j2.py | 524 - tools/convert_policy_yaml_to_heat_template.py | 73 - tools/convert_v1_net_data.py | 75 - tools/make_ceph_disk_list.py | 141 - tools/multi-rhel-container-image-prepare.py | 189 - tools/process-templates.py | 406 - tools/releasenotes_tox.sh | 28 - tools/render-ansible-tasks.py | 131 - tools/roles-data-generate-samples.sh | 29 - tools/roles-data-generate.py | 50 - tools/roles-data-validation.sh | 49 - tools/tests/__init__.py | 0 .../2-linux-bonds-vlans-controller.yaml | 344 - .../heat_templates/bond-vlans-controller.yaml | 298 - .../heat_templates/complex.yaml | 237 - .../multiple-nics-vlans-controller.yaml | 280 - .../heat_templates/simple.yaml | 51 - ...gle-nic-linux-bridge-vlans-controller.yaml | 285 - .../single-nic-vlans-controller.yaml | 281 - .../2-linux-bonds-vlans-controller.j2 | 96 - .../j2_references/bond-vlans-controller.j2 | 61 - .../j2_references/complex_complete.j2 | 48 - .../j2_references/complex_incomplete.j2 | 39 - .../multiple-nics-vlans-controller.j2 | 78 - .../j2_references/simple.j2 | 26 - ...ingle-nic-linux-bridge-vlans-controller.j2 | 56 - .../single-nic-vlans-controller.j2 | 51 - .../network_file_complex.yaml | 32 - .../networks_file_simple.yaml | 4 - .../stack_env_complex.yaml | 17 - .../stack_env_simple.yaml | 10 - ...t_convert_heat_nic_config_to_ansible_j2.py | 303 - tools/yaml-diff.py | 145 - tools/yaml-validate.py | 1459 --- tox.ini | 125 - tripleo_heat_templates/__init__.py | 0 .../environment_generator.py | 246 - tripleo_heat_templates/tests/__init__.py | 0 .../tests/test_environment_generator.py | 765 -- .../tests/test_tht_ansible_syntax.py | 78 - .../tests/test_tht_ansible_syntax.yml | 115 - .../tests/test_tht_derivce_pci.py | 1234 -- tripleo_heat_templates/tests/test_yaql.py | 27 - .../tests/tht-role-requirements.yml | 21 - vip_data_default.yaml | 8 - zuul.d/layout.yaml | 37 - 1794 files changed, 8 insertions(+), 116224 deletions(-) delete mode 100644 .ansible-lint delete mode 100644 .gitignore delete mode 100644 .stestr.conf delete mode 100644 LICENSE delete mode 100644 babel.cfg delete mode 100644 baremetal-samples/baremetal_deployment.yaml delete mode 100644 bindep.txt delete mode 100644 ci/README.rst delete mode 100644 ci/common/ironic_standalone_post.yaml delete mode 100644 ci/common/vbmc_setup.yaml delete mode 100644 ci/custom_ci_roles_data.yaml delete mode 100644 ci/custom_names_network_data.yaml delete mode 100644 ci/custom_names_vip_data.yaml delete mode 100644 ci/environments/README.rst delete mode 100644 ci/environments/ceph-min-osds.yaml delete mode 100644 ci/environments/multinode-containers.yaml delete mode 100644 ci/environments/multinode-core.yaml delete mode 100644 ci/environments/network/multiple-nics-ipv6/README delete mode 
100644 ci/environments/network/multiple-nics-ipv6/network-environment.yaml delete mode 100644 ci/environments/network/multiple-nics-ipv6/network-isolation-absolute.yaml delete mode 100644 ci/environments/network/multiple-nics-ipv6/network-isolation.yaml delete mode 100644 ci/environments/network/multiple-nics-ipv6/ui-settings.pickle delete mode 100644 ci/environments/network/multiple-nics/README delete mode 100644 ci/environments/network/multiple-nics/custom-network-environment.yaml delete mode 100644 ci/environments/network/multiple-nics/network-environment.yaml delete mode 100644 ci/environments/network/multiple-nics/network-isolation-absolute.yaml delete mode 100644 ci/environments/network/multiple-nics/network-isolation.yaml delete mode 100644 ci/environments/network/multiple-nics/ui-settings.pickle delete mode 100644 ci/environments/network/public-bond/README delete mode 100644 ci/environments/network/public-bond/network-environment.yaml delete mode 100644 ci/environments/network/public-bond/network-isolation-absolute.yaml delete mode 100644 ci/environments/network/public-bond/network-isolation.yaml delete mode 100644 ci/environments/network/public-bond/ui-settings.pickle delete mode 100644 ci/environments/neutron_dns_domain.yaml delete mode 100644 ci/environments/neutron_l3_qos.yaml delete mode 100644 ci/environments/neutron_sec_group_quota.yaml delete mode 100644 ci/environments/octavia-kvm.yaml delete mode 100644 ci/environments/ovb-ha.yaml delete mode 100644 ci/environments/scenario000-multinode-containers.yaml delete mode 100644 ci/environments/scenario000-standalone.yaml delete mode 100644 ci/environments/scenario001-multinode-containers.yaml delete mode 100644 ci/environments/scenario001-standalone.yaml delete mode 100644 ci/environments/scenario002-standalone.yaml delete mode 100644 ci/environments/scenario003-standalone.yaml delete mode 100644 ci/environments/scenario004-standalone.yaml delete mode 100644 ci/environments/scenario007-multinode-containers.yaml delete mode 100644 ci/environments/scenario007-standalone.yaml delete mode 100644 ci/environments/scenario010-multinode-containers.yaml delete mode 100644 ci/environments/scenario010-standalone.yaml delete mode 100644 ci/environments/scenario012-standalone.yaml delete mode 100644 ci/environments/scenario013-standalone.yaml delete mode 100644 ci/environments/standalone-ipa.yaml delete mode 100644 ci/network_data.yaml delete mode 100644 ci/network_data_v6.yaml delete mode 100644 ci/vip_data.yaml delete mode 100644 ci/vip_data_v6.yaml delete mode 100644 common/common-container-config-scripts.yaml delete mode 100644 common/common-container-setup-tasks.yaml delete mode 100644 common/container-puppet.sh delete mode 100644 common/deploy-steps-playbooks-common.yaml delete mode 100644 common/deploy-steps-tasks-step-0.yaml delete mode 100644 common/deploy-steps-tasks-step-1.yaml delete mode 100644 common/deploy-steps-tasks.yaml delete mode 100644 common/deploy-steps.j2 delete mode 100644 common/generate-config-tasks.yaml delete mode 100644 common/hiera-steps-tasks.yaml delete mode 100644 common/host-container-puppet-tasks.yaml delete mode 100644 common/post.j2.yaml delete mode 100644 common/services/role.role.j2.yaml delete mode 100644 config-download-software.yaml delete mode 100644 config-download-structured.yaml delete mode 100644 container_config_scripts/__init__.py delete mode 100755 container_config_scripts/monitoring/collectd_check_health.py delete mode 100755 container_config_scripts/mysql_upgrade_db.sh delete mode 100644 
container_config_scripts/nova_api_ensure_default_cells.py delete mode 100755 container_config_scripts/nova_libvirt_init_secret.sh delete mode 100644 container_config_scripts/nova_statedir_ownership.py delete mode 100755 container_config_scripts/nova_wait_for_api_service.py delete mode 100644 container_config_scripts/nova_wait_for_compute_service.py delete mode 100755 container_config_scripts/pacemaker_mutex_restart_bundle.sh delete mode 100755 container_config_scripts/pacemaker_mutex_shutdown.sh delete mode 100755 container_config_scripts/pacemaker_resource_lock.sh delete mode 100755 container_config_scripts/pacemaker_restart_bundle.sh delete mode 100755 container_config_scripts/pacemaker_wait_bundle.sh delete mode 100755 container_config_scripts/placement_wait_for_service.py delete mode 100755 container_config_scripts/pyshim.sh delete mode 100644 container_config_scripts/tests/__init__.py delete mode 100644 container_config_scripts/tests/test_nova_api_ensure_default_cells.py delete mode 100644 container_config_scripts/tests/test_nova_statedir_ownership.py delete mode 100755 container_config_scripts/wait-port-and-run.sh delete mode 100644 deployed-server/README.rst delete mode 100644 deployed-server/deployed-neutron-port.yaml delete mode 100644 deployed-server/deployed-server.yaml delete mode 100755 deployed-server/scripts/enable-ssh-admin.sh delete mode 100644 deployment/README.rst delete mode 100644 deployment/aide/aide-baremetal-ansible.yaml delete mode 100644 deployment/aodh/aodh-api-container-puppet.yaml delete mode 100644 deployment/aodh/aodh-base.yaml delete mode 100644 deployment/aodh/aodh-evaluator-container-puppet.yaml delete mode 100644 deployment/aodh/aodh-listener-container-puppet.yaml delete mode 100644 deployment/aodh/aodh-notifier-container-puppet.yaml delete mode 100644 deployment/apache/apache-baremetal-ansible.j2.yaml delete mode 100644 deployment/apache/apache-baremetal-puppet.j2.yaml delete mode 100644 deployment/auditd/auditd-baremetal-ansible.yaml delete mode 100644 deployment/barbican/barbican-api-container-puppet.yaml delete mode 100644 deployment/barbican/barbican-backend-dogtag-puppet.yaml delete mode 100644 deployment/barbican/barbican-backend-kmip-puppet.yaml delete mode 100644 deployment/barbican/barbican-backend-pkcs11-crypto-puppet.yaml delete mode 100644 deployment/barbican/barbican-backend-simple-crypto-puppet.yaml delete mode 100644 deployment/barbican/barbican-client-puppet.yaml delete mode 100644 deployment/ceilometer/ceilometer-agent-central-container-puppet.yaml delete mode 100644 deployment/ceilometer/ceilometer-agent-compute-container-puppet.yaml delete mode 100644 deployment/ceilometer/ceilometer-agent-ipmi-container-puppet.yaml delete mode 100644 deployment/ceilometer/ceilometer-agent-notification-container-puppet.yaml delete mode 100644 deployment/ceilometer/ceilometer-base-container-puppet.yaml delete mode 100644 deployment/cephadm/ceph-base.yaml delete mode 100644 deployment/cephadm/ceph-client.yaml delete mode 100644 deployment/cephadm/ceph-external.yaml delete mode 100644 deployment/cephadm/ceph-grafana.yaml delete mode 100644 deployment/cephadm/ceph-ingress.yaml delete mode 100644 deployment/cephadm/ceph-mds.yaml delete mode 100644 deployment/cephadm/ceph-mgr.yaml delete mode 100644 deployment/cephadm/ceph-mon.yaml delete mode 100644 deployment/cephadm/ceph-nfs.yaml delete mode 100644 deployment/cephadm/ceph-osd.yaml delete mode 100644 deployment/cephadm/ceph-rbdmirror.yaml delete mode 100644 deployment/cephadm/ceph-rgw.yaml delete mode 
100644 deployment/certs/ca-certs-baremetal-ansible.yaml delete mode 100644 deployment/cinder/cinder-api-container-puppet.yaml delete mode 100644 deployment/cinder/cinder-backend-dellemc-powerflex-puppet.yaml delete mode 100644 deployment/cinder/cinder-backend-dellemc-powermax-puppet.yaml delete mode 100644 deployment/cinder/cinder-backend-dellemc-powerstore-puppet.yaml delete mode 100644 deployment/cinder/cinder-backend-dellemc-sc-puppet.yaml delete mode 100644 deployment/cinder/cinder-backend-dellemc-unity-puppet.yaml delete mode 100644 deployment/cinder/cinder-backend-dellemc-vnx-puppet.yaml delete mode 100644 deployment/cinder/cinder-backend-dellemc-xtremio-puppet.yaml delete mode 100644 deployment/cinder/cinder-backend-ibm-svf-puppet.yaml delete mode 100644 deployment/cinder/cinder-backend-netapp-puppet.yaml delete mode 100644 deployment/cinder/cinder-backend-nfs-puppet.yaml delete mode 100644 deployment/cinder/cinder-backend-nvmeof-puppet.yaml delete mode 100644 deployment/cinder/cinder-backend-pure-puppet.yaml delete mode 100644 deployment/cinder/cinder-backend-rbd-puppet.yaml delete mode 100644 deployment/cinder/cinder-backup-container-puppet.yaml delete mode 100644 deployment/cinder/cinder-backup-pacemaker-puppet.yaml delete mode 100644 deployment/cinder/cinder-base.yaml delete mode 100644 deployment/cinder/cinder-common-container-puppet.yaml delete mode 100644 deployment/cinder/cinder-scheduler-container-puppet.yaml delete mode 100644 deployment/cinder/cinder-volume-container-puppet.yaml delete mode 100644 deployment/cinder/cinder-volume-pacemaker-puppet.yaml delete mode 100644 deployment/clients/openstack-clients-baremetal-ansible.yaml delete mode 100644 deployment/container-image-prepare/container-image-prepare-baremetal-ansible.j2.yaml delete mode 100644 deployment/containers-common.yaml delete mode 100644 deployment/database/mysql-base.yaml delete mode 100644 deployment/database/mysql-client.yaml delete mode 100644 deployment/database/mysql-container-puppet.yaml delete mode 100644 deployment/database/mysql-pacemaker-puppet.yaml delete mode 100644 deployment/database/redis-base-puppet.yaml delete mode 100644 deployment/database/redis-container-puppet.yaml delete mode 100644 deployment/database/redis-pacemaker-puppet.yaml delete mode 100644 deployment/deprecated/README.rst delete mode 100644 deployment/deprecated/etcd-container-puppet.yaml delete mode 100644 deployment/deprecated/multipathd-container.yaml delete mode 100644 deployment/deprecated/neutron/neutron-bgpvpn-api-container-puppet.yaml delete mode 100644 deployment/deprecated/neutron/neutron-bgpvpn-bagpipe-baremetal-puppet.yaml delete mode 100644 deployment/deprecated/neutron/neutron-l2gw-agent-baremetal-puppet.yaml delete mode 100644 deployment/deprecated/neutron/neutron-l2gw-api-container-puppet.yaml delete mode 100644 deployment/deprecated/neutron/neutron-linuxbridge-agent-baremetal-puppet.yaml delete mode 100644 deployment/deprecated/nova/nova-libvirt-container-puppet.yaml delete mode 100644 deployment/deprecated/snmp/snmp-baremetal-puppet.yaml delete mode 100644 deployment/designate/designate-api-container-puppet.yaml delete mode 100644 deployment/designate/designate-base.yaml delete mode 100644 deployment/designate/designate-bind-container.yaml delete mode 100644 deployment/designate/designate-central-container-puppet.yaml delete mode 100644 deployment/designate/designate-mdns-container-puppet.yaml delete mode 100644 deployment/designate/designate-producer-container-puppet.yaml delete mode 100644 
deployment/designate/designate-sink-container-puppet.yaml delete mode 100644 deployment/designate/designate-worker-container-puppet.yaml delete mode 100644 deployment/etcd/etcd-container-ansible.yaml delete mode 100644 deployment/experimental/README.rst delete mode 100644 deployment/frr/frr-container-ansible.yaml delete mode 100644 deployment/glance/glance-api-container-puppet.yaml delete mode 100644 deployment/glance/glance-api-edge-container-puppet.yaml delete mode 100644 deployment/glance/glance-api-internal-container-puppet.yaml delete mode 100644 deployment/gnocchi/gnocchi-api-container-puppet.yaml delete mode 100644 deployment/gnocchi/gnocchi-base.yaml delete mode 100644 deployment/gnocchi/gnocchi-metricd-container-puppet.yaml delete mode 100644 deployment/gnocchi/gnocchi-statsd-container-puppet.yaml delete mode 100644 deployment/haproxy/haproxy-container-puppet.yaml delete mode 100644 deployment/haproxy/haproxy-edge-container-puppet.yaml delete mode 100644 deployment/haproxy/haproxy-internal-tls-certmonger.j2.yaml delete mode 100644 deployment/haproxy/haproxy-pacemaker-puppet.yaml delete mode 100644 deployment/haproxy/haproxy-public-tls-certmonger.yaml delete mode 100644 deployment/haproxy/haproxy-public-tls-inject.yaml delete mode 100644 deployment/heat/heat-api-cfn-container-puppet.yaml delete mode 100644 deployment/heat/heat-api-container-puppet.yaml delete mode 100644 deployment/heat/heat-base-puppet.yaml delete mode 100644 deployment/heat/heat-engine-container-puppet.yaml delete mode 100644 deployment/heat/heat-ephemeral-container-ansible.yaml delete mode 100644 deployment/horizon/horizon-container-puppet.yaml delete mode 100644 deployment/image-serve/image-serve-baremetal-ansible.yaml delete mode 100644 deployment/ipa/ipaservices-baremetal-ansible.yaml delete mode 100644 deployment/ipsec/ipsec-baremetal-ansible.yaml delete mode 100644 deployment/ironic/ironic-api-container-puppet.yaml delete mode 100644 deployment/ironic/ironic-base-puppet.yaml delete mode 100644 deployment/ironic/ironic-conductor-container-puppet.yaml delete mode 100644 deployment/ironic/ironic-inspector-container-puppet.yaml delete mode 100644 deployment/ironic/ironic-neutron-agent-container-puppet.yaml delete mode 100644 deployment/ironic/ironic-pxe-container-puppet.yaml delete mode 100644 deployment/iscsid/iscsid-container-puppet.yaml delete mode 100644 deployment/kernel/kernel-baremetal-ansible.yaml delete mode 100644 deployment/kernel/kernel-boot-params-baremetal-ansible.yaml delete mode 100644 deployment/keystone/keystone-container-puppet.yaml delete mode 100644 deployment/logging/files/barbican-api.yaml delete mode 100644 deployment/logging/files/glance-api.yaml delete mode 100644 deployment/logging/files/haproxy.yaml delete mode 100644 deployment/logging/files/heat-api-cfn.yaml delete mode 100644 deployment/logging/files/heat-api.yaml delete mode 100644 deployment/logging/files/heat-engine.yaml delete mode 100644 deployment/logging/files/keystone.yaml delete mode 100644 deployment/logging/files/neutron-api.yaml delete mode 100644 deployment/logging/files/neutron-common.yaml delete mode 100644 deployment/logging/files/nova-api.yaml delete mode 100644 deployment/logging/files/nova-common.yaml delete mode 100644 deployment/logging/files/nova-libvirt.yaml delete mode 100644 deployment/logging/files/nova-metadata.yaml delete mode 100644 deployment/logging/files/placement-api.yaml delete mode 100644 deployment/logging/rsyslog-baremetal-ansible.yaml delete mode 100644 
deployment/logging/rsyslog-container-puppet.yaml delete mode 100644 deployment/logging/rsyslog-sidecar-container-puppet.yaml delete mode 100644 deployment/logging/stdout/barbican-api.yaml delete mode 100644 deployment/logging/stdout/glance-api.yaml delete mode 100644 deployment/logging/stdout/haproxy.yaml delete mode 100644 deployment/logging/stdout/heat-api-cfn.yaml delete mode 100644 deployment/logging/stdout/heat-api.yaml delete mode 100644 deployment/logging/stdout/heat-engine.yaml delete mode 100644 deployment/logging/stdout/keystone.yaml delete mode 100644 deployment/logging/stdout/neutron-common.yaml delete mode 100644 deployment/logging/stdout/nova-api.yaml delete mode 100644 deployment/logging/stdout/nova-common.yaml delete mode 100644 deployment/logging/stdout/nova-libvirt.yaml delete mode 100644 deployment/logging/stdout/nova-metadata.yaml delete mode 100644 deployment/logging/stdout/placement-api.yaml delete mode 100644 deployment/login-defs/login-defs-baremetal-ansible.yaml delete mode 100644 deployment/logrotate/logrotate-crond-container-puppet.yaml delete mode 100644 deployment/logrotate/tmpwatch-install.yaml delete mode 100644 deployment/manila/manila-api-container-puppet.yaml delete mode 100644 deployment/manila/manila-backend-cephfs.yaml delete mode 100644 deployment/manila/manila-backend-flashblade-puppet.yaml delete mode 100644 deployment/manila/manila-backend-isilon.yaml delete mode 100644 deployment/manila/manila-backend-netapp.yaml delete mode 100644 deployment/manila/manila-backend-powermax.yaml delete mode 100644 deployment/manila/manila-backend-unity.yaml delete mode 100644 deployment/manila/manila-backend-vnx.yaml delete mode 100644 deployment/manila/manila-base.yaml delete mode 100644 deployment/manila/manila-scheduler-container-puppet.yaml delete mode 100644 deployment/manila/manila-share-common.yaml delete mode 100644 deployment/manila/manila-share-container-puppet.yaml delete mode 100644 deployment/manila/manila-share-pacemaker-puppet.yaml delete mode 100644 deployment/masquerade-networks/masquerade-networks-baremetal-ansible.yaml delete mode 100644 deployment/memcached/memcached-container-puppet.yaml delete mode 100644 deployment/messaging/rpc-qdrouterd-container-puppet.yaml delete mode 100644 deployment/metrics/collectd-container-ansible.yaml delete mode 100644 deployment/metrics/collectd-container-puppet.yaml delete mode 100644 deployment/metrics/qdr-container-ansible.yaml delete mode 100644 deployment/metrics/qdr-container-puppet.yaml delete mode 100644 deployment/multipathd/multipathd-container-ansible.yaml delete mode 100644 deployment/neutron/derive_pci_passthrough_whitelist.py delete mode 100644 deployment/neutron/kill-script delete mode 100644 deployment/neutron/neutron-agents-ib-config-container-puppet.yaml delete mode 100644 deployment/neutron/neutron-api-container-puppet.yaml delete mode 100644 deployment/neutron/neutron-base.yaml delete mode 100755 deployment/neutron/neutron-cleanup delete mode 100644 deployment/neutron/neutron-cleanup.service delete mode 100644 deployment/neutron/neutron-dhcp-container-puppet.yaml delete mode 100644 deployment/neutron/neutron-l3-container-puppet.yaml delete mode 100644 deployment/neutron/neutron-metadata-container-puppet.yaml delete mode 100644 deployment/neutron/neutron-mlnx-agent-container-puppet.yaml delete mode 100644 deployment/neutron/neutron-ovn-dpdk-config-container-puppet.yaml delete mode 100644 deployment/neutron/neutron-ovs-agent-container-puppet.yaml delete mode 100644 
deployment/neutron/neutron-ovs-dpdk-agent-container-puppet.yaml delete mode 100644 deployment/neutron/neutron-plugin-ml2-cisco-vts-container-puppet.yaml delete mode 100644 deployment/neutron/neutron-plugin-ml2-container-puppet.yaml delete mode 100644 deployment/neutron/neutron-plugin-ml2-mlnx-sdn-assist-container-puppet.yaml delete mode 100644 deployment/neutron/neutron-plugin-ml2-ovn.yaml delete mode 100644 deployment/neutron/neutron-plugin-ml2.yaml delete mode 100644 deployment/neutron/neutron-sfc-api-container-puppet.yaml delete mode 100644 deployment/neutron/neutron-sriov-agent-container-puppet.yaml delete mode 100644 deployment/nova/nova-api-container-puppet.yaml delete mode 100644 deployment/nova/nova-apidb-client-puppet.yaml delete mode 100644 deployment/nova/nova-az-config.yaml delete mode 100644 deployment/nova/nova-base-puppet.yaml delete mode 100644 deployment/nova/nova-compute-common-container-puppet.yaml delete mode 100644 deployment/nova/nova-compute-container-puppet.yaml delete mode 100644 deployment/nova/nova-conductor-container-puppet.yaml delete mode 100644 deployment/nova/nova-db-client-puppet.yaml delete mode 100644 deployment/nova/nova-ironic-container-puppet.yaml delete mode 100644 deployment/nova/nova-libvirt-common.yaml delete mode 100644 deployment/nova/nova-libvirt-guests-container-puppet.yaml delete mode 100644 deployment/nova/nova-manager-container-puppet.yaml delete mode 100644 deployment/nova/nova-metadata-container-puppet.yaml delete mode 100644 deployment/nova/nova-migration-target-container-puppet.yaml delete mode 100644 deployment/nova/nova-modular-libvirt-container-puppet.yaml delete mode 100644 deployment/nova/nova-scheduler-container-puppet.yaml delete mode 100644 deployment/nova/nova-vnc-proxy-container-puppet.yaml delete mode 100644 deployment/octavia/octavia-api-container-puppet.yaml delete mode 100644 deployment/octavia/octavia-base.yaml delete mode 100644 deployment/octavia/octavia-deployment-config.j2.yaml delete mode 100644 deployment/octavia/octavia-health-manager-container-puppet.yaml delete mode 100644 deployment/octavia/octavia-housekeeping-container-puppet.yaml delete mode 100644 deployment/octavia/octavia-worker-container-puppet.yaml delete mode 100644 deployment/octavia/providers/ovn-provider-config.yaml delete mode 100644 deployment/openvswitch/openvswitch-dpdk-baremetal-ansible.yaml delete mode 100644 deployment/openvswitch/openvswitch-dpdk-netcontrold-container-ansible.yaml delete mode 100644 deployment/ovn/ovn-controller-container-puppet.yaml delete mode 100644 deployment/ovn/ovn-dbs-cluster-ansible.yaml delete mode 100644 deployment/ovn/ovn-dbs-container-puppet.yaml delete mode 100644 deployment/ovn/ovn-dbs-pacemaker-puppet.yaml delete mode 100644 deployment/ovn/ovn-metadata-container-puppet.yaml delete mode 100644 deployment/pacemaker/clustercheck-container-puppet.yaml delete mode 100644 deployment/pacemaker/compute-instanceha-baremetal-puppet.yaml delete mode 100644 deployment/pacemaker/pacemaker-baremetal-puppet.yaml delete mode 100644 deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml delete mode 100644 deployment/placement/placement-api-container-puppet.yaml delete mode 100644 deployment/podman/podman-baremetal-ansible.yaml delete mode 100644 deployment/qdr/qdrouterd-container-puppet.yaml delete mode 100644 deployment/rabbitmq/rabbitmq-container-puppet.yaml delete mode 100644 deployment/rabbitmq/rabbitmq-messaging-notify-container-puppet.yaml delete mode 100644 
deployment/rabbitmq/rabbitmq-messaging-notify-pacemaker-puppet.yaml delete mode 100644 deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml delete mode 100644 deployment/rabbitmq/rabbitmq-messaging-pacemaker-puppet.yaml delete mode 100644 deployment/rabbitmq/rabbitmq-messaging-rpc-container-puppet.yaml delete mode 100644 deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml delete mode 100644 deployment/rhsm/rhsm-baremetal-ansible.yaml delete mode 100644 deployment/securetty/securetty-baremetal-ansible.yaml delete mode 100644 deployment/snmp/snmpd-disabled-puppet.yaml delete mode 100644 deployment/sshd/sshd-baremetal-ansible.yaml delete mode 100644 deployment/swift/external-swift-proxy-baremetal-puppet.yaml delete mode 100644 deployment/swift/swift-base.yaml delete mode 100644 deployment/swift/swift-dispersion-baremetal-puppet.yaml delete mode 100644 deployment/swift/swift-proxy-container-puppet.yaml delete mode 100644 deployment/swift/swift-ringbuilder-container-puppet.yaml delete mode 100644 deployment/swift/swift-storage-container-puppet.yaml delete mode 100644 deployment/tests/test-container-volume.yaml delete mode 100644 deployment/time/ptp-baremetal-ansible.yaml delete mode 100644 deployment/time/timezone-baremetal-ansible.yaml delete mode 100644 deployment/timemaster/timemaster-baremetal-ansible.yaml delete mode 100644 deployment/timesync/chrony-baremetal-ansible.yaml delete mode 100644 deployment/tls/undercloud-remove-novajoin.yaml delete mode 100644 deployment/tls/undercloud-tls.yaml delete mode 100644 deployment/tripleo-firewall/tripleo-firewall-baremetal-ansible.yaml delete mode 100644 deployment/tripleo-packages/tripleo-packages-baremetal-puppet.yaml delete mode 100644 deployment/tuned/tuned-baremetal-ansible.yaml delete mode 100644 deployment/unbound/unbound-container-ansible.yaml delete mode 100644 deployment/undercloud/undercloud-upgrade.yaml delete mode 100644 deployment/validations/tripleo-validations-baremetal-ansible.yaml delete mode 100644 doc/requirements.txt delete mode 100644 environments/README.md delete mode 100644 environments/auditd.yaml delete mode 100644 environments/barbican-backend-dogtag.yaml delete mode 100644 environments/barbican-backend-kmip.yaml delete mode 100644 environments/barbican-backend-pkcs11-atos.yaml delete mode 100644 environments/barbican-backend-pkcs11-lunasa.yaml delete mode 100644 environments/barbican-backend-pkcs11-thales.yaml delete mode 100644 environments/barbican-backend-pkcs11.yaml delete mode 100644 environments/barbican-backend-simple-crypto.yaml delete mode 100644 environments/cadf.yaml delete mode 100644 environments/cephadm/ceph-dashboard.yaml delete mode 100644 environments/cephadm/ceph-mds.yaml delete mode 100644 environments/cephadm/ceph-rbdmirror.yaml delete mode 100644 environments/cephadm/cephadm-rbd-only.yaml delete mode 100644 environments/cephadm/cephadm.yaml delete mode 100644 environments/cinder-backup-active-active.yaml delete mode 100644 environments/cinder-backup-active-passive.yaml delete mode 100644 environments/cinder-backup.yaml delete mode 100644 environments/cinder-dellemc-powerflex-config.yaml delete mode 100644 environments/cinder-dellemc-powermax-config.yaml delete mode 100644 environments/cinder-dellemc-powerstore-config.yaml delete mode 100644 environments/cinder-dellemc-sc-config.yaml delete mode 100644 environments/cinder-dellemc-unity-config.yaml delete mode 100644 environments/cinder-dellemc-vnx-config.yaml delete mode 100644 environments/cinder-dellemc-xtremio-config.yaml 
delete mode 100644 environments/cinder-dellsc-config.yaml delete mode 100644 environments/cinder-ibm-svf-config.yaml delete mode 100644 environments/cinder-iser.yaml delete mode 100644 environments/cinder-netapp-config.yaml delete mode 100644 environments/cinder-nfs-config.yaml delete mode 100644 environments/cinder-nvmeof-config.yaml delete mode 100644 environments/cinder-pure-config.yaml delete mode 100644 environments/cinder-volume-active-active.yaml delete mode 100644 environments/composable-roles/monolithic-ha.yaml delete mode 100644 environments/composable-roles/monolithic-nonha.yaml delete mode 100644 environments/composable-roles/standalone.yaml delete mode 100644 environments/compute-instanceha.yaml delete mode 100644 environments/compute-real-time-example.yaml delete mode 100644 environments/computealt.yaml delete mode 100644 environments/config-debug.yaml delete mode 100644 environments/container-image-prepare-debug.yaml delete mode 100644 environments/dcn-storage.yaml delete mode 100644 environments/dcn.yaml delete mode 100644 environments/debug.yaml delete mode 100644 environments/deployed-network-environment.j2.yaml delete mode 100644 environments/deployed-networks.yaml delete mode 100644 environments/deployed-ports.j2.yaml delete mode 100644 environments/deployed-server-deployed-neutron-ports.yaml delete mode 100644 environments/disable-heat.yaml delete mode 100644 environments/disable-neutron.yaml delete mode 100644 environments/disable-paunch.yaml delete mode 100644 environments/disable-swift.yaml delete mode 100644 environments/docker-ha.yaml delete mode 100644 environments/enable-designate.yaml delete mode 100644 environments/enable-federation-openidc.yaml delete mode 100644 environments/enable-legacy-telemetry.yaml delete mode 100644 environments/enable-secure-rbac.yaml delete mode 100644 environments/enable-stf.yaml delete mode 100644 environments/enable-swap-partition.yaml delete mode 100644 environments/enable-swap.yaml delete mode 100644 environments/external-ceph.yaml delete mode 100644 environments/external-loadbalancer-vip-v6-all.yaml delete mode 100644 environments/external-loadbalancer-vip-v6.yaml delete mode 100644 environments/external-loadbalancer-vip.yaml delete mode 100644 environments/external-network-vip.yaml delete mode 100644 environments/fips.yaml delete mode 100644 environments/firewall.yaml delete mode 100644 environments/fixed-ip-vips-v6.yaml delete mode 100644 environments/fixed-ip-vips.yaml delete mode 100644 environments/ha-redis.yaml delete mode 100644 environments/horizon_password_validation.yaml delete mode 100644 environments/ips-from-pool-all.yaml delete mode 100644 environments/ips-from-pool-ctlplane.yaml delete mode 100644 environments/ips-from-pool.yaml delete mode 100644 environments/ipsec.yaml delete mode 100644 environments/lifecycle/undercloud-upgrade-prepare.yaml delete mode 100644 environments/lifecycle/update-prepare.yaml delete mode 100644 environments/lifecycle/upgrade-prepare.yaml delete mode 100644 environments/logging-environment-rsyslog.yaml delete mode 100644 environments/login-defs.yaml delete mode 100644 environments/low-memory-usage.yaml delete mode 100644 environments/manila-cephfsganesha-config.yaml delete mode 100644 environments/manila-cephfsnative-config.yaml delete mode 100644 environments/manila-flashblade-config.yaml delete mode 100644 environments/manila-isilon-config.yaml delete mode 100644 environments/manila-netapp-config.yaml delete mode 100644 environments/manila-powermax-config.yaml delete mode 100644 
environments/manila-unity-config.yaml delete mode 100644 environments/manila-vnx-config.yaml delete mode 100644 environments/memcached-use-ips.yaml delete mode 100644 environments/messaging/rpc-qdrouterd-notify-rabbitmq-hybrid.yaml delete mode 100644 environments/messaging/rpc-rabbitmq-notify-rabbitmq-shared.yaml delete mode 100644 environments/metrics/ceilometer-write-qdr.yaml delete mode 100644 environments/metrics/collectd-read-rabbitmq.yaml delete mode 100644 environments/metrics/collectd-write-qdr.yaml delete mode 100644 environments/metrics/qdr-edge-only-ansible.yaml delete mode 100644 environments/metrics/qdr-edge-only.yaml delete mode 100644 environments/metrics/qdr-form-controller-mesh-ansible.yaml delete mode 100644 environments/metrics/qdr-form-controller-mesh.yaml delete mode 100644 environments/multipathd.yaml delete mode 100644 environments/net-2-linux-bonds-with-vlans.j2.yaml delete mode 100644 environments/net-bond-with-vlans-no-external.j2.yaml delete mode 100644 environments/net-bond-with-vlans.j2.yaml delete mode 100644 environments/net-dpdkbond-with-vlans.j2.yaml delete mode 100644 environments/net-multiple-nics-vlans.j2.yaml delete mode 100644 environments/net-multiple-nics.j2.yaml delete mode 100644 environments/net-noop.j2.yaml delete mode 100644 environments/net-single-nic-linux-bridge-with-vlans.j2.yaml delete mode 100644 environments/net-single-nic-with-vlans-no-external.j2.yaml delete mode 100644 environments/net-single-nic-with-vlans.j2.yaml delete mode 100644 environments/network-environment-v6-all.j2.yaml delete mode 100644 environments/network-environment-v6.j2.yaml delete mode 100644 environments/network-environment.j2.yaml delete mode 100644 environments/network-management-v6.yaml delete mode 100644 environments/network-management.yaml delete mode 100644 environments/networks-disable.j2.yaml delete mode 100644 environments/neutron-bgpvpn-bagpipe.yaml delete mode 100644 environments/neutron-bgpvpn.yaml delete mode 100644 environments/neutron-l2gw.yaml delete mode 100644 environments/neutron-linuxbridge.yaml delete mode 100644 environments/neutron-ml2-cisco-vts.yaml delete mode 100644 environments/neutron-ml2-mlnx-sdn.yaml delete mode 100644 environments/neutron-ovs-dvr.yaml delete mode 100644 environments/neutron-sfc.yaml delete mode 100644 environments/nonha-arch.yaml delete mode 100644 environments/noop-deploy-steps.yaml delete mode 100644 environments/nova-api-policy.yaml delete mode 100644 environments/nova-az-config.yaml delete mode 100644 environments/nova-cell.yaml delete mode 100644 environments/nova-hw-machine-type-upgrade.yaml delete mode 100644 environments/overcloud-baremetal.j2.yaml delete mode 100644 environments/overcloud-steps.yaml delete mode 100644 environments/ovs-hw-offload.yaml delete mode 100644 environments/podman-ha.yaml delete mode 100644 environments/podman.yaml delete mode 100644 environments/predictable-placement/custom-domain.yaml delete mode 100644 environments/predictable-placement/custom-hostnames.yaml delete mode 100644 environments/public-tls-undercloud.yaml delete mode 100644 environments/puppet-tenant-vlan.yaml delete mode 100644 environments/releasenotes/notes/remove_deprecated_ceph_env_files-e71ea73eefe8bfad.yaml delete mode 100644 environments/rhsm.yaml delete mode 100644 environments/securetty.yaml delete mode 100644 environments/services-baremetal/barbican.yaml delete mode 100644 environments/services-baremetal/cinder-backup.yaml delete mode 100644 environments/services-baremetal/collectd.yaml delete mode 100644 
environments/services-baremetal/etcd.yaml delete mode 100644 environments/services-baremetal/manila.yaml delete mode 100644 environments/services-baremetal/neutron-ml2-cisco-vts.yaml delete mode 100644 environments/services-baremetal/neutron-ovn-dvr-ha.yaml delete mode 100644 environments/services-baremetal/neutron-ovn-ha.yaml delete mode 100644 environments/services-baremetal/neutron-sriov.yaml delete mode 100644 environments/services-baremetal/octavia.yaml delete mode 100644 environments/services/barbican-edge.yaml delete mode 100644 environments/services/barbican.yaml delete mode 100644 environments/services/collectd.yaml delete mode 100644 environments/services/designate.yaml delete mode 100644 environments/services/etcd.yaml delete mode 100644 environments/services/frr.yaml delete mode 100644 environments/services/haproxy-internal-tls-certmonger.yaml delete mode 100644 environments/services/haproxy-public-tls-certmonger.yaml delete mode 100644 environments/services/heat-api.yaml delete mode 100644 environments/services/heat-engine.yaml delete mode 100644 environments/services/ironic-conductor.yaml delete mode 100644 environments/services/ironic-inspector.yaml delete mode 100644 environments/services/ironic-overcloud.yaml delete mode 100644 environments/services/ironic.yaml delete mode 100644 environments/services/keystone_domain_specific_ldap_backend.yaml delete mode 100644 environments/services/manila.yaml delete mode 100644 environments/services/masquerade-networks.yaml delete mode 100644 environments/services/netcontrold.yaml delete mode 100644 environments/services/neutron-ml2-cisco-vts.yaml delete mode 100644 environments/services/neutron-mlnx-agent.yaml delete mode 100644 environments/services/neutron-ovn-dpdk.yaml delete mode 100644 environments/services/neutron-ovn-dvr-ha.yaml delete mode 100644 environments/services/neutron-ovn-ha.yaml delete mode 100644 environments/services/neutron-ovn-sriov.yaml delete mode 100644 environments/services/neutron-ovn-standalone.yaml delete mode 100644 environments/services/neutron-ovs-dpdk.yaml delete mode 100644 environments/services/neutron-ovs-dvr.yaml delete mode 100644 environments/services/neutron-ovs.yaml delete mode 100644 environments/services/neutron-sfc-ovn.yaml delete mode 100644 environments/services/neutron-sriov.yaml delete mode 100644 environments/services/octavia.yaml delete mode 100644 environments/services/ptp.yaml delete mode 100644 environments/services/qdr.yaml delete mode 100644 environments/services/undercloud-haproxy.yaml delete mode 100644 environments/services/undercloud-neutron-ovn-dhcp.yaml delete mode 100644 environments/services/undercloud-tls.yaml delete mode 100644 environments/services/update-odl.yaml delete mode 100644 environments/split-stack-consistent-hostname-format.j2.yaml delete mode 100644 environments/sshd-banner.yaml delete mode 100644 environments/ssl/enable-internal-tls.j2.yaml delete mode 100644 environments/ssl/enable-memcached-tls.yaml delete mode 100644 environments/ssl/enable-tls.yaml delete mode 100644 environments/ssl/inject-trust-anchor-hiera.yaml delete mode 100644 environments/ssl/inject-trust-anchor.yaml delete mode 100644 environments/ssl/no-tls-endpoints-public-ip.yaml delete mode 100644 environments/ssl/tls-endpoints-public-dns.yaml delete mode 100644 environments/ssl/tls-endpoints-public-ip.yaml delete mode 100644 environments/ssl/tls-everywhere-endpoints-dns.yaml delete mode 100644 environments/standalone/standalone-overcloud.yaml delete mode 100644 
environments/standalone/standalone-tripleo.yaml delete mode 100644 environments/stdout-logging.yaml delete mode 100644 environments/storage/cinder-netapp-config.yaml delete mode 100644 environments/storage/cinder-nfs.yaml delete mode 100644 environments/storage/glance-nfs.yaml delete mode 100644 environments/storage/nova-nfs.yaml delete mode 100644 environments/swift-external.yaml delete mode 100644 environments/tripleo-validations.yaml delete mode 100644 environments/undercloud.yaml delete mode 100644 environments/updates/README.md delete mode 100644 environments/updates/update-from-192_0_2-subnet.yaml delete mode 100644 environments/updates/update-from-ml2-ovs-from-rocky.yaml delete mode 100644 environments/updates/update-from-overcloud-compute-hostnames.yaml delete mode 100644 environments/updates/update-from-publicvip-on-ctlplane.yaml delete mode 100644 environments/use-dns-for-vips.yaml delete mode 100644 extraconfig/all_nodes/swap-partition.j2.yaml delete mode 100644 extraconfig/all_nodes/swap.j2.yaml delete mode 100644 extraconfig/nova_metadata/krb-service-principals/role.role.j2.yaml delete mode 100644 extraconfig/post_deploy/README delete mode 100644 extraconfig/post_deploy/default.yaml delete mode 100644 extraconfig/post_deploy/example.yaml delete mode 100644 extraconfig/post_deploy/example_run_on_update.yaml delete mode 100755 extraconfig/post_deploy/undercloud_ctlplane_network.py delete mode 100755 extraconfig/post_deploy/undercloud_post.py delete mode 100755 extraconfig/post_deploy/undercloud_post.sh delete mode 100644 extraconfig/post_deploy/undercloud_post.yaml delete mode 100644 firstboot/.gitkeep delete mode 100644 j2_excludes.yaml delete mode 100644 network-data-samples/default-network-isolation-ipv6.yaml delete mode 100644 network-data-samples/default-network-isolation.yaml delete mode 100644 network-data-samples/ganesha-ipv6.yaml delete mode 100644 network-data-samples/ganesha.yaml delete mode 100644 network-data-samples/legacy-routed-networks-ipv6.yaml delete mode 100644 network-data-samples/legacy-routed-networks.yaml delete mode 100644 network-data-samples/management-ipv6.yaml delete mode 100644 network-data-samples/management.yaml delete mode 100644 network-data-samples/no-networks.yaml delete mode 100644 network-data-samples/routed-networks-ipv6.yaml delete mode 100644 network-data-samples/routed-networks.yaml delete mode 100644 network-data-samples/vip-data-default-network-isolation.yaml delete mode 100644 network-data-samples/vip-data-fixed-ip.yaml delete mode 100644 network-data-samples/vip-data-routed-networks.yaml delete mode 100644 network/deployed_networks.yaml delete mode 100644 network/endpoints/endpoint_map.yaml delete mode 100644 network/networks.j2.yaml delete mode 100644 network/ports/ctlplane_vip.yaml delete mode 100644 network/ports/deployed_port.j2 delete mode 100644 network/ports/deployed_port.network.j2.yaml delete mode 100644 network/ports/deployed_vip_ctlplane.yaml delete mode 100644 network/ports/deployed_vip_port.j2 delete mode 100644 network/ports/deployed_vip_port.network.j2.yaml delete mode 100644 network/ports/from_service.yaml delete mode 100644 network/ports/from_service_v6.yaml delete mode 100644 network/ports/net_ip_list_map.j2.yaml delete mode 100644 network/ports/net_ip_map.j2.yaml delete mode 100644 network/ports/net_vip_map_external.j2.yaml delete mode 100644 network/ports/net_vip_map_external_v6.j2.yaml delete mode 100644 network/ports/noop.yaml delete mode 100644 network/ports/port_from_pool.j2 delete mode 100644 
network/ports/port_from_pool.network.j2.yaml delete mode 100644 network/ports/port_from_pool_v6.network.j2.yaml delete mode 100644 network/service_net_map.j2.yaml delete mode 100644 network_data_default.yaml delete mode 100644 network_data_undercloud.yaml delete mode 100644 overcloud-resource-registry-puppet.j2.yaml delete mode 100644 overcloud.j2.yaml delete mode 100644 plan-samples/README.rst delete mode 100644 plan-samples/plan-environment-derived-params.yaml delete mode 100644 puppet/extraconfig/pre_deploy/README delete mode 100644 puppet/extraconfig/pre_deploy/controller/multiple.yaml delete mode 100644 puppet/extraconfig/pre_deploy/default.yaml delete mode 100644 puppet/extraconfig/pre_deploy/per_node.yaml delete mode 100644 puppet/extraconfig/tls/ca-inject.yaml delete mode 100644 puppet/role.role.j2.yaml delete mode 100644 releasenotes/notes/6.0.0-b52a14a71fc62788.yaml delete mode 100644 releasenotes/notes/Add-EnablePublicTLS-parameter-b3fcd01af6f3c101.yaml delete mode 100644 releasenotes/notes/Add-Internal-TLS-CA-File-parameter-c24ee13daaa11dfc.yaml delete mode 100644 releasenotes/notes/Add-Unbound-forwarder-support-8f29e47d4275a798.yaml delete mode 100644 releasenotes/notes/Add-Unbound-service-ba72830f9c75ecc3.yaml delete mode 100644 releasenotes/notes/Add-support-for-ibm_svf-driver-342ca73422326f62.yaml delete mode 100644 releasenotes/notes/Add-upgrade-specific-hiera-file-7a41a23017a545b9.yaml delete mode 100644 releasenotes/notes/Change-zaqar-profile-path-7b00c68c0812fb3d.yaml delete mode 100644 releasenotes/notes/ContainerLogStdoutPath-20cbce05a1710d8a.yaml delete mode 100644 releasenotes/notes/ContainerNovaLibvirtPidsLimit-cdad2166b6c0195f.yaml delete mode 100644 releasenotes/notes/DdpPackage-581e67f08908ed51.yaml delete mode 100644 releasenotes/notes/Deprecate-cinder-API-nova_catalog_admin_info-006ebda240f730a2.yaml delete mode 100644 releasenotes/notes/Disable-mongodb-by-default-cce37d3254a77d27.yaml delete mode 100644 releasenotes/notes/Enable-TLS-for-libvirt-0aab48cd8339da0f.yaml delete mode 100644 releasenotes/notes/IGMP-snooping-for-ml2ovs-d794ed4eab7c098c.yaml delete mode 100644 releasenotes/notes/Introduce-ManageKeystoneFernetKeys-parameter-2478cf5fc5e64256.yaml delete mode 100644 releasenotes/notes/KeystoneChangePasswordUponFirstUse-boolean-a3b35fa933da1d6b.yaml delete mode 100644 releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml delete mode 100644 releasenotes/notes/Make-gnocchis-swift-storage-endpoint-type-configurable-718be8645b7ab3a0.yaml delete mode 100644 releasenotes/notes/Need-explicit-memcacheddebug-to-activate-memcached-debug.-41cfa69d4537cbd7.yaml delete mode 100644 releasenotes/notes/NovaReservedHugePages-35a13e828bfc92e9.yaml delete mode 100644 releasenotes/notes/ODL-healthcheck-new-API-dae1a7ffce51e359.yaml delete mode 100644 releasenotes/notes/ODL_log_mechanism-99f008362d2392de.yaml delete mode 100644 releasenotes/notes/OVNAvailabilityZone-bc3d44a7e11d83e5.yaml delete mode 100644 releasenotes/notes/OVNEncapTos-d3312e58600050b3.yaml delete mode 100644 releasenotes/notes/OVNEncapType-option-added-85296d2fe1d94f35.yaml delete mode 100644 releasenotes/notes/OvsDisableEMC-ab29e5c08856d439.yaml delete mode 100644 releasenotes/notes/Remove-NeutronFirewallDriver-option-f4289b404abcc0b3.yaml delete mode 100644 releasenotes/notes/Remove-version-from-KeystoneUrl-output-fe4ce6f1a45849d3.yaml delete mode 100644 releasenotes/notes/Support-ceph_volume_mode-parameter-5553a9b39718a749.yaml delete mode 100644 
releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml delete mode 100644 releasenotes/notes/Use-KeystoneFernetKeys-parameter-bd635a106bb8e00f.yaml delete mode 100644 releasenotes/notes/add-CinderEtcdLocalConnect-parameter-8831aad928235458.yaml delete mode 100644 releasenotes/notes/add-ControlPlaneSubnetCidr-to-net_vip_map_external-c2c83431feaf7f35.yaml delete mode 100644 releasenotes/notes/add-GlobalConfigExtraMapData-793757a2b767abe3.yaml delete mode 100644 releasenotes/notes/add-IronicInspectorStorageBackend-parameter-9dd87e751b576007.yaml delete mode 100644 releasenotes/notes/add-all-hosts-to-hostsentry-20a8ee8a1a210ce2.yaml delete mode 100644 releasenotes/notes/add-auth-strategy-parameters-neutron-ironic-3d7330a7dc82f43d.yaml delete mode 100644 releasenotes/notes/add-authtype-for-snmp-readonly-user-c90c9e5a12c92893.yaml delete mode 100644 releasenotes/notes/add-barbican-client-for-dcn-7182e8bab41fce21.yaml delete mode 100644 releasenotes/notes/add-barbican-hsm-code-2ceffb2e1c3f6b67.yaml delete mode 100644 releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml delete mode 100644 releasenotes/notes/add-cadf-environment-0ce0078348c5333f.yaml delete mode 100644 releasenotes/notes/add-ceilometer-agent-ipmi-2c86726d0373d354.yaml delete mode 100644 releasenotes/notes/add-cinder-backend-nvmeof-023d967980fcf7b8.yaml delete mode 100644 releasenotes/notes/add-cinder-backup-nfs-backend-0108fba91a3058ea.yaml delete mode 100644 releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml delete mode 100644 releasenotes/notes/add-cinder-nfs-snapshot-support-16664aa46a67a5ad.yaml delete mode 100644 releasenotes/notes/add-cinder-rbd-flatten-parameter-3951b341262488fe.yaml delete mode 100644 releasenotes/notes/add-cinder-storage-az-parameters-cddf410bc3464193.yaml delete mode 100644 releasenotes/notes/add-cisco_vts_ml2-fa96d8edb117c416.yaml delete mode 100644 releasenotes/notes/add-client-retry-limit-heat-config-14239eada092811e.yaml delete mode 100644 releasenotes/notes/add-compute-sriov-ib-role-15baefb0dadfcbac.yaml delete mode 100644 releasenotes/notes/add-computevdpa-role-1f7c5df94d9900b6.yaml delete mode 100644 releasenotes/notes/add-connection-logging-parameter-e51f59175c8da204.yaml delete mode 100644 releasenotes/notes/add-console-ssl-cipher-protocol-params-d1f50f2c540c3f2b.yaml delete mode 100644 releasenotes/notes/add-container-registry-login-08d6a87586c84a99.yaml delete mode 100644 releasenotes/notes/add-default-boot-interface-parameter-f07c1fe24c7cb543.yaml delete mode 100644 releasenotes/notes/add-default-ntp-server-696b8568e09be497.yaml delete mode 100644 releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml delete mode 100644 releasenotes/notes/add-docker-sfc-bda84d08b119a250.yaml delete mode 100644 releasenotes/notes/add-envs-disable-neutron-heat-6f031e2a4058a581.yaml delete mode 100644 releasenotes/notes/add-etcd-to-distributed-roles-9fe55e4d05f46874.yaml delete mode 100644 releasenotes/notes/add-glance-cinder-mount-point-base-parameter-852554398b9f3a19.yaml delete mode 100644 releasenotes/notes/add-host-routes-to-composable-network-subnets-7076e3b7a466d5fd.yaml delete mode 100644 releasenotes/notes/add-hostgroup-default-for-host-parameter-02e3d48de1f69765.yaml delete mode 100644 releasenotes/notes/add-interface-routes-in-nic-configs-1628993fcaf6095c.yaml delete mode 100644 releasenotes/notes/add-ipv6-diable-options-9aaee219bb87ac6a.yaml delete mode 100644 
releasenotes/notes/add-ironic-inspector-tls-port-4e59d0c2b2922f68.yaml delete mode 100644 releasenotes/notes/add-kek-rewrap-to-barbican-ef930dda85fe1e1d.yaml delete mode 100644 releasenotes/notes/add-kernel-package-parameter-f3ad68ed4b72b0f5.yaml delete mode 100644 releasenotes/notes/add-l2gw-agent-1a2f14a6ceefe362.yaml delete mode 100644 releasenotes/notes/add-l2gw-api-support-2206d3d14f409088.yaml delete mode 100644 releasenotes/notes/add-ldap-backend-0bda702fb0aa24bf.yaml delete mode 100644 releasenotes/notes/add-mellanox-sdn-ml2-docker-58d242b5a8c40ade.yaml delete mode 100644 releasenotes/notes/add-metadata-settings-to-octavia-and-glance-tls-internal-5d8e46650b174626.yaml delete mode 100644 releasenotes/notes/add-metric-name-discovery-parameter-a113a6de8a220408.yaml delete mode 100644 releasenotes/notes/add-mistral-event-engine-3a33700c20c88e86.yaml delete mode 100644 releasenotes/notes/add-mistral-to-roles-52525fe9689e60b1.yaml delete mode 100644 releasenotes/notes/add-mistral-volumes-param-a97418faf7a09022.yaml delete mode 100644 releasenotes/notes/add-networking-ansible-containerized-de68f6c2fd6e05fc.yaml delete mode 100644 releasenotes/notes/add-networking-sfc-72cd16bb34075150.yaml delete mode 100644 releasenotes/notes/add-nova-scheduler-worker-support-0ab66160b936a0c0.yaml delete mode 100644 releasenotes/notes/add-novamkisofscmd-param-6dbb64e4497a8ce6.yaml delete mode 100644 releasenotes/notes/add-octavia-anti-affinity-parameters-fe9222f17b16ee1f.yaml delete mode 100644 releasenotes/notes/add-octavia-cert-key-variables-48133267832ee196.yaml delete mode 100644 releasenotes/notes/add-octavia-driver-agent-support-470fa6fd769c8428.yaml delete mode 100644 releasenotes/notes/add-octavia-log-offload-parameters-dc645daf909a85c2.yaml delete mode 100644 releasenotes/notes/add-octavia-log-offload-protocol-dfc5d8387c3d83bc.yaml delete mode 100644 releasenotes/notes/add-octavia-provider-ovn-e3780665300e7c58.yaml delete mode 100644 releasenotes/notes/add-octavia-tls-settings-7583df511c53b27a.yaml delete mode 100644 releasenotes/notes/add-odl-sriov-env-e31982064c2bf646.yaml delete mode 100644 releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml delete mode 100644 releasenotes/notes/add-opendaylight-inactivity-probe-config-a89f6dcd204192a8.yaml delete mode 100644 releasenotes/notes/add-option-remove-unused-packages-on-upgrade-5a469428a6948148.yaml delete mode 100644 releasenotes/notes/add-ovn-db-endpointmap-2f75dea0b3aa4513.yaml delete mode 100644 releasenotes/notes/add-ovn-dbs-cluster-support-6193cba5be432865.yaml delete mode 100644 releasenotes/notes/add-panko-expirer-bac4243971fc23e5.yaml delete mode 100644 releasenotes/notes/add-param-ovn-dhcp-for-baremetal-disabled-4bc4e648da33f4da.yaml delete mode 100644 releasenotes/notes/add-param-ovn-ovsdb-probe-interval-243e66290a72eb3c.yaml delete mode 100644 releasenotes/notes/add-param-ovn-ovsdb-probe-interval-7ec6aa7f68b9a133.yaml delete mode 100644 releasenotes/notes/add-parameter-kernelargsdeferreboot-5b8e8b2f64c64a91.yaml delete mode 100644 releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml delete mode 100644 releasenotes/notes/add-publictlscafile-parameter-0fd9c19dcd20be0b.yaml delete mode 100644 releasenotes/notes/add-purge-nova-tables-e0706cdcffa0f42e.yaml delete mode 100644 releasenotes/notes/add-qdr-99a27dffef42c13e.yaml delete mode 100644 releasenotes/notes/add-rabbit-fips-option-ddf5a0d7e37d8e5d.yaml delete mode 100644 releasenotes/notes/add-removal-policies-mode-6869362fbeed2cd2.yaml delete mode 
100644 releasenotes/notes/add-s3-driver-da8a31a3c646892e.yaml delete mode 100644 releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml delete mode 100644 releasenotes/notes/add-service-auth-configuration-cb6537998a37f785.yaml delete mode 100644 releasenotes/notes/add-support-for-IPv6-deployment-4c5b577cfb38c416.yaml delete mode 100644 releasenotes/notes/add-support-for-barbican-pkcs11-options-a2ec14369518b40e.yaml delete mode 100644 releasenotes/notes/add-support-for-pure-cinder-1a595f1940d5a06f.yaml delete mode 100644 releasenotes/notes/add-support-manila-cephnfs-backend-fb35ecc995705256.yaml delete mode 100644 releasenotes/notes/add-support-networking-ovn-metadata-agent-3bfecfbabd6d9628.yaml delete mode 100644 releasenotes/notes/add-three-more-octavia-params-1e4a32f910e5f1fc.yaml delete mode 100644 releasenotes/notes/add-vlan_transparent-config-5623f8cffc8b41f0.yaml delete mode 100644 releasenotes/notes/add_bios_interface_support-740a666a4ec629fc.yaml delete mode 100644 releasenotes/notes/add_cisco_vts_ml2_docker-0d7cfafe4647074d.yaml delete mode 100644 releasenotes/notes/add_compute_manage_ksm-86fcbd1dc9a193ca.yaml delete mode 100644 releasenotes/notes/add_db_sync_timeout-c9b2f401cca0b37d.yaml delete mode 100644 releasenotes/notes/add_domain_and_no_ntp_to_ipaclient-048fdfccf0cb7835.yaml delete mode 100644 releasenotes/notes/add_glance_cinder_volume_type_to_configure_multiple_cinder_stores-74eea265ee795660.yaml delete mode 100644 releasenotes/notes/add_glance_image_cache_prefetcher-288120ffa6ee2a13.yaml delete mode 100644 releasenotes/notes/add_image_cache_settings-50af5ff56a7d7f75.yaml delete mode 100644 releasenotes/notes/add_max_disk_devices_to_attach-e219429d61a64bb0.yaml delete mode 100644 releasenotes/notes/add_neutron_segments_plugin_to_default-8acb69b112d4b31c.yaml delete mode 100644 releasenotes/notes/add_params_to_configure_ulimit-2359aa058da58054.yaml delete mode 100644 releasenotes/notes/add_site_id_cisco_ml2-60cfa450637d4fe0.yaml delete mode 100644 releasenotes/notes/add_token_auth-fb9f0dfe8e70a4c1.yaml delete mode 100644 releasenotes/notes/add_worker_self_reference_url_for_distributed_image_import-97458c56a1ad95d9.yaml delete mode 100644 releasenotes/notes/added-new-barbican-option-always-set-cka-sensitive-7a9dc31290899cac.yaml delete mode 100644 releasenotes/notes/adding-rear-service-5fac71fa6fbd9c9e.yaml delete mode 100644 releasenotes/notes/addmemcachedmaxconnections-b591c0fa39e821f5.yaml delete mode 100644 releasenotes/notes/adds-post_upgrade_tasks-eba0656012c861a1.yaml delete mode 100644 releasenotes/notes/agent_down_time-d6aebe530ca31b9e.yaml delete mode 100644 releasenotes/notes/aide-50fc91178430f1a5.yaml delete mode 100644 releasenotes/notes/aide-baremetal-puppet-deprecated-25388c875e1d9884.yaml delete mode 100644 releasenotes/notes/all-in-one-via-undercloud.yaml-8766b43a20a4270f.yaml delete mode 100644 releasenotes/notes/all-nodes-extra-map-data-b8c8829dfa7f1c26.yaml delete mode 100644 releasenotes/notes/allocation_ratio-4a8ecf4cdf5fb7e2.yaml delete mode 100644 releasenotes/notes/allow-configure-image-member-quota-59e228bc62ec7792.yaml delete mode 100644 releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml delete mode 100644 releasenotes/notes/allow-resize-to-same-host-62f05a5370993425.yaml delete mode 100644 releasenotes/notes/aodh-expirer-6631733d237a872e.yaml delete mode 100644 releasenotes/notes/apache-timeout-d61a3910ce7dc162.yaml delete mode 100644 releasenotes/notes/api-policy-4ca739519537f6f4.yaml 
delete mode 100644 releasenotes/notes/auditd-to-ansible-f39bd119bf25320e.yaml delete mode 100644 releasenotes/notes/automated-lvmfilter-3bee670c0108585a.yaml delete mode 100644 releasenotes/notes/barbican-thales-ha-581fbe9b5ef4dc87.yaml delete mode 100644 releasenotes/notes/baremetal-cell-hosts-cd5cf5aa8a33643c.yaml delete mode 100644 releasenotes/notes/baremetal-role-34cb48cc30d7bdb4.yaml delete mode 100644 releasenotes/notes/big-switch-agent-4c743a2112251234.yaml delete mode 100644 releasenotes/notes/blacklistedhostnames-stack-output-02c9f93f9d1ce7d8.yaml delete mode 100644 releasenotes/notes/bluestore_disks_config-f5553b0540237c4c.yaml delete mode 100644 releasenotes/notes/broadcast-dhcp-in-neutron-container-1e7835e4e7292492.yaml delete mode 100644 releasenotes/notes/bug-1741452-6f73b550d94b8f6f.yaml delete mode 100644 releasenotes/notes/bug-1741464-78403f89e9dc5d75.yaml delete mode 100644 releasenotes/notes/bug-1775667-fix-manila-share-typo-upgrade-82d9b3edea77b94a.yaml delete mode 100644 releasenotes/notes/bug-1782123-7b08ab6503033dd3.yaml delete mode 100644 releasenotes/notes/bug-1788337-fix-manila-db-sync-overcloud-deploy-f323d85509ca81ec.yaml delete mode 100644 releasenotes/notes/bug-1794268-0f875aa640b4246e.yaml delete mode 100644 releasenotes/notes/bug-1797130-b40e402a9bffe2f0.yaml delete mode 100644 releasenotes/notes/bug-1823274-ca992c1055035c7b.yaml delete mode 100644 releasenotes/notes/bug-1831122-fix-netapp-env-file-e5aeb0c2bf417d2b.yaml delete mode 100644 releasenotes/notes/bug-1831767-allow-configuring-manila-share-protocols-6ea6bcbbe21b25ee.yaml delete mode 100644 releasenotes/notes/bug-1832461-RedisVirtualIP-and-InternalApi-name-lower-override-7ea2635877b753c7.yaml delete mode 100644 releasenotes/notes/bug-1895899-8d675670a0d05c15.yaml delete mode 100644 releasenotes/notes/bug-1904482-dbc5162c8245a9b3.yaml delete mode 100644 releasenotes/notes/bug-1907214-df2f07cbacbe8a24.yaml delete mode 100644 releasenotes/notes/bug-1953672-haproxy-rsyslog-6f8e386f8909a253.yaml delete mode 100644 releasenotes/notes/bug-1984222-bd32dbd29741ad51.yaml delete mode 100644 releasenotes/notes/ca-certs-ansible-7a06114f0571d7f3.yaml delete mode 100644 releasenotes/notes/ceilometer-keystone-notifications-f1e463d0ec939b22.yaml delete mode 100644 releasenotes/notes/cell_v2_discovery_off_computes-2b977c6b9a01cde2.yaml delete mode 100644 releasenotes/notes/ceph-ansible-blacklist-support-164b5726cc4532b7.yaml delete mode 100644 releasenotes/notes/ceph-ansible-workflow-70f7d52faf4cd419.yaml delete mode 100644 releasenotes/notes/ceph-hci-osd-count-type-241614543ef7cc58.yaml delete mode 100644 releasenotes/notes/ceph-pools-with-ceph-ansible-f82425e585f90ef6.yaml delete mode 100644 releasenotes/notes/ceph_dashboard_composable_service-f72dc4b1cd007734.yaml delete mode 100644 releasenotes/notes/ceph_envs_migration_to_ceph_ansible-fd1a8cb659ac2401.yaml delete mode 100644 releasenotes/notes/ceph_external_multi_config-80d707e5bf75e886.yaml delete mode 100644 releasenotes/notes/cephadm-28185ca8ac814567.yaml delete mode 100644 releasenotes/notes/certificiate-validation-1b08ab8cf40b7cad.yaml delete mode 100644 releasenotes/notes/change-db-sync-timeout-57abe3e48d741842.yaml delete mode 100644 releasenotes/notes/change-ironic-tftp-service-21cc25629b15992f.yaml delete mode 100644 releasenotes/notes/change-keystone-admin-network-a29499018c323cea.yaml delete mode 100644 releasenotes/notes/change-ntp-default-servers-5c4cb51c10ffe60e.yaml delete mode 100644 releasenotes/notes/change-panko-api-port-fb8967420cd036b1.yaml 
delete mode 100644 releasenotes/notes/change-rabbitmq-ha-mode-policy-default-6c6cd7f02181f0e0.yaml delete mode 100644 releasenotes/notes/change-swift-worker-defaults-b98f12fb2f677bf1.yaml delete mode 100644 releasenotes/notes/change_default_NeutronPluginExtensions-b31ed1c44fec4568.yaml delete mode 100644 releasenotes/notes/change_default_docker0_address-6a017b7078825996.yaml delete mode 100644 releasenotes/notes/check-availble-network-in-role-7860d8d5cd1df4b0.yaml delete mode 100644 releasenotes/notes/check-old-style-nic-config-4624a60e3303411b.yaml delete mode 100644 releasenotes/notes/chrony-configured-by-default-859e44745491027a.yaml delete mode 100644 releasenotes/notes/cinder-add-timeout-parameters-54550a6e1c11c0b9.yaml delete mode 100644 releasenotes/notes/cinder-backup-active-active-2eb8f8cf612a7989.yaml delete mode 100644 releasenotes/notes/cinder-backup-compression-algorithm-337a6708264cb84a.yaml delete mode 100644 releasenotes/notes/cinder-backup-concurrency-dc7627c617d36133.yaml delete mode 100644 releasenotes/notes/cinder-backup-gcs-s3-backends-7dc04376150164fc.yaml delete mode 100644 releasenotes/notes/cinder-backup-other-ceph-cluster-36852bf2edfd11a7.yaml delete mode 100644 releasenotes/notes/cinder-clarify-default-volume-type-c77e7a7ddafdf172.yaml delete mode 100644 releasenotes/notes/cinder-default-volume-type-cc21a256202eb476.yaml delete mode 100644 releasenotes/notes/cinder-multiple-dellemc_unity-backends-96c6d2764209e317.yaml delete mode 100644 releasenotes/notes/cinder-multiple-dellemc_vnx-backends-930c8e9309a6ccf5.yaml delete mode 100644 releasenotes/notes/cinder-multiple-netapp-backends-71ed7fef3008e654.yaml delete mode 100644 releasenotes/notes/cinder-multiple-nfs-backends-a6b7a107be066596.yaml delete mode 100644 releasenotes/notes/cinder-multiple-pure-backends-4537532f428315e1.yaml delete mode 100644 releasenotes/notes/cinder-nfs-conversion-dir-ba9b8dce515808ce.yaml delete mode 100644 releasenotes/notes/cinder-nfs-snapshot-support-disabled-2d2e08c97537bc94.yaml delete mode 100644 releasenotes/notes/cinder-nvmeof-fixes-d3f53887e0dde454.yaml delete mode 100644 releasenotes/notes/cinder-rbd-multiconfig-dff6b46a0b20331a.yaml delete mode 100644 releasenotes/notes/cinder-remove-dell-ps-fce96d05f529d0da.yaml delete mode 100644 releasenotes/notes/cinder-support-keystone-project-personas-b71c35c4c1a8bbd8.yaml delete mode 100644 releasenotes/notes/cinder-v1-cleanup-7154ca07652804cf.yaml delete mode 100644 releasenotes/notes/cinder-v3-endpoints-498e00f96e77f3c6.yaml delete mode 100644 releasenotes/notes/cinder-volume-active-active-976f2bc33ab52c94.yaml delete mode 100644 releasenotes/notes/cinder_backup_default_switch_to_A-A-4adb439183be78f0.yaml delete mode 100644 releasenotes/notes/cinder_nfs_backend_new_template-21243ef33346ed9a.yaml delete mode 100644 releasenotes/notes/cinder_rbd_backend_new_template-b25b017fdd7c90c1.yaml delete mode 100644 releasenotes/notes/cleanup-heat-api-cloudwatch-f12315e86c7f12d4.yaml delete mode 100644 releasenotes/notes/cleanup-nova-params-cc7c501dcb9879d0.yaml delete mode 100644 releasenotes/notes/cleanup-odl-clustering-93a3ec132f3c2343.yaml delete mode 100644 releasenotes/notes/cleanup-xinetd-e2f59fda914b32d9.yaml delete mode 100644 releasenotes/notes/collectd-amqp1-a1d8773e8e7ff80e.yaml delete mode 100644 releasenotes/notes/collectd-overcloud-gnocchi-049a63bbd196a9bb.yaml delete mode 100644 releasenotes/notes/collectd-polling-4aac123faaebd1bc.yaml delete mode 100644 
releasenotes/notes/collectd-remove-processes-plugin-default-1d07c9ddfde0c2f7.yaml delete mode 100644 releasenotes/notes/collectd-write-qdr-f790703e3f0c3245.yaml delete mode 100644 releasenotes/notes/composable-ha-37e2d7e1f57f5c10.yaml delete mode 100644 releasenotes/notes/composable-network-subnets-fbfcb6283a54ace7.yaml delete mode 100644 releasenotes/notes/composable-network-vips-4d7b28fa3769d38b.yaml delete mode 100644 releasenotes/notes/composable-upgrades-d9ec7c634365e8e0.yaml delete mode 100644 releasenotes/notes/composable-veritas-hyperscale-driver-e7f0a35d7d9a8df1.yaml delete mode 100644 releasenotes/notes/compute-cpu-shared-set-be9da772ac8f7582.yaml delete mode 100644 releasenotes/notes/compute-resume-guests-state-on-host-boot-090507db3ffad0d4.yaml delete mode 100644 releasenotes/notes/compute-tuned-profile-cbe4e22b1e185c5e.yaml delete mode 100644 releasenotes/notes/compute_deprecated_params-a2d69efd75f7c50f.yaml delete mode 100644 releasenotes/notes/computeovsdpdk-role-67d53a405ce4174b.yaml delete mode 100644 releasenotes/notes/config-download-default-to-true-2331debd56c396eb.yaml delete mode 100644 releasenotes/notes/configurable-snmpd-options-3954c5858e2c7656.yaml delete mode 100644 releasenotes/notes/configure-ip-forward-268c165708cbd203.yaml delete mode 100644 releasenotes/notes/configure-multipathd-d3fb4683b9ffbb4b.yaml delete mode 100644 releasenotes/notes/configure-multipathd-with-ansible-f32f3ea627815191.yaml delete mode 100644 releasenotes/notes/configuring-snat-in-opendaylight-d5ed4d62275e1876.yaml delete mode 100644 releasenotes/notes/congress_removal-95dd16335fbb8bce.yaml delete mode 100644 releasenotes/notes/consolidate-octavia-post-deploy-configs-bc251a5446e5615d.yaml delete mode 100644 releasenotes/notes/containarise-barbican-1253606411d497ff.yaml delete mode 100644 releasenotes/notes/container-images-prepare-c446ed0b04c587f7.yaml delete mode 100644 releasenotes/notes/container-logging-novajoin-90398bce0bc1a7b9.yaml delete mode 100644 releasenotes/notes/container_cli-64a217e8abb508c4.yaml delete mode 100644 releasenotes/notes/container_image_prepare_debug-52fcb324633d2cf0.yaml delete mode 100644 releasenotes/notes/containerize-neutron-lbaas-service-plugin-20562487d6631c88.yaml delete mode 100644 releasenotes/notes/containerized-ceph-role-1e02fd11551e92ac.yaml delete mode 100644 releasenotes/notes/containerized-services-logs-0dc652513870f46d.yaml delete mode 100644 releasenotes/notes/containerized-tempest-support-0ceaaf6427ce36e9.yaml delete mode 100644 releasenotes/notes/containers-as-default-37bbe8afa0a60c2b.yaml delete mode 100644 releasenotes/notes/containers-selinux-context-13b720cc1d5ec6f2.yaml delete mode 100644 releasenotes/notes/contrail-bugfixes-and-dpdk-enabling-0233a06e23259660.yaml delete mode 100644 releasenotes/notes/controller_deprecated_params-7f009de6d17c05a4.yaml delete mode 100644 releasenotes/notes/converge-var_run-to-run-20286a74e780e999.yaml delete mode 100644 releasenotes/notes/convert-resource-name-to-number-80ada6c825554f56.yaml delete mode 100644 releasenotes/notes/corosync_token_timeout-d674c873c505cfe6.yaml delete mode 100644 releasenotes/notes/cpu-flags-5b027db3eb2b86c2.yaml delete mode 100644 releasenotes/notes/cpu-resources-f8b511d39c6e0cfe.yaml delete mode 100644 releasenotes/notes/create-br-ex-for-compute-b967312323d45d9b.yaml delete mode 100644 releasenotes/notes/ctlplane-undercloud-conf-host-routes-00e981d1f00405d8.yaml delete mode 100644 releasenotes/notes/ctlplane_fixed_ip-81d14db5a01fa531.yaml delete mode 100644 
releasenotes/notes/custom-network-names-c62a57827d98eea5.yaml delete mode 100644 releasenotes/notes/dcn-environments-96379042a22f895e.yaml delete mode 100644 releasenotes/notes/dcn-hci-storage-rename-0b1c17dd50f4cc9a.yaml delete mode 100644 releasenotes/notes/debug_per_service-54a260917c4a7e3a.yaml delete mode 100644 releasenotes/notes/decoupling_api_rpc_workers_neutron_api-eb5820d6bcedb53b.yaml delete mode 100644 releasenotes/notes/default-bootstrap-server-id-eff49ef40bfde414.yaml delete mode 100644 releasenotes/notes/default-octavia-ssh-pub-key-to-keypair-70377d43bf76a407.yaml delete mode 100644 releasenotes/notes/default-ovn-to-raft-cluster-9a1fb213d28d0650.yaml delete mode 100644 releasenotes/notes/dellemc-driver-cleanup-4a1febdc51ec674c.yaml delete mode 100644 releasenotes/notes/dellsc-deprecate-old-driver-f428e372280c44e6.yaml delete mode 100644 releasenotes/notes/dellsc-driver-d7cd300a24a64b01.yaml delete mode 100644 releasenotes/notes/dellsc_cinder_a5572898724a11e7.yaml delete mode 100644 releasenotes/notes/deploy_steps_tasks-bb2279bcda7cd04c.yaml delete mode 100644 releasenotes/notes/deployed-server-environment-output-d838c782f76823b7.yaml delete mode 100644 releasenotes/notes/deployed-server-firewall-purge-9d9fe73faf925056.yaml delete mode 100644 releasenotes/notes/deployed-server-net-config-static-bridge-c15bf767d3a28759.yaml delete mode 100644 releasenotes/notes/deployed-servers-fd47f18204cea105.yaml delete mode 100644 releasenotes/notes/deprecate-DnsServers-a65541c2dceced08.yaml delete mode 100644 releasenotes/notes/deprecate-IronicIPXEEnabled-6cb68b0bfee5ac4d.yaml delete mode 100644 releasenotes/notes/deprecate-MysqlIncreaseFileLimit-20548c5f7234d14c.yaml delete mode 100644 releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml delete mode 100644 releasenotes/notes/deprecate-NovaVGPUTypesDeviceAddressesMapping-b4af9f1a48b66854.yaml delete mode 100644 releasenotes/notes/deprecate-SshServerOptionsOverrides-parameter-40820bd943e20018.yaml delete mode 100644 releasenotes/notes/deprecate-ceilometer-expirer-83b193a07631d89d.yaml delete mode 100644 releasenotes/notes/deprecate-ceilometer-workers-72b01f4bc6423f48.yaml delete mode 100644 releasenotes/notes/deprecate-collector-a16e5d58ae00806d.yaml delete mode 100644 releasenotes/notes/deprecate-deployed-server-port-map-99d4d6a01f630d04.yaml delete mode 100644 releasenotes/notes/deprecate-docker-yaml-fa75b14e93fdf763.yaml delete mode 100644 releasenotes/notes/deprecate-docker_nova_migration_sshd_port-b99c1ba0ac012276.yaml delete mode 100644 releasenotes/notes/deprecate-driver_handles_share_servers-d6af4e7c25c34c6e.yaml delete mode 100644 releasenotes/notes/deprecate-duplicate-nfv-envs-615d5b97bae6f9a9.yaml delete mode 100644 releasenotes/notes/deprecate-etcd-tls-workaround-de5dd1fc19dae5b2.yaml delete mode 100644 releasenotes/notes/deprecate-fluentd-049bce4cb556962f.yaml delete mode 100644 releasenotes/notes/deprecate-ironic-ip-version-27e24f03da6ddd59.yaml delete mode 100644 releasenotes/notes/deprecate-kubernetes-services-69ca5ec733d24644.yaml delete mode 100644 releasenotes/notes/deprecate-management-envs-bbc7fddb0ca871af.yaml delete mode 100644 releasenotes/notes/deprecate-manila-cephfs-enable-snapshot-7857604802378720.yaml delete mode 100644 releasenotes/notes/deprecate-manila-cephfsnative-parameters-e24978c8b344573b.yaml delete mode 100644 releasenotes/notes/deprecate-misspelt-param-34fee48da69fc480.yaml delete mode 100644 releasenotes/notes/deprecate-networking-ansible-11004424696a71a4.yaml delete mode 
100644 releasenotes/notes/deprecate-neutron-host-services-93a7187fdebe0302.yaml delete mode 100644 releasenotes/notes/deprecate-neutron-server-mapping-af17af3d8e09ade4.yaml delete mode 100644 releasenotes/notes/deprecate-neutronsriovnumvfs-96c9de5c6f071aeb.yaml delete mode 100644 releasenotes/notes/deprecate-non-config-download-templates-3971fbf85b93846d.yaml delete mode 100644 releasenotes/notes/deprecate-nova-enable-numa-live-migration-5e0601c7d26a8f3c.yaml delete mode 100644 releasenotes/notes/deprecate-nova-scheduler_default_filters-122a7448d808dd82.yaml delete mode 100644 releasenotes/notes/deprecate-novajoin-ef06b1ca33a2b80c.yaml delete mode 100644 releasenotes/notes/deprecate-opendaylight-service-64b960923324edc4.yaml delete mode 100644 releasenotes/notes/deprecate-panko-b2bdce647d2b9a6d.yaml delete mode 100644 releasenotes/notes/deprecate-puppet-selinux-config-cc8d2788c534d628.yaml delete mode 100644 releasenotes/notes/deprecate-removed-netapp-systems-15c58d7e807ba33a.yaml delete mode 100644 releasenotes/notes/deprecate-sahara-8f0bc905e3d21af2.yaml delete mode 100644 releasenotes/notes/deprecate-sensu-dd2e8933cb147166.yaml delete mode 100644 releasenotes/notes/deprecate-sevice-ipv6-params-e301590647a0c8f5.yaml delete mode 100644 releasenotes/notes/deprecate-snmp-084d9e625f4f4e56.yaml delete mode 100644 releasenotes/notes/deprecate-vpp-2911807152cd4b62.yaml delete mode 100644 releasenotes/notes/deprecate-xinetd-service.yaml-d7594bf8a7b714e2.yaml delete mode 100644 releasenotes/notes/deprecate-zaqar-622f409474813783.yaml delete mode 100644 releasenotes/notes/deprecate_auth_uri_parameter-bdebdc6614ce8b7e.yaml delete mode 100644 releasenotes/notes/deprecate_ceph_pg_num_in_tht-91fe6303cc5ba07e.yaml delete mode 100644 releasenotes/notes/deprecate_docker_all-40eb568c9234a3d8.yaml delete mode 100644 releasenotes/notes/deprecate_ec2_api-811ac17575077b2a.yaml delete mode 100644 releasenotes/notes/deprecate_mistral-9136fd5e41b37e0d.yaml delete mode 100644 releasenotes/notes/deprecated-deployed-server-roles-data-464b3147ae7434d3.yaml delete mode 100644 releasenotes/notes/deprecated-vn-ha-env-files-f0d967d173dcdb16.yaml delete mode 100644 releasenotes/notes/deprecated_services-172a1ae6348e6c52.yaml delete mode 100644 releasenotes/notes/deprecation-573102-e63aeba5930adf28.yaml delete mode 100644 releasenotes/notes/derive-params-custom-plan-env-3a810ff58a68e0ad.yaml delete mode 100644 releasenotes/notes/designate-enable-bind-logging-642e77541645519b.yaml delete mode 100644 releasenotes/notes/designate-environment-file-bd08eef69758a996.yaml delete mode 100644 releasenotes/notes/designate-redis-coordination-b4afdcc8855cc0ca.yaml delete mode 100644 releasenotes/notes/designate-support-b66127d9f4995db2.yaml delete mode 100644 releasenotes/notes/direct-deploy-by-default-bc78a63f0a0c6e15.yaml delete mode 100644 releasenotes/notes/disable-ceilo-api-dfe5d0947563bbe0.yaml delete mode 100644 releasenotes/notes/disable-ceilo-middleware-6853cb92e3e08161.yaml delete mode 100644 releasenotes/notes/disable-core-dump-for-setuid-programs-e83a2a5da908b9c3.yaml delete mode 100644 releasenotes/notes/disable-heat-api-cloudwatch-d5a471da22472bde.yaml delete mode 100644 releasenotes/notes/disable-heat-non-lifecycle-actions-d551fe4551d71770.yaml delete mode 100644 releasenotes/notes/disable-kernel-parameter-for-icmp-redirects-f325f91d71b58b5f.yaml delete mode 100644 releasenotes/notes/disable-manila-cephfs-snapshots-by-default-d5320a05d9b501cf.yaml delete mode 100644 
releasenotes/notes/disable-non-containerized-swift-services-4f7edd98203d749b.yaml delete mode 100644 releasenotes/notes/disable-notification-driver-a888d4e9b8eed1dc.yaml delete mode 100644 releasenotes/notes/disable_default_apache_vhost-f41d11fe07605f7f.yaml delete mode 100644 releasenotes/notes/distributed-roles-a3a5a5d9eb25e6b0.yaml delete mode 100644 releasenotes/notes/dnf-streams-interface-c7191173efb7b139.yaml delete mode 100644 releasenotes/notes/docker-bip-9a334c8f31a59b96.yaml delete mode 100644 releasenotes/notes/docker-puppet-config-volume-5ad50b90dc24672b.yaml delete mode 100644 releasenotes/notes/docker-service-all-roles-5c22a018caeafcf0.yaml delete mode 100644 releasenotes/notes/docker_puppet_tasks-e74637224ee66f66.yaml delete mode 100644 releasenotes/notes/dont-unregister-on-delete-9708f7cbc73a0d2f.yaml delete mode 100644 releasenotes/notes/dpdk_driver_remove-76d61711c19cd099.yaml delete mode 100644 releasenotes/notes/drop-baremetal-aodh-22cb1dfc59204427.yaml delete mode 100644 releasenotes/notes/drop-baremetal-glance-5836be7624e63133.yaml delete mode 100644 releasenotes/notes/drop-baremetal-haproxy-5e2f0f3c9b8da664.yaml delete mode 100644 releasenotes/notes/drop-baremetal-ironic-ab7af5d2b75c9cbf.yaml delete mode 100644 releasenotes/notes/drop-baremetal-keepalived-c48cde2007b372a4.yaml delete mode 100644 releasenotes/notes/drop-baremetal-keystone-000a4babb7f8ef60.yaml delete mode 100644 releasenotes/notes/drop-baremetal-memcached-6cbdfd80bef14a41.yaml delete mode 100644 releasenotes/notes/drop-baremetal-mysql-server-a36a2f39f88c8181.yaml delete mode 100644 releasenotes/notes/drop-baremetal-redis-2e2f221b9ee6f9cc.yaml delete mode 100644 releasenotes/notes/drop-baremetal-sahara-f2922322511047de.yaml delete mode 100644 releasenotes/notes/drop-baremetal-zaqar-abe0f9cae9c87879.yaml delete mode 100644 releasenotes/notes/drop-manila-generic-driver-templates-b33e8966c263a1fd.yaml delete mode 100644 releasenotes/notes/drop-redundant-metric-param-ddea256079fbbbe0.yaml delete mode 100644 releasenotes/notes/dynamic-hieradata-with-dots-deprecated-52d4c77e09821e87.yaml delete mode 100644 releasenotes/notes/enable-arp_accept-6296b0113bc56b10.yaml delete mode 100644 releasenotes/notes/enable-cache-293c39b3b6f55c80.yaml delete mode 100644 releasenotes/notes/enable-deep_compare-fencing-698cec642ecd54a4.yaml delete mode 100644 releasenotes/notes/enable-dns_domain_ports-extension-by-default-4a620563ff5954d6.yaml delete mode 100644 releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml delete mode 100644 releasenotes/notes/enable-glance-image-cache-01e54adaa952f90b.yaml delete mode 100644 releasenotes/notes/enable-logging-suspicious-packets-d5545586f917d2ca.yaml delete mode 100644 releasenotes/notes/enable-ml2-baremetal-with-ironic-service-8c2909023e3896a5.yaml delete mode 100644 releasenotes/notes/enable-neutron-lbaas-integration-b72126f2c7e71cee.yaml delete mode 100644 releasenotes/notes/enable-neutron-lbaas-integration-fa999ccd548ee6b6.yaml delete mode 100644 releasenotes/notes/enable-ntp-iburst-efbc24a43a72daae.yaml delete mode 100644 releasenotes/notes/enable-octavia-flavor-by-default-d356fbb265508f76.yaml delete mode 100644 releasenotes/notes/enable-port-forwarding-in-neutron-956cb21a3310e881.yaml delete mode 100644 releasenotes/notes/enable-secure-rbac-for-neutron-842bd41339a48f26.yaml delete mode 100644 releasenotes/notes/enable-stf-no-resource-registry-db6ee6319964ab7f.yaml delete mode 100644 releasenotes/notes/enable-support-for-external-swift-proxy-941917f8bcc63a5d.yaml delete 
mode 100644 releasenotes/notes/enable-vfio-for-sriov-62b7bd67df250840.yaml delete mode 100644 releasenotes/notes/enable_glance_sparse_image_upload-fe9e594f91cf57f1.yaml delete mode 100644 releasenotes/notes/enable_image_conversion_parameters-740e5282385444c7.yaml delete mode 100644 releasenotes/notes/enable_isolated_aggregate_filtering-2aec5a693bf79852.yaml delete mode 100644 releasenotes/notes/enable_secure_rbac_for_keystone-62685484ef589726.yaml delete mode 100644 releasenotes/notes/enable_secure_rbac_support_for_glance-167d53c491cd326c.yaml delete mode 100644 releasenotes/notes/enabled-image-inject-metadata-properties-72cdc946748e9b1b.yaml delete mode 100644 releasenotes/notes/endpointmap-8825fcd5fa5a2ba2.yaml delete mode 100644 releasenotes/notes/ensure-all-debug-params-boolean-b1256f282e414b98.yaml delete mode 100644 releasenotes/notes/ensure-debug-is-boolean-aecc8fbb6c8fd368.yaml delete mode 100644 releasenotes/notes/erl-sbwtdcpu-sbwtdio-b26506a0430480dc.yaml delete mode 100644 releasenotes/notes/error-on-new-heat-deployment-outputs-1377270acbc5bc7e.yaml delete mode 100644 releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml delete mode 100644 releasenotes/notes/example-roles-d27c748090f6a154.yaml delete mode 100644 releasenotes/notes/expose-metric-processing-delay-0c098d7ec0af0728.yaml delete mode 100644 releasenotes/notes/external-bridge-by-default-on-compute-f3ff6bf46ab80640.yaml delete mode 100644 releasenotes/notes/external-update-upgrade-2d7bd96959ace08d.yaml delete mode 100644 releasenotes/notes/external_ceph_environment-05a1405bce969060.yaml delete mode 100644 releasenotes/notes/external_post_deploy_tasks-1b6423b73ae083cc.yaml delete mode 100644 releasenotes/notes/extra-keystone-notification-topics-8be1d37afd90b910.yaml delete mode 100644 releasenotes/notes/extra_group_vars-aafa71945882442f.yaml delete mode 100644 releasenotes/notes/extraconfig_services-18ca28841974b8dd.yaml delete mode 100644 releasenotes/notes/fast-forward-upgrade-tasks-a8b5d64831a71e24.yaml delete mode 100644 releasenotes/notes/ffu-custom-script-to-switch-repo-a65db91760b46ec2.yaml delete mode 100644 releasenotes/notes/fix-HeatConfigureDelegatedRoles-d16ddb5eb037c4c8.yaml delete mode 100644 releasenotes/notes/fix-PingTestGatewayIPsMap-yaql-filter-empty-string-b024ce1bce9754ef.yaml delete mode 100644 releasenotes/notes/fix-allocation-range-for-StorageNFS-net.yaml-bd77be924e8b7056.yaml delete mode 100644 releasenotes/notes/fix-api-network-resource-name-41bbf4258a0174eb.yaml delete mode 100644 releasenotes/notes/fix-bridge-nf-call-defaults.rst delete mode 100644 releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml delete mode 100644 releasenotes/notes/fix-contiainer-underloud-keepalived-needs-restart-6d7efbb9788e0f95.yaml delete mode 100644 releasenotes/notes/fix-custom-netname-lower-and-tls-everywhere-1f2300f9a2ba4d98.yaml delete mode 100644 releasenotes/notes/fix-default-masquerade-f91bfbcfe8a2a87d.yaml delete mode 100644 releasenotes/notes/fix-default-of-nova-sync-power-state-interval-90778dbcb0104130.yaml delete mode 100644 releasenotes/notes/fix-deployment-failure-when-network-disabled-156190243ff239ea.yaml delete mode 100644 releasenotes/notes/fix-dynamic-network-disabled-9f700a9e900221b6.yaml delete mode 100644 releasenotes/notes/fix-external-bridge-mtu-multiple-nics-config-43b8dc459b5694b2.yaml delete mode 100644 releasenotes/notes/fix-get-occ-config-with-role-count-greater-1-10ce2010556e5b76.yaml delete mode 100644 releasenotes/notes/fix-glance-api-network-4f9d7c20475a5994.yaml delete 
mode 100644 releasenotes/notes/fix-heat-condition-for-rhel-reg-311a3dce76cc0ec1.yaml delete mode 100644 releasenotes/notes/fix-ironic-ppc64le-tftp-folder-creation-36e73d187334dbbe.yaml delete mode 100644 releasenotes/notes/fix-ironic-pxe-tftp-ipv6-a315184667e262b3.yaml delete mode 100644 releasenotes/notes/fix-krb-service-principals-do-not-filter-on-vip-in-jinja-c8f996ffed94d3cd.yaml delete mode 100644 releasenotes/notes/fix-krb-service-principals-with-service-net-map-replace-463dd1296766cc47.yaml delete mode 100644 releasenotes/notes/fix-missing-roles-for-octavia-ca024308bf15f0cf.yaml delete mode 100644 releasenotes/notes/fix-missing-tacker-password-c2ce555cdd52c102.yaml delete mode 100644 releasenotes/notes/fix-neutron-cert-key-perms.yaml-efcc17f188798cc4.yaml delete mode 100644 releasenotes/notes/fix-neutron-dhcp-tls-d62383530419f0bd.yaml delete mode 100644 releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml delete mode 100644 releasenotes/notes/fix-neutron_admin_auth_url-c88224251d8eb807.yaml delete mode 100644 releasenotes/notes/fix-octavia-api-driver-agent-failed-on-reboot-373a31d28ea72587.yaml delete mode 100644 releasenotes/notes/fix-octavia-health-manager-firewall-rule-cdffe31d580ecf4b.yaml delete mode 100644 releasenotes/notes/fix-octavia-tenant-log-facility-default-7b6d0670a51fe845.yaml delete mode 100644 releasenotes/notes/fix-odl-gui-feature-6525b8c6807fb784.yaml delete mode 100644 releasenotes/notes/fix-odl-karaf-logging-eca10973e57caa3a.yaml delete mode 100644 releasenotes/notes/fix-odl-missing-etc-config-87c33bc05f692f44.yaml delete mode 100644 releasenotes/notes/fix-odl-ovs-allowed-network-types-d196d6d40fadb1bc.yaml delete mode 100644 releasenotes/notes/fix-odl-ovs-vhostusermode-7bc2b64fd2676ca2.yaml delete mode 100644 releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml delete mode 100644 releasenotes/notes/fix-odl-tls-docker-67b8453a6be7fa58.yaml delete mode 100644 releasenotes/notes/fix-odl-update-upgrade-cache-83004abe108971c1.yaml delete mode 100644 releasenotes/notes/fix-odl-websocket-firewall-9e2f78ebaa39313f.yaml delete mode 100644 releasenotes/notes/fix-port-from-pool-host-routes-7fcc4d00cb11603d.yaml delete mode 100644 releasenotes/notes/fix-public-cert-generation-41c75be0b07a48fe.yaml delete mode 100644 releasenotes/notes/fix-rpm-deploy-artifact-urls-03d5694073ad159d.yaml delete mode 100644 releasenotes/notes/fix-service-auth-url-octavia-90f19c835cb1cc0a.yaml delete mode 100644 releasenotes/notes/fix-str-replace-hostname-issue-393fb6954f12f7e6.yaml delete mode 100644 releasenotes/notes/fix-tenant-net-name-type-94a9c50c86529001.yaml delete mode 100644 releasenotes/notes/fix-tls-neutron-agents-c40d5fc779d53bfa.yaml delete mode 100644 releasenotes/notes/fix-uc-ctlplane-subnet-defaults-merged-5a6061b29618f5cf.yaml delete mode 100644 releasenotes/notes/fix_nova_host-0b82c88597703353.yaml delete mode 100644 releasenotes/notes/fixing-fs.aio-max.nr-on-compute-06447122b8e12c71.yaml delete mode 100644 releasenotes/notes/frr-support-21648d0660a810ac.yaml delete mode 100644 releasenotes/notes/galera-sst-mariabackup-5a667eed1787353f.yaml delete mode 100644 releasenotes/notes/gather-facts-variable-d7f1d74d1dc68ee9.yaml delete mode 100644 releasenotes/notes/generate-server_certs_key_passphrase-229a677df1b7f6e0.yaml delete mode 100644 releasenotes/notes/generated-sample-environments-8b523f55f36e940c.yaml delete mode 100644 releasenotes/notes/get-occ-config-local-connector-5bbec3f591a9f311.yaml delete mode 100644 
releasenotes/notes/glance-db-purge-bb185353a45880c7.yaml delete mode 100644 releasenotes/notes/glance-internal-service-86274f56712ffaac.yaml delete mode 100644 releasenotes/notes/glance-keystonev3-d35182ba9a3778eb.yaml delete mode 100644 releasenotes/notes/glance-multistore-82d4fc260acfb355.yaml delete mode 100644 releasenotes/notes/glance_image_copy-34a3e62c48eaee0e.yaml delete mode 100644 releasenotes/notes/gnocchi-archive-policy-deprecation-09c086e9a0f9af86.yaml delete mode 100644 releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml delete mode 100644 releasenotes/notes/gnocchi-nfs-backend-90febc9f87e7df08.yaml delete mode 100644 releasenotes/notes/gnocchi-tmp-b5b48708c5dd17df.yaml delete mode 100644 releasenotes/notes/handle-any-network-name-in-ServiceNetMap-efffd0583bab827f.yaml delete mode 100644 releasenotes/notes/haproxy-frontend-backend-b253d26eeadbfb6b.yaml delete mode 100644 releasenotes/notes/haproxy-log-2805e3697cbadf49.yaml delete mode 100644 releasenotes/notes/healthcheck_disabled-7e67b45cda9afb17.yaml delete mode 100644 releasenotes/notes/heat-api-wsgi-timeout-600-640058f1ae18232c.yaml delete mode 100644 releasenotes/notes/hiera_net_ip_map-ff866b443a28bdc4.yaml delete mode 100644 releasenotes/notes/horizon-hsts-43ac1c7b602a4381.yaml delete mode 100644 releasenotes/notes/horizon-session-timeout-ba4a62b5f38cbd11.yaml delete mode 100644 releasenotes/notes/horizon_keystone_member-838d4b65cc0b0d11.yaml delete mode 100644 releasenotes/notes/horizon_logger_debug-cd70c45c1b695e4b.yaml delete mode 100644 releasenotes/notes/host_prep_config_removal-f579718021db5385.yaml delete mode 100644 releasenotes/notes/hw-machine-type-6a0bd7bc3973c15a.yaml delete mode 100644 releasenotes/notes/increase-nova-reserved-host-memory-80434e8484a29680.yaml delete mode 100644 releasenotes/notes/innodb-tuning-param-e71d2fd727c450ec.yaml delete mode 100644 releasenotes/notes/input-validation-server_certs_key_passphrase-908471f31d09f088.yaml delete mode 100644 releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml delete mode 100644 releasenotes/notes/instance-ha-18e59ab0e9697ef3.yaml delete mode 100644 releasenotes/notes/integrate-skydive-900756d8870b3876.yaml delete mode 100644 releasenotes/notes/introducing-qemutlsverify-af590e0243fe6b08.yaml delete mode 100644 releasenotes/notes/ipa-mkhomedir-c126291bcbdd0111.yaml delete mode 100644 releasenotes/notes/ipsec-8e24afb777de48e3.yaml delete mode 100644 releasenotes/notes/ipv6_defaults-7dbb62113f4e5084.yaml delete mode 100644 releasenotes/notes/ipxe_timeout-5824c87e849b1b50.yaml delete mode 100644 releasenotes/notes/ironic-boot-mode-07df530490bead50.yaml delete mode 100644 releasenotes/notes/ironic-boot-option-3f3036aa5e82ec7e.yaml delete mode 100644 releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml delete mode 100644 releasenotes/notes/ironic-conductor-groups-577543f8ca612f06.yaml delete mode 100644 releasenotes/notes/ironic-dhcpv6-stateful-address-count-ca568a32f07aec53.yaml delete mode 100644 releasenotes/notes/ironic-hardware-types-fe5140549d3bb792.yaml delete mode 100644 releasenotes/notes/ironic-inspector-43441782bdf0f84e.yaml delete mode 100644 releasenotes/notes/ironic-inspector-disjoint-inspection-ip-range-f399a938d50daadd.yaml delete mode 100644 releasenotes/notes/ironic-inspector-use-dnsmasq_ip_subnets-abba77307e761b96.yaml delete mode 100644 releasenotes/notes/ironic-networking-baremetal-29d9ad465565bb87.yaml delete mode 100644 releasenotes/notes/ironic-neutron-integration-76c4f9e0d10785e4.yaml delete mode 100644 
releasenotes/notes/ironic-rescue-cb1edecce357fc0b.yaml delete mode 100644 releasenotes/notes/ironic-role-specifc-conductor-group-and-networks-9c5c409253ebe3c8.yaml delete mode 100644 releasenotes/notes/ironic-rpc-transport-b637fd2a3b99fee7.yaml delete mode 100644 releasenotes/notes/ironic-workers-e752707221a86c36.yaml delete mode 100644 releasenotes/notes/ironic_cleanup_config_data-1d4ae909c0869a90.yaml delete mode 100644 releasenotes/notes/ironic_send_power_notifications-f9977e3179b68868.yaml delete mode 100644 releasenotes/notes/isilon_manila_e9677898724a11e7.yaml delete mode 100644 releasenotes/notes/keepalived_depcrecated-12ac4e1d59d29e1d.yaml delete mode 100644 releasenotes/notes/keepalived_deprecated-e0b20da2d51714b7.yaml delete mode 100644 releasenotes/notes/keepalived_removed-04c52519d7b33acb.yaml delete mode 100644 releasenotes/notes/kernel-extra-aa48704056be72cd.yaml delete mode 100644 releasenotes/notes/kernel_sysctl_role-d4f6a50d08b7a388.yaml delete mode 100644 releasenotes/notes/keystone-drop-uuid-token-provider-a4c4827c1a05556b.yaml delete mode 100644 releasenotes/notes/keystone-notification-driver-0c71165430eb57ef.yaml delete mode 100644 releasenotes/notes/keystone_authtoken-655da476bbf82e1c.yaml delete mode 100644 releasenotes/notes/keystone_internal-53cc7b24ebdd9df4.yaml delete mode 100644 releasenotes/notes/l2gw-driver-change-1f5b11d5676c5015.yaml delete mode 100644 releasenotes/notes/l3_agent_radvd_user-1814df18745101d6.yaml delete mode 100644 releasenotes/notes/leave-satellite-repo-enabled-8b60528bd5450c7b.yaml delete mode 100644 releasenotes/notes/libvirt-debug-0bf95db421329ff6.yaml delete mode 100644 releasenotes/notes/libvirt-virtlogd-logs-377ff4083a58cde4.yaml delete mode 100644 releasenotes/notes/libvirt_swtpm_logs-a3ba8c5dcbd53c8c.yaml delete mode 100644 releasenotes/notes/libvirtd-tls-6de6fb35e0ac0ab1.yaml delete mode 100644 releasenotes/notes/libvirtd_use_bind_mounts_for_certs-64cb88f78538a64b.yaml delete mode 100644 releasenotes/notes/live_migration_inbound_addr_all_transports-2fc9cd74d435a367.yaml delete mode 100644 releasenotes/notes/live_migration_port_range-54c28faf0a67a3fc.yaml delete mode 100644 releasenotes/notes/live_migration_wait_for_vif_plug-6d16da261a138fb8.yaml delete mode 100644 releasenotes/notes/logging-paunch-7fa8570b380a7ebd.yaml delete mode 100644 releasenotes/notes/logrotate-containers-purge-a5587253fe6cbb28.yaml delete mode 100644 releasenotes/notes/maint-cleaning-153aa746d92a5292.yaml delete mode 100644 releasenotes/notes/make-NetCidrMapValue-contain-lists-of-cidrs-19bc98dc9bc6b819.yaml delete mode 100644 releasenotes/notes/make-panko-default-8d0e824fc91cef56.yaml delete mode 100644 releasenotes/notes/manage-networks-9b1e5be19180deff.yaml delete mode 100644 releasenotes/notes/manila-db-purge-811512391617216d.yaml delete mode 100644 releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml delete mode 100644 releasenotes/notes/match-enable_dvr-with-NeutronEnableDVR-fe8aac6c4ce52bce.yaml delete mode 100644 releasenotes/notes/max-active-fernet-keys-f960f08838a75eee.yaml delete mode 100644 releasenotes/notes/mdev_types-84da20d1791afbd2.yaml delete mode 100644 releasenotes/notes/memcache_use_advanced_pool-41ca18221e60c05a.yaml delete mode 100644 releasenotes/notes/memcached-max-memory-ef6834d17953fca6.yaml delete mode 100644 releasenotes/notes/memcached-verbose-logs-84ca938d76c16429.yaml delete mode 100644 releasenotes/notes/memcached_hardening-2529734099da27f4.yaml delete mode 100644 
releasenotes/notes/memcached_logging-f69ade3665a70a5c.yaml delete mode 100644 releasenotes/notes/merge_keys_from_services-cd17425d58b49840.yaml delete mode 100644 releasenotes/notes/metrics-qdr-97c00cc8059963fa.yaml delete mode 100644 releasenotes/notes/midonet_removal-9b91c6bb610b139e.yaml delete mode 100644 releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml delete mode 100644 releasenotes/notes/minor-update-env-20657417094d4aeb.yaml delete mode 100644 releasenotes/notes/mistral-mod-wsgi-24d41a6f427237ff.yaml delete mode 100644 releasenotes/notes/mistral-removal-df87b8dd8bf6bd4e.yaml delete mode 100644 releasenotes/notes/mistral_container-23c4432aad469f30.yaml delete mode 100644 releasenotes/notes/mistral_execs-5e1c363c9293504d.yaml delete mode 100644 releasenotes/notes/ml2-networking-ansible-0330b1203f0fc75c.yaml delete mode 100644 releasenotes/notes/mod_ssl-e7fd4db71189242e.yaml delete mode 100644 releasenotes/notes/modular-libvirt-daemons-b5ad6043d6510077.yaml delete mode 100644 releasenotes/notes/mongodb_drop-02daffbfe4975cb9.yaml delete mode 100644 releasenotes/notes/monitor_interval_ovndbs-b14c886737965300.yaml delete mode 100644 releasenotes/notes/more_params_to_configure_ulimit-82057bf64d7173a8.yaml delete mode 100644 releasenotes/notes/move-ipaclient-enroll-to-host-prep-tasks-934c6e0a9f75f15b.yaml delete mode 100644 releasenotes/notes/move_glance_nfs_mount_task_to_common_place-d284378fb16f180a.yaml delete mode 100644 releasenotes/notes/multiple-cinder-rbd-backend-9b015f99887e9241.yaml delete mode 100644 releasenotes/notes/mysql-auth-ed25519-28aaea4e69fbfdf7.yaml delete mode 100644 releasenotes/notes/mysql_bundle_gcomm_cipher-df7655775d20f4d1.yaml delete mode 100644 releasenotes/notes/n1kv_removal-5ff92235ee758cb3.yaml delete mode 100644 releasenotes/notes/named-debug-tasks-start-at-task-28e4ff92ceec85d2.yaml delete mode 100644 releasenotes/notes/network-data-external-id-properties-20d4aec2e557e980.yaml delete mode 100644 releasenotes/notes/network-resources-deletion-policy-a1aee566e650468c.yaml delete mode 100644 releasenotes/notes/network-templates-mtu-setting-a76fe47cfc97b36f.yaml delete mode 100644 releasenotes/notes/network-update-var-355dbbb4daee7f46.yaml delete mode 100644 releasenotes/notes/networking-baremetal-report-interval-d08a44a147a1846e.yaml delete mode 100644 releasenotes/notes/neutron-add-dnsmasq_enable_addr6_list-ead32a7739431607.yaml delete mode 100644 releasenotes/notes/neutron-az-config-service-3085b2c296df06c9.yaml delete mode 100644 releasenotes/notes/neutron-cleanup-services-3a8579cd03fac953.yaml delete mode 100644 releasenotes/notes/neutron-kill-script-4d8b6a0130f6fcdd.yaml delete mode 100644 releasenotes/notes/neutron-logging-plugin-support-added-b2e1292d49e087c3.yaml delete mode 100644 releasenotes/notes/neutron-ml2-overlay-ip-version-4f14932355847aa0.yaml delete mode 100644 releasenotes/notes/neutron-permitted-ethertypes-80dc7f2154786881.yaml delete mode 100644 releasenotes/notes/neutron-placement-6ea6de89bd30b592.yaml delete mode 100644 releasenotes/notes/neutron-port-predictable-ips-857b8369ca81aa4c.yaml delete mode 100644 releasenotes/notes/neutron-routed-provider-networks-2af1220125612d6f.yaml delete mode 100644 releasenotes/notes/neutron-vnic_type_prohibitlist-21e23e511ca0e922.yaml delete mode 100644 releasenotes/notes/neutron_db_rename-bbfbce1c58cadc84.yaml delete mode 100644 releasenotes/notes/new-pmem-params-18fb9c25808a7fe6.yaml delete mode 100644 releasenotes/notes/nftables-13caf0261a170667.yaml delete mode 100644 
releasenotes/notes/nfv-rt-roles-9ceac2b10d565971.yaml delete mode 100644 releasenotes/notes/nic-config-ansible-vars-1943a9f2dcab5477.yaml delete mode 100644 releasenotes/notes/nic-config-script-handle-blank-line-f86553d7589826ed.yaml delete mode 100644 releasenotes/notes/nic-config-with-ansible-default-cbcaedd0a5ad49b0.yaml delete mode 100644 releasenotes/notes/no-classic-drivers-9c59b696d8b50692.yaml delete mode 100644 releasenotes/notes/no-iscsi-df52429ef64f4093.yaml delete mode 100644 releasenotes/notes/node_data_lookup_string_to_json-69362e93d862bd87.yaml delete mode 100644 releasenotes/notes/notification-driver-noop-e322ca6704a5bc50.yaml delete mode 100644 releasenotes/notes/nova-az-config-service.yaml-ed7f3846398b2291.yaml delete mode 100644 releasenotes/notes/nova-compute-image-parameters-eb3a11bf0fd4691b.yaml delete mode 100644 releasenotes/notes/nova-compute-startup-delay-fdb1f229840bd0e6.yaml delete mode 100644 releasenotes/notes/nova-enable_vtpm-b24db74522c8a2eb.yaml delete mode 100644 releasenotes/notes/nova-instance_usage_audit-dfb17d89446352ae.yaml delete mode 100644 releasenotes/notes/nova-live-migration-permit-postcopy-autoconverge-ca1719fd2abed45f.yaml delete mode 100644 releasenotes/notes/nova-machine-type-default-changed-27244a925f6d6200.yaml delete mode 100644 releasenotes/notes/nova-max_concurrent_builds-f900d84f35704452.yaml delete mode 100644 releasenotes/notes/nova-memlock-increase-066ed22764ed3ce1.yaml delete mode 100644 releasenotes/notes/nova-nfs-parms-role-specific-527915c6e99ceb89.yaml delete mode 100644 releasenotes/notes/nova-placement-43fd353e6870461c.yaml delete mode 100644 releasenotes/notes/nova-purge-maxdelay-fadeea1dc59e8086.yaml delete mode 100644 releasenotes/notes/nova-rbd-pool-role-specific-010f6072d641d84f.yaml delete mode 100644 releasenotes/notes/nova-scheduler-tunables-8c1dbab10b289480.yaml delete mode 100644 releasenotes/notes/nova_add_nfs_vers_parameter-62b9e9d6150358d1.yaml delete mode 100644 releasenotes/notes/nova_api_live_migration_policy-04fe402b554085b9.yaml delete mode 100644 releasenotes/notes/nova_api_max_limit-support-43fe9792eca63599.yaml delete mode 100644 releasenotes/notes/nova_api_show_host_status-f0dfaf4c2b0c536f.yaml delete mode 100644 releasenotes/notes/nova_cell_v2_discover_host_use_internal_api-1bebb3e9c6e69113.yaml delete mode 100644 releasenotes/notes/nova_change_neutron_auth_url_to_internal_endpoint-aaf0e550750335eb.yaml delete mode 100644 releasenotes/notes/nova_compute_default_cpu_mode-cda2bb3e56463b3a.yaml delete mode 100644 releasenotes/notes/nova_compute_el8_legacy-273fb004d090191a.yaml delete mode 100644 releasenotes/notes/nova_compute_fix_log_permissions-e866f91848d647fb.yaml delete mode 100644 releasenotes/notes/nova_compute_ksm-444f1cc51ceafb66.yaml delete mode 100644 releasenotes/notes/nova_compute_nfs_default_vers-e61d0bf78794be28.yaml delete mode 100644 releasenotes/notes/nova_compute_nofile-0427e49cc8ae70a6.yaml delete mode 100644 releasenotes/notes/nova_cron_archive_delete_parameters-19288fa689965c58.yaml delete mode 100644 releasenotes/notes/nova_cron_archive_deleted_rows-task_log-efbf0aac77aab128.yaml delete mode 100644 releasenotes/notes/nova_custom_libvirt-guests_unit-7ac2c4b5511ca549.yaml delete mode 100644 releasenotes/notes/nova_deprecate_consoleauth-ffb93ffa5393b630.yaml delete mode 100644 releasenotes/notes/nova_direct_glance_rbd_download-e945933da26f10f0.yaml delete mode 100644 releasenotes/notes/nova_file_backed_memory_and_memory_backing_dir_support_for_qemu-accfda4919b3d9dd.yaml delete mode 
100644 releasenotes/notes/nova_image_cache_ttl-824f241363b9dd4e.yaml delete mode 100644 releasenotes/notes/nova_libvirt_max_queues-8024fc63105bd25d.yaml delete mode 100644 releasenotes/notes/nova_libvirt_mem_stats_period_seconds-b9b606232629cb38.yaml delete mode 100644 releasenotes/notes/nova_libvirt_num_pcie_ports-f904bf0fb9a7b19e.yaml delete mode 100644 releasenotes/notes/nova_libvirt_queue_size_parameter-b4409a91c553ce1b.yaml delete mode 100644 releasenotes/notes/nova_libvirt_ssl_cert_simplification-dbee541be9f55ce5.yaml delete mode 100644 releasenotes/notes/nova_libvirtd_log_filters-63e9e6501d779dd9.yaml delete mode 100644 releasenotes/notes/nova_libvirtd_log_level-b1393b582360ad4f.yaml delete mode 100644 releasenotes/notes/nova_libvirtd_log_outputs-bf4091c0c7c5a968.yaml delete mode 100644 releasenotes/notes/nova_libvirtd_tls_priority-d0129f804d7ca847.yaml delete mode 100644 releasenotes/notes/nova_local_metadata_per_cell-c374bd46a825c07e.yaml delete mode 100644 releasenotes/notes/nova_metadata_http_cert_metadata-274e7e8a66727983.yaml delete mode 100644 releasenotes/notes/nova_metadata_wsgi-bfb240bc84194d05.yaml delete mode 100644 releasenotes/notes/nova_migration_limit_access-20be8d69686ca95c.yaml delete mode 100644 releasenotes/notes/nova_migration_target_libvirt_socket-585fa579e5b2704d.yaml delete mode 100644 releasenotes/notes/nova_move_host_discovery_to_external_post_deploy_tasks-e978560ee59b8b56.yaml delete mode 100644 releasenotes/notes/nova_nfs_default_secontext-5ad33675aaf0b521.yaml delete mode 100644 releasenotes/notes/nova_nfs_enabled_podman-a92ea12cd4cd92c8.yaml delete mode 100644 releasenotes/notes/nova_novnc_network-83a1479bf227f867.yaml delete mode 100644 releasenotes/notes/nova_novnc_proxy_per_cell-f2d0c50d3c21c46c.yaml delete mode 100644 releasenotes/notes/nova_novnc_proxy_ssl_support-edc7bc13d0cd6d2b.yaml delete mode 100644 releasenotes/notes/nova_placement_custom_provider-21203c3ff54c878c.yaml delete mode 100644 releasenotes/notes/nova_qemu_enable_config-c89016b4d93802d6.yaml delete mode 100644 releasenotes/notes/nova_qemu_native_tls_encryption_on_nbd_for_disk_migration-2e16003c4764a399.yaml delete mode 100644 releasenotes/notes/nova_remove_metadata_port_from_api-cdfaf8fb607f6427.yaml delete mode 100644 releasenotes/notes/nova_remove_nova-consoleauth-227cbeb44e9ab5ef.yaml delete mode 100644 releasenotes/notes/nova_rpc_healthcheck-adbe5307dc04eeac.yaml delete mode 100644 releasenotes/notes/nova_run_cell_discovery_on_each_run-11dbb6096ebbf51b.yaml delete mode 100644 releasenotes/notes/nova_run_chown_on_every_deploy-c366af9898ecaeed.yaml delete mode 100644 releasenotes/notes/nova_scheduler_limit_tenants_to_placement_aggregate-9889b0a73dd1d042.yaml delete mode 100644 releasenotes/notes/nova_scheduler_placement_aggregate_required_for_tenants-6c7d90fd01bcc88d.yaml delete mode 100644 releasenotes/notes/nova_scheduler_query_placement_for_availability_zone-ffd415710a9cb903.yaml delete mode 100644 releasenotes/notes/nova_statedir_ownership-54c75dfe8ad64b4f.yaml delete mode 100644 releasenotes/notes/nova_templated_cells_db_urls-2eb151090c49c51d.yaml delete mode 100644 releasenotes/notes/nova_templated_cells_transport_url-275f0b707d9227ab.yaml delete mode 100644 releasenotes/notes/nova_use_nova_api_db_for_placement-eea44dd48c768f04.yaml delete mode 100644 releasenotes/notes/nova_virtlogd_wrapper-120fcfcfa0787b2b.yaml delete mode 100644 releasenotes/notes/nova_wait_for_placement_service_disable_cert_verify-45f532d7a924df86.yaml delete mode 100644 
releasenotes/notes/nsx-support-1254839718d8df8c.yaml delete mode 100644 releasenotes/notes/ntp-poll-options-3019fe517e012300.yaml delete mode 100644 releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml delete mode 100644 releasenotes/notes/numa-aware-vswitches-cd8df5dd9b725230.yaml delete mode 100644 releasenotes/notes/objectstorage_deprecated_params-f7642b6541a0d09c.yaml delete mode 100644 releasenotes/notes/octavia-1687026-c01313aab53f55a4.yaml delete mode 100644 releasenotes/notes/octavia-amphora-image-defaults-0d9efe1a0222b76d.yaml delete mode 100644 releasenotes/notes/octavia-amphora-image-format-f2f3f494e6fbe82c.yaml delete mode 100644 releasenotes/notes/octavia-amphora-ssh-245a21a35598440a.yaml delete mode 100644 releasenotes/notes/octavia-change-event_streamer_driver-default-e5152c28713e7707.yaml delete mode 100644 releasenotes/notes/octavia-enable-log-offload-by-default-53faec2d64688109.yaml delete mode 100644 releasenotes/notes/octavia-log-offload-d1617e767f688da1.yaml delete mode 100644 releasenotes/notes/octavia-mount-amphora-images-dir-31da44a544628687.yaml delete mode 100644 releasenotes/notes/octavia-service-integration-03bd3eb6cfe1efaf.yaml delete mode 100644 releasenotes/notes/octavia-standalone-f1f1121ba77981c3.yaml delete mode 100644 releasenotes/notes/octavia_service_workers-70887b5882ce0bb0.yaml delete mode 100644 releasenotes/notes/odl-dont-mount-data-folder-c993df185af375a8.yaml delete mode 100644 releasenotes/notes/odl-enable-tls-377fbbfff6f67230.yaml delete mode 100644 releasenotes/notes/odl-ovs-hw-offload-54b662c633e9eda8.yaml delete mode 100644 releasenotes/notes/odl-port-binding-d420cac81f714778.yaml delete mode 100644 releasenotes/notes/odl-qos-48b70c804755e3a5.yaml delete mode 100644 releasenotes/notes/odl-user-a4c58ac0c3a64d90.yaml delete mode 100644 releasenotes/notes/odl_delete_data_folder-b8c2f9a9382fd692.yaml delete mode 100644 releasenotes/notes/odl_delete_karaf_folder_on_host-b81465f62fe422d6.yaml delete mode 100644 releasenotes/notes/odl_dscp_marking_inheritance-41e41ff5f0c281d4.yaml delete mode 100644 releasenotes/notes/odl_endpoint-74b00c6ed38e9a98.yaml delete mode 100644 releasenotes/notes/odl_stand_alone_log_config-5358ec26f7e7a970.yaml delete mode 100644 releasenotes/notes/odl_upgrade-f5540d242b9a6b52.yaml delete mode 100644 releasenotes/notes/online-migration-external-upgrade-5093de6bd8993b5c.yaml delete mode 100644 releasenotes/notes/opendaylight-manage-repos-9eaf900c08e8d96f.yaml delete mode 100644 releasenotes/notes/openshift-removal-d7efdc1696a1999e.yaml delete mode 100644 releasenotes/notes/openstack-client-service-86d28dab98f1763f.yaml delete mode 100644 releasenotes/notes/os-net-config-mappings-move-to-ansible-940a48711870ac58.yaml delete mode 100644 releasenotes/notes/os-tripleo-ports-output-hostroutes-3c710bd9f5641101.yaml delete mode 100644 releasenotes/notes/oslo-messaging-separate-backends-2d2221066f88f479.yaml delete mode 100644 releasenotes/notes/overrideable-ssh-options-1bd4a78408eb6c2c.yaml delete mode 100644 releasenotes/notes/ovn-container-support-3ab333fff6e90dc4.yaml delete mode 100644 releasenotes/notes/ovn-dbs-ha-fix-minor-update-issue-3a1206549e3b75aa.yaml delete mode 100644 releasenotes/notes/ovn-dpdk-15e8747068682f91.yaml delete mode 100644 releasenotes/notes/ovn-emit-need-to-frag-config-option-33fe0ee3f6ac0372.yaml delete mode 100644 releasenotes/notes/ovn-fcd4b0168e6745a8.yaml delete mode 100644 releasenotes/notes/ovn-ha-c0139ac519680872.yaml delete mode 100644 
releasenotes/notes/ovn-monitor-all-2fefb215c6f7166c.yaml delete mode 100644 releasenotes/notes/ovn-neutron-metadata-workers-7623588cca0a6948.yaml delete mode 100644 releasenotes/notes/ovn-port-forwarding-1c954c1c57e136d0.yaml delete mode 100644 releasenotes/notes/ovn-remove-nonha-env-files-7b68a1e656b9f753.yaml delete mode 100644 releasenotes/notes/ovn-security-group-logging-0542b777ea58b5f6.yaml delete mode 100644 releasenotes/notes/ovn-separate-vip-ed28ffa3ff77f865.yaml delete mode 100644 releasenotes/notes/ovn-set-dns-servers-config-option-e8f91ad2a05df9a9.yaml delete mode 100644 releasenotes/notes/ovn_igmp_snooping_support-eccdecde74f4b9c8.yaml delete mode 100644 releasenotes/notes/ovn_metadata_agent_support_debug_mode-9d0656ce156a3c91.yaml delete mode 100644 releasenotes/notes/ovn_metadata_remote_probe_interval-b90c7eea0c3fb2f6.yaml delete mode 100644 releasenotes/notes/ovn_openflow_probe_interval-fd99301d95aac62e.yaml delete mode 100644 releasenotes/notes/ovn_remote_probe_interval-023b3fa671f88101.yaml delete mode 100644 releasenotes/notes/ovn_tunnel_encap_type-04df21d622874c27.yaml delete mode 100644 releasenotes/notes/ovs-2-7-support-for-dpdk-fe665cf9c6b0a750.yaml delete mode 100644 releasenotes/notes/ovs-2.5-2.6-composable-upgrades-workaround-73f4e56127c910b4.yaml delete mode 100644 releasenotes/notes/ovs-dpdk-permission-workaround-20aaebcc8d6009ec.yaml delete mode 100644 releasenotes/notes/ovs-dpdk-permissions-50c5b33334ff4711.yaml delete mode 100644 releasenotes/notes/ovs-dpdk-perms-deprecate-cfddb148fce9656d.yaml delete mode 100644 releasenotes/notes/ovs-dpdk-pmd-params-55df11e67acb6736.yaml delete mode 100644 releasenotes/notes/ovs-dpdk-remove-deprecated-params-9b4e46eb336301b2.yaml delete mode 100644 releasenotes/notes/ovs-dpdk-tso-f96406621ec69bd1.yaml delete mode 100644 releasenotes/notes/ovs-hw-offload-a6bf0fa9c39a8204.yaml delete mode 100644 releasenotes/notes/ovs-revalidator-handler-threads.yaml-f5a12d1066b042f1.yaml delete mode 100644 releasenotes/notes/ovs_dpdk_pmd-74f4cfa0ef280cc0.yaml delete mode 100644 releasenotes/notes/ovsdpdk_netcontrold-0a1d4f3e26cea0c6.yaml delete mode 100644 releasenotes/notes/pacemaker-by-default-c5d28ee8dc897c62.yaml delete mode 100644 releasenotes/notes/pacemaker-cluster-common-tag-45c4e8a6e7b08735.yaml delete mode 100644 releasenotes/notes/pacemaker-cluster-common-tag-podman-f9a71344af5c73d6.yaml delete mode 100644 releasenotes/notes/pacemaker-enable-cluster-common-tag-fe03c47bbcbba13c.yaml delete mode 100644 releasenotes/notes/pacemaker-enable-cluster-full-tag-f868a6f0b1f16018.yaml delete mode 100644 releasenotes/notes/pacemaker-rabbitmq-b1b5fc2b47cb84e4.yaml delete mode 100644 releasenotes/notes/parameter-merge-strategy-endpoint-map-83b8321c72421fd6.yaml delete mode 100644 releasenotes/notes/paunch_retiring-9aab8248a0b2973b.yaml delete mode 100644 releasenotes/notes/pcmktlspriorities-4315010185adf45a.yaml delete mode 100644 releasenotes/notes/ping-gateway-before-controllers-e029e81961dbaee8.yaml delete mode 100644 releasenotes/notes/placement-policies-db2704ea63d0bdad.yaml delete mode 100644 releasenotes/notes/pluggable-server-type-per-role-314f38f8e5d4c84e.yaml delete mode 100644 releasenotes/notes/port-physnet-cidr-map-7032fec5a1905314.yaml delete mode 100644 releasenotes/notes/power_state-457f12af30b9e341.yaml delete mode 100644 releasenotes/notes/powerflex-driver-abd8e372280c44e7.yaml delete mode 100644 releasenotes/notes/powermax-driver-d428e372280c44e6.yaml delete mode 100644 
releasenotes/notes/powerstore-driver-e528e372280c44e6.yaml delete mode 100644 releasenotes/notes/pre-network-config-role-specific-b36cc4bd6383e493.yaml delete mode 100644 releasenotes/notes/pre_deploy_setup_tasks-addition-63a9e4dfccc2132a.yaml delete mode 100644 releasenotes/notes/pre_upgrade_rolling_tasks-6345e98e8283a907.yaml delete mode 100644 releasenotes/notes/ps-san_private_key-5aa111e7907ba600.yaml delete mode 100644 releasenotes/notes/ptp-a1bf70fbfddd1830.yaml delete mode 100644 releasenotes/notes/puppet-auditd-6504295e8c6c7a3b.yaml delete mode 100644 releasenotes/notes/puppet-pacemaker-env-file-removed-1e39f85ff7a54be8.yaml delete mode 100644 releasenotes/notes/pure_add_iscsi_cider_list-4032429b7dc2f1cb.yaml delete mode 100644 releasenotes/notes/pure_add_nvme-9ec98cd889128270.yaml delete mode 100644 releasenotes/notes/pure_storage_update_params-4de801b1ed2d0744.yaml delete mode 100644 releasenotes/notes/purestorage_flashblade-75cbbdae48f233e0.yaml delete mode 100644 releasenotes/notes/query_placement_for_image_type-0bfbef90bb8c3871.yaml delete mode 100644 releasenotes/notes/query_placement_for_routed_network_aggregates-b23a7279643c6a70.yaml delete mode 100644 releasenotes/notes/rabbit-ignore-partitions-0a0ae8c636c256ea.yaml delete mode 100644 releasenotes/notes/rabbit-net_ticktime-519f904e52218c2d.yaml delete mode 100644 releasenotes/notes/rabbitmq-enable-management-plugin-94b27747e4f5e685.yaml delete mode 100644 releasenotes/notes/rabbitmq-erl-args-9029cf4605d63dd9.yaml delete mode 100644 releasenotes/notes/rabbitmq-nr-mirrorqueues-7d0451756a67eab4.yaml delete mode 100644 releasenotes/notes/rbd-disk-cache-modes-284a73271741ea62.yaml delete mode 100644 releasenotes/notes/redfish-9203af1f7bf02bc5.yaml delete mode 100644 releasenotes/notes/redis_disabled_by_default-9992b2bae9b149cd.yaml delete mode 100644 releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml delete mode 100644 releasenotes/notes/refactor-service-vips-a48739c1b2fab207.yaml delete mode 100644 releasenotes/notes/reintroduce-keystone_corn-85290afe6bf8b019.yaml delete mode 100644 releasenotes/notes/releasenotes/notes/add_params_to_configure_rabbitmq_backlog-68da3a07ef506f57.yaml delete mode 100644 releasenotes/notes/releasenotes/notes/add_support_for_timemaster-a8dc3e4d5db4e8b3.yaml delete mode 100644 releasenotes/notes/releasenotes/notes/support-for-disable_compute_service_check_for_ffu-19a91d20e146056c.yaml delete mode 100644 releasenotes/notes/remove-AdminEmail-70cf5b218f916734.yaml delete mode 100644 releasenotes/notes/remove-NeutronCorePluginML2OVN-8641be21f069c1bd.yaml delete mode 100644 releasenotes/notes/remove-NovaPlacement-c33569f37b788eaf.yaml delete mode 100644 releasenotes/notes/remove-ValidateNtp-15724eaa8345aa4f.yaml delete mode 100644 releasenotes/notes/remove-availability-zone-filter-24a0f480bb2a345b.yaml delete mode 100644 releasenotes/notes/remove-cavium-2d8cbe8a5085517a.yaml delete mode 100644 releasenotes/notes/remove-ceilometer-cron-85362e197ba245a0.yaml delete mode 100644 releasenotes/notes/remove-cinder-powerstore_appliances-0a4905808b2b66f1.yaml delete mode 100644 releasenotes/notes/remove-default-password-interface-55a4e85ef0ccef2f.yaml delete mode 100644 releasenotes/notes/remove-default-role-name-in-merge-nic-config-params-script-d670279038411978.yaml delete mode 100644 releasenotes/notes/remove-deployed-server-bootstrap-resource-7de0596b90f1b15a.yaml delete mode 100644 releasenotes/notes/remove-deployed-server-outputs-d990bf75c7d8ef05.yaml delete mode 100644 
releasenotes/notes/remove-deployed-server-roles-data.yaml-090c4f589ca493fd.yaml delete mode 100644 releasenotes/notes/remove-deployment-swift-data-11afcb4e9925d7b2.yaml delete mode 100644 releasenotes/notes/remove-deprecated-ceilometer-services-edc0c260d7009414.yaml delete mode 100644 releasenotes/notes/remove-deprecated-dcn-hci-1aca499135c31e17.yaml delete mode 100644 releasenotes/notes/remove-deprecated-deployed-server-bootstrap-07590a3cf4688cc9.yaml delete mode 100644 releasenotes/notes/remove-deprecated-manila-ceph-pgnum-parameters-b7bb9c95e30467d5.yaml delete mode 100644 releasenotes/notes/remove-disable-config-download-environment-04f61210d0a304a1.yaml delete mode 100644 releasenotes/notes/remove-disabled-glance-registry-2738b41a2e200d95.yaml delete mode 100644 releasenotes/notes/remove-docker-ef1f16744ff03979.yaml delete mode 100644 releasenotes/notes/remove-ec2metadata-parameter-36e75f70d2322e55.yaml delete mode 100644 releasenotes/notes/remove-heat-api-cloudwatch-577417e2b2db3b15.yaml delete mode 100644 releasenotes/notes/remove-heat-installed-rear-feae3174fb303416.yaml delete mode 100644 releasenotes/notes/remove-hpe-lefthand-23c6beaa777e3dfb.yaml delete mode 100644 releasenotes/notes/remove-ironic-conductor-iscsi-module-load-1e7d3c72a222196a.yaml delete mode 100644 releasenotes/notes/remove-iscsi-from-undercloud-b7424ffcdc9c02ea.yaml delete mode 100644 releasenotes/notes/remove-legacy-networking-e9b7f996722bb93b.yaml delete mode 100644 releasenotes/notes/remove-logging-groups-b6cee238a235bbc9.yaml delete mode 100644 releasenotes/notes/remove-logging-sources-e573f5281798a069.yaml delete mode 100644 releasenotes/notes/remove-manila-vmax-61c1559fd3e58642.yaml delete mode 100644 releasenotes/notes/remove-misspelt-param-eeeea6fab711932e.yaml delete mode 100644 releasenotes/notes/remove-networking-ansible-c3fca3209b864aa0.yaml delete mode 100644 releasenotes/notes/remove-networking-bigswitch-9ae5c280990284a2.yaml delete mode 100644 releasenotes/notes/remove-neutron-external-bridge-parameter-a0c7e7ff8d937541.yaml delete mode 100644 releasenotes/notes/remove-neutron-fujitsu-plugins-6414a5d6962e3260.yaml delete mode 100644 releasenotes/notes/remove-neutron-lbaas-e72025b67de3563b.yaml delete mode 100644 releasenotes/notes/remove-neutron-server-mapping-211ca9751dec268d.yaml delete mode 100644 releasenotes/notes/remove-non-config-download-deprecations-a40fb47c0c0a49f1.yaml delete mode 100644 releasenotes/notes/remove-novajoin-3ccef190c99c419b.yaml delete mode 100644 releasenotes/notes/remove-nsx-support-e118640eebf0ce23.yaml delete mode 100644 releasenotes/notes/remove-ntp-20905abec5281f54.yaml delete mode 100644 releasenotes/notes/remove-octavia-service-plugin-from-neutron-config-b923cca547d9dd52.yaml delete mode 100644 releasenotes/notes/remove-octavia_post_workflow_name-84083c092cf79c8d.yaml delete mode 100644 releasenotes/notes/remove-odl-735ca4129c2a450e.yaml delete mode 100644 releasenotes/notes/remove-odl-dlux-gui-4728de06c973cd53.yaml delete mode 100644 releasenotes/notes/remove-old-standalone-environment-file-ee8b07d913b4fcca.yaml delete mode 100644 releasenotes/notes/remove-old-tls-envs-137cf19b55526a81.yaml delete mode 100644 releasenotes/notes/remove-option-remove-unused-packages-on-upgrade-99bfd428dd52b4c9.yaml delete mode 100644 releasenotes/notes/remove-ovn-vif_type-1c09bf29d1bd38da.yaml delete mode 100644 releasenotes/notes/remove-ovndbinternal-endpoint-295f452c938aa06c.yaml delete mode 100644 releasenotes/notes/remove-pacemaker-passwords-default-values-dd0cfdf7922ecf90.yaml 
delete mode 100644 releasenotes/notes/remove-puppet-masquerade-28ae63cfa7f613d6.yaml delete mode 100644 releasenotes/notes/remove-sahara-ffec9acf94d72905.yaml delete mode 100644 releasenotes/notes/remove-stack-action-parameter-285044bb49ad1f5b.yaml delete mode 100644 releasenotes/notes/remove-support-for-puppet-ceph-bdafca24a59e7075.yaml delete mode 100644 releasenotes/notes/remove-tacker-1375cfa1a2ac8fef.yaml delete mode 100644 releasenotes/notes/remove-tempest-container-support-8950767b0047c9af.yaml delete mode 100644 releasenotes/notes/remove-uc-nova-params-edff776d30992aa3.yaml delete mode 100644 releasenotes/notes/remove-undercloud-minion-198ce8ea01c4e366.yaml delete mode 100644 releasenotes/notes/remove-undercloud-specific-services-23046e607565d36d.yaml delete mode 100644 releasenotes/notes/remove-unused-glance-params-ae24869fef23fa06.yaml delete mode 100644 releasenotes/notes/remove-unused-nova-ports-a9f4d9b0ac3e21c9.yaml delete mode 100644 releasenotes/notes/remove-veritas-hyperscale-a0b5da7d882c853f.yaml delete mode 100644 releasenotes/notes/remove-vpp-d950a159ad7b3371.yaml delete mode 100644 releasenotes/notes/remove-xinetd-859f2a31631774d1.yaml delete mode 100644 releasenotes/notes/remove_bootstrap_nodeid_ip-d97ec1da4f73599c.yaml delete mode 100644 releasenotes/notes/remove_deprecated-storage-templates-and-params-a2707785af9f617b.yaml delete mode 100644 releasenotes/notes/remove_deprecated_nfv_envs-30f6b3552d34d489.yaml delete mode 100644 releasenotes/notes/remove_derived_params_osd_type_osd_count-6fe114130d1e4b2e.yaml delete mode 100644 releasenotes/notes/remove_disable_upgrade_deployment_flag-872df40d7ff171b8.yaml delete mode 100644 releasenotes/notes/remove_ffwd_tasks-d1ab630d96a66a59.yaml delete mode 100644 releasenotes/notes/remove_pre_network-fb38d9c2095e0597.yaml delete mode 100644 releasenotes/notes/rename-admin-token-ae1b694f063035c8.yaml delete mode 100644 releasenotes/notes/replace-references-to-old-ctlplane-0df7f2ae8910559c.yaml delete mode 100644 releasenotes/notes/replication_probe_interval_ovn_dbs-4aab423180fb6847.yaml delete mode 100644 releasenotes/notes/restart-certmonger-244416f537859bac.yaml delete mode 100644 releasenotes/notes/restore-ceilometer-disable-templates-82ed8b7d33fc3bda.yaml delete mode 100644 releasenotes/notes/restrict-access-to-kernel-message-buffer-809160674b92a073.yaml delete mode 100644 releasenotes/notes/restrict-mongodb-memory-de7bf6754d7234d9.yaml delete mode 100644 releasenotes/notes/rhel-registration-2752c4e0a14cec98.yaml delete mode 100644 releasenotes/notes/rhsm-service-fbec46930264b355.yaml delete mode 100644 releasenotes/notes/rhsm_proxy_verify-548f104c97cf5f90.yaml delete mode 100644 releasenotes/notes/rm-special-manila-docker-envs-5a376db667ddfd1c.yaml delete mode 100644 releasenotes/notes/role-image-prepare-602ee8bf86463a97.yaml delete mode 100644 releasenotes/notes/role-support-for-upgrade-to-dvr-containers-bc876f82f3e9f139.yaml delete mode 100644 releasenotes/notes/role-tags-16ac2e9e8fcab218.yaml delete mode 100644 releasenotes/notes/roles-data-validation-7845702b5ed85366.yaml delete mode 100644 releasenotes/notes/roles_deprecated_params-50b4bbe8b9e4abc7.yaml delete mode 100644 releasenotes/notes/rpc_reponse_timeout-6feb5f62819b2eaf.yaml delete mode 100644 releasenotes/notes/rsyslog-5dc93db5eb8fc953.yaml delete mode 100644 releasenotes/notes/run-octavia-under-apache-94afa32e4f1ae3e1.yaml delete mode 100644 releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml delete mode 100644 
releasenotes/notes/sat-tools-0d0f0c53de9d34a5.yaml delete mode 100644 releasenotes/notes/sat_capsule-bb59fad44c17f97f.yaml delete mode 100644 releasenotes/notes/sc-driver-a428e372280c44e6.yaml delete mode 100644 releasenotes/notes/scale_tasks-2042b294d074b37a.yaml delete mode 100644 releasenotes/notes/scaleio-deprecate-old-driver-ab28e372280c44e6.yaml delete mode 100644 releasenotes/notes/security-compliance-f4f7ae077b148af1.yaml delete mode 100644 releasenotes/notes/server-blacklist-support-370c1a1f15a28a41.yaml delete mode 100644 releasenotes/notes/service-role-name-0b8609d314564885.yaml delete mode 100644 releasenotes/notes/set-DhcpAgentNotification-to-be-false-by-default-4fe7acaa60d2a95f.yaml delete mode 100644 releasenotes/notes/set-amphora-timezone-d62de55c993c5d70.yaml delete mode 100644 releasenotes/notes/set-ceilometer-auth-flag-382f68ddb2cbcb6b.yaml delete mode 100644 releasenotes/notes/set-tenant-net-mtu-as-neutron-global-physnet-mtu-5e3515a97e8e0367.yaml delete mode 100644 releasenotes/notes/set_java_opts_from_tripleo-d969b1151ec244a0.yaml delete mode 100644 releasenotes/notes/setup_timeouts_ovn_dbs-141be475dd2cd7ae.yaml delete mode 100644 releasenotes/notes/skip-podman-purge-undercloud-a3a30b5ac3a0951b.yaml delete mode 100644 releasenotes/notes/snmp_firewall-ab17f60ba1ec71d2.yaml delete mode 100644 releasenotes/notes/snmp_listen-2364188f73d43b14.yaml delete mode 100644 releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml delete mode 100644 releasenotes/notes/sriov-agent-extentions-08e1d9fb89c2efbf.yaml delete mode 100644 releasenotes/notes/sriov-pci-passthrough-8f28719b889bdaf7.yaml delete mode 100644 releasenotes/notes/sriov-role-1ef30615048239c7.yaml delete mode 100644 releasenotes/notes/sriov-vf-param-removed-fd6f4519b4eeb05a.yaml delete mode 100644 releasenotes/notes/ssh-server-options-overrides-f677913bfd65efe1.yaml delete mode 100644 releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml delete mode 100644 releasenotes/notes/ssh_pass_auth-8cab3ca5a50d2a5a.yaml delete mode 100644 releasenotes/notes/sshd-service-extensions-0c4d0879942a2052.yaml delete mode 100644 releasenotes/notes/stack-name-input-73f4d4d052f1377e.yaml delete mode 100644 releasenotes/notes/stackrc-baremetal-version-309809c01105095f.yaml delete mode 100644 releasenotes/notes/standalone-default-openstack-services-4ca49c6795bf8ccc.yaml delete mode 100644 releasenotes/notes/standalone-selinux-configuration-39a0c7285d8e4c66.yaml delete mode 100644 releasenotes/notes/start-sequence-1-deploy-steps-59043a5ea87a83f8.yaml delete mode 100644 releasenotes/notes/subnet-mapping-into-services-999a2c5a90b85709.yaml delete mode 100644 releasenotes/notes/support-glance-at-dcn-sites-6163b8f5333e31a7.yaml delete mode 100644 releasenotes/notes/support-hostname-deployment-server-blacklist-532221db0e7d2bef.yaml delete mode 100644 releasenotes/notes/support_IPv6_for_tenant_networks-30938bfdde547969.yaml delete mode 100644 releasenotes/notes/svirt_sandbox_file_t-to-container_file_t-f4914561f6e9e4c7.yaml delete mode 100644 releasenotes/notes/swap-prepuppet-and-postpuppet-to-preconfig-and-postconfig-debd5f28bc578d51.yaml delete mode 100644 releasenotes/notes/swift-additional-healthchecks-c286f7b7116e6543.yaml delete mode 100644 releasenotes/notes/swift-barbican-key-id-fix-108f8b58a5092d0a.yaml delete mode 100644 releasenotes/notes/swift-container-sharder-b96c2fa43aa66aac.yaml delete mode 100644 releasenotes/notes/swift-create-d1-containerized-8eda040d9088be7d.yaml delete mode 100644 
releasenotes/notes/swift-create-local-dir-7671f7967620e261.yaml delete mode 100644 releasenotes/notes/swift-do-not-chown-recursive-1086879873cd9fa6.yaml delete mode 100644 releasenotes/notes/swift-fix-ring-sync-7bf3ddbb1ea1e342.yaml delete mode 100644 releasenotes/notes/swift-mount-by-uuid-7744fe7696db4b85.yaml delete mode 100644 releasenotes/notes/swift-mount-var-cache-7e95199532b542c4.yaml delete mode 100644 releasenotes/notes/swift-prevent-relabeling-b9721aa5a1abda6e.yaml delete mode 100644 releasenotes/notes/swift-proxy-recoverable-node-timeout-1fcd7a83f983e61b.yaml delete mode 100644 releasenotes/notes/swift-proxy-use-hash-suffix-a0641435337a4b05.yaml delete mode 100644 releasenotes/notes/swift-recon-cron-8a3023ab693486bd.yaml delete mode 100644 releasenotes/notes/swift-refresh-rings-cc327f998490b0df.yaml delete mode 100644 releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml delete mode 100644 releasenotes/notes/swift-single-replica-cleanup-fdf72b9e462185c7.yaml delete mode 100644 releasenotes/notes/swift_external-d9870450f191b89a.yaml delete mode 100644 releasenotes/notes/swift_hash_path_prefix-5ecc32ed5d78158b.yaml delete mode 100644 releasenotes/notes/switch-manila-api-to-httpd-support-tls-9b995fe4113b2412.yaml delete mode 100644 releasenotes/notes/switch-to-default-ovn-driver-ab4ae9d348158d61.yaml delete mode 100644 releasenotes/notes/tag-common-tasks-4a78275787655fdd.yaml delete mode 100644 releasenotes/notes/tag-step-plays-b1b1ea7584f1665d.yaml delete mode 100644 releasenotes/notes/telemetry-role-services-7dba4996aff89027.yaml delete mode 100644 releasenotes/notes/timezone-ansible-c7ffcd118f881b82.yaml delete mode 100644 releasenotes/notes/tls-inject-86ef6706e68f5740.yaml delete mode 100644 releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml delete mode 100644 releasenotes/notes/tripleo-firewall-ansible-3928f04478a09668.yaml delete mode 100644 releasenotes/notes/tripleo-kernel-hugepages-424c19a4b1579af8.yaml delete mode 100644 releasenotes/notes/tripleo-nova-nfs-ead2827338aa9519.yaml delete mode 100644 releasenotes/notes/tripleo-ssh-known-hosts-5c64b1a90d61d7f2.yaml delete mode 100644 releasenotes/notes/tripleo-validations-b231184816a9ab64.yaml delete mode 100644 releasenotes/notes/tripleo_ui-1923e35ee139f777.yaml delete mode 100644 releasenotes/notes/tuned-baremetal-puppet-deprecated-3e97347917905254.yaml delete mode 100644 releasenotes/notes/tuned-service-650c0eec1cf12a4d.yaml delete mode 100644 releasenotes/notes/tuned_custom_profile-25d1f4a2bc217216.yaml delete mode 100644 releasenotes/notes/ucsm_removal-929fe9971ed4acc8.yaml delete mode 100644 releasenotes/notes/uefi-boot-mode-a8b1b416a0e9cdc4.yaml delete mode 100644 releasenotes/notes/uefi_ipxe_bootfile_name-f2c9cc8971dc1ed8.yaml delete mode 100644 releasenotes/notes/undercloud-add-support-enable-routed-networks-on-update-294acc2136c0fe47.yaml delete mode 100644 releasenotes/notes/undercloud-selinux-37fc6f69a9753109.yaml delete mode 100644 releasenotes/notes/undercloud-tripleo-admin-7043cc0b2e4bfb8a.yaml delete mode 100644 releasenotes/notes/undercloud-upgrade-ephemeral-heat-c838a9c61fc742a3.yaml delete mode 100644 releasenotes/notes/unity_cinder_e9872898724a11e7.yaml delete mode 100644 releasenotes/notes/unity_manila_1967789872aa11e7.yaml delete mode 100644 releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml delete mode 100644 releasenotes/notes/update-cinder-netapp-parameters-cf6da846e72007df.yaml delete mode 100644 releasenotes/notes/update-dellemc-vnx-templates-74a0596fac39b488.yaml 
delete mode 100644 releasenotes/notes/update-heat-templates-to-queens-dec638e7b34bed09.yaml delete mode 100644 releasenotes/notes/update-lb-mgmt-subnet-to-class-b-1cd832ef08a30c85.yaml delete mode 100644 releasenotes/notes/update-manage-event-pipeline-0a21e9ca92cbff4f.yaml delete mode 100644 releasenotes/notes/update-metric-delay-default-963d073026e2cc15.yaml delete mode 100644 releasenotes/notes/update-on-rhel-registration-afbef3ead983b08f.yaml delete mode 100644 releasenotes/notes/update-pcmk-resource-by-default-ed54100721f55a30.yaml delete mode 100644 releasenotes/notes/update-plan-environment-4e164b57a801e2cb.yaml delete mode 100644 releasenotes/notes/update_manila_unity_driver-43aeb041029c4e7f.yaml delete mode 100644 releasenotes/notes/update_manila_vnx_driver-678b22c4fcd81fcf.yaml delete mode 100644 releasenotes/notes/update_odl-cb997ce5c136ebb7.yaml delete mode 100644 releasenotes/notes/update_serial-785ff794ff88fb2e.yaml delete mode 100644 releasenotes/notes/upgrade-stack-action-94598796a9d3511f.yaml delete mode 100644 releasenotes/notes/upgrade_tasks_cleanup-c913f3f30f4a1db6.yaml delete mode 100644 releasenotes/notes/upgrade_tasks_remove_tags_add_when-99f18cdb3cf58f64.yaml delete mode 100644 releasenotes/notes/upgradeleapp-per-role-ad2e84e317ec1291.yaml delete mode 100644 releasenotes/notes/use-ansible-for-hosts-entries-b4905552515e17ff.yaml delete mode 100644 releasenotes/notes/use-attr-interface-routes-dfbe74adce15da1d.yaml delete mode 100644 releasenotes/notes/use-become-true-in-deploy-steps-playbook-01decb18d895879f.yaml delete mode 100644 releasenotes/notes/use-octavias-own-user-log-format-default-3e9bb40c678388fc.yaml delete mode 100644 releasenotes/notes/use-role-name-ExtraConfig-with-deprecations-2688f34fbc6de74a.yaml delete mode 100644 releasenotes/notes/use-server-side-merging-service-net-map-0c65b5e421ce05bd.yaml delete mode 100644 releasenotes/notes/use-strict-host-key-checking-no-766c6d9814243de3.yaml delete mode 100644 releasenotes/notes/use-subnet-attr-interface-default-route-2578a4d292901024.yaml delete mode 100644 releasenotes/notes/use-subnet-attrs-ctlplane-cidr-a02e14a251733726.yaml delete mode 100644 releasenotes/notes/use-subnet-attrs-ctlplane-default-route-625ef5e414a65f2e.yaml delete mode 100644 releasenotes/notes/use-subnet-attrs-dnsservers-f751ec1125a9f787.yaml delete mode 100644 releasenotes/notes/use-subnet-attrs-ec2metadataip-aa28f3c030f13c9c.yaml delete mode 100644 releasenotes/notes/use-tags-for-condition-instead-of-role-name-3c15f9faaa59362e.yaml delete mode 100644 releasenotes/notes/validate-no-config-outputs-used-8abcb673da6d373f.yaml delete mode 100644 releasenotes/notes/var_lib_docker_deprec-0c48311c01605228.yaml delete mode 100644 releasenotes/notes/vgpu-devices-mapping-63dd870f3a00a98a.yaml delete mode 100644 releasenotes/notes/vhost_default_dir-cac327a0ac05df90.yaml delete mode 100644 releasenotes/notes/vipmap-output-4a9ce99930960346.yaml delete mode 100644 releasenotes/notes/vmax_cinder_a6672898724a11e7.yaml delete mode 100644 releasenotes/notes/vmax_manila_2967789872aa11e8.yaml delete mode 100644 releasenotes/notes/vmx-deprecate-driver-e428e372280c44e6.yaml delete mode 100644 releasenotes/notes/vnc_tls-b3707d0134697cc7.yaml delete mode 100644 releasenotes/notes/vnx_cinder_b5572898724a11e7.yaml delete mode 100644 releasenotes/notes/vnx_manila_2967789872aa11e7.yaml delete mode 100644 releasenotes/notes/volume_use_multipath-for-libvirt--c8e93a0bb83e0bc8.yaml delete mode 100644 releasenotes/notes/vpp-84d35e51ff62a58c.yaml delete mode 100644 
releasenotes/notes/vpp-ml2-8e115f7763510531.yaml delete mode 100644 releasenotes/notes/vxflexos-deprecate-driver-cd38e372280c44e6.yaml delete mode 100644 releasenotes/notes/vxflexos-driver-bec8e372280c44e6.yaml delete mode 100644 releasenotes/notes/vxlan-support-for-ovn-1320be8046aca9c6.yaml delete mode 100644 releasenotes/notes/wait_for_nova_api-7af0c6db1b607216.yaml delete mode 100644 releasenotes/notes/workaround-unset-fqdn-for-rhel-reg-be9c4620146096be.yaml delete mode 100644 releasenotes/notes/workflow_tasks-4da5830821b7154b.yaml delete mode 100644 releasenotes/notes/wrapper-containers-debug-844c13348c83de93.yaml delete mode 100644 releasenotes/notes/xtremio-add-ports-option-8991f7c8acc1aadb.yaml delete mode 100644 releasenotes/notes/xtremio-deprecate-old-config-d428e372280c44e6.yaml delete mode 100644 releasenotes/notes/xtremio-driver-a428f372280c44e6.yaml delete mode 100644 releasenotes/notes/xtremio-iscsi-remove-deprecated-68a8830be3d4f2b8.yaml delete mode 100644 releasenotes/notes/xtremio_cinder_c5572898724a11e7.yaml delete mode 100644 releasenotes/notes/zaqar-httpd-e7d91bf396da28d0.yaml delete mode 100644 releasenotes/notes/zaqar-removal-6cdb31276d5a26dc.yaml delete mode 100644 releasenotes/notes/zaqar-use-redis-by-default-930f542dda895a31.yaml delete mode 100644 releasenotes/notes/zaqar-ws-upgrade-8dda7caea7e6bc1e.yaml delete mode 100644 releasenotes/notes/zaqar_backends-ccf8adfd24a17bf5.yaml delete mode 100644 releasenotes/notes/zaqar_undercloud_backends-f63224a2a3aa684e.yaml delete mode 100644 releasenotes/notes/zaqar_ws_timeout_tunnel-d5d1e900dce79b34.yaml delete mode 100644 releasenotes/source/_static/.placeholder delete mode 100644 releasenotes/source/conf.py delete mode 100644 releasenotes/source/index.rst delete mode 100644 releasenotes/source/ocata.rst delete mode 100644 releasenotes/source/pike.rst delete mode 100644 releasenotes/source/queens.rst delete mode 100644 releasenotes/source/rocky.rst delete mode 100644 releasenotes/source/stein.rst delete mode 100644 releasenotes/source/train.rst delete mode 100644 releasenotes/source/unreleased.rst delete mode 100644 releasenotes/source/ussuri.rst delete mode 100644 releasenotes/source/victoria.rst delete mode 100644 releasenotes/source/wallaby.rst delete mode 100644 releasenotes/source/zed.rst delete mode 100644 requirements.txt delete mode 100644 roles/BlockStorage.yaml delete mode 100644 roles/CellController.yaml delete mode 100644 roles/CephAll.yaml delete mode 100644 roles/CephFile.yaml delete mode 100644 roles/CephObject.yaml delete mode 100644 roles/CephStorage.yaml delete mode 100644 roles/Compute.yaml delete mode 100644 roles/ComputeAlt.yaml delete mode 100644 roles/ComputeDVR.yaml delete mode 100644 roles/ComputeHCI.yaml delete mode 100644 roles/ComputeHCIOvsDpdk.yaml delete mode 100644 roles/ComputeHCISriov.yaml delete mode 100644 roles/ComputeInstanceHA.yaml delete mode 100644 roles/ComputeLocalEphemeral.yaml delete mode 100644 roles/ComputeOvsDpdk.yaml delete mode 100644 roles/ComputeOvsDpdkRT.yaml delete mode 100644 roles/ComputeOvsDpdkSriov.yaml delete mode 100644 roles/ComputeOvsDpdkSriovRT.yaml delete mode 100644 roles/ComputePPC64LE.yaml delete mode 100644 roles/ComputeRBDEphemeral.yaml delete mode 100644 roles/ComputeRealTime.yaml delete mode 100644 roles/ComputeSriov.yaml delete mode 100644 roles/ComputeSriovIB.yaml delete mode 100644 roles/ComputeSriovRT.yaml delete mode 100644 roles/ComputeVdpa.yaml delete mode 100644 roles/Controller.yaml delete mode 100644 roles/ControllerAllNovaStandalone.yaml 
delete mode 100644 roles/ControllerNoCeph.yaml delete mode 100644 roles/ControllerNovaStandalone.yaml delete mode 100644 roles/ControllerOpenstack.yaml delete mode 100644 roles/ControllerSriov.yaml delete mode 100644 roles/ControllerStorageDashboard.yaml delete mode 100644 roles/ControllerStorageNfs.yaml delete mode 100644 roles/Database.yaml delete mode 100644 roles/DesignateBind.yaml delete mode 100644 roles/DistributedCompute.yaml delete mode 100644 roles/DistributedComputeHCI.yaml delete mode 100644 roles/DistributedComputeHCIDashboard.yaml delete mode 100644 roles/DistributedComputeHCIScaleOut.yaml delete mode 100644 roles/DistributedComputeScaleOut.yaml delete mode 100644 roles/HciCephAll.yaml delete mode 100644 roles/HciCephFile.yaml delete mode 100644 roles/HciCephMon.yaml delete mode 100644 roles/HciCephObject.yaml delete mode 100644 roles/IronicConductor.yaml delete mode 100644 roles/Messaging.yaml delete mode 100644 roles/Minimal.yaml delete mode 100644 roles/Networker.yaml delete mode 100644 roles/NetworkerSriov.yaml delete mode 100644 roles/NovaManager.yaml delete mode 100644 roles/Novacontrol.yaml delete mode 100644 roles/ObjectStorage.yaml delete mode 100644 roles/README.rst delete mode 100644 roles/Standalone.yaml delete mode 100644 roles/Telemetry.yaml delete mode 100644 roles/Undercloud.yaml delete mode 100644 roles_data.yaml delete mode 100644 roles_data_standalone.yaml delete mode 100644 roles_data_undercloud.yaml delete mode 100644 sample-env-generator/README.rst delete mode 100644 sample-env-generator/composable-roles.yaml delete mode 100644 sample-env-generator/dcn.yaml delete mode 100644 sample-env-generator/messaging.yaml delete mode 100644 sample-env-generator/nova-cell.yaml delete mode 100644 sample-env-generator/openidc.yaml delete mode 100644 sample-env-generator/predictable-placement.yaml delete mode 100644 sample-env-generator/ssl.yaml delete mode 100644 sample-env-generator/standalone.yaml delete mode 100644 sample-env-generator/storage.yaml delete mode 100755 scripts/check-run-nova-compute delete mode 100644 scripts/delay-nova-compute delete mode 100755 scripts/undercloud-upgrade-ephemeral-heat.py delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 test-ansible-requirements.txt delete mode 100644 test-requirements.txt delete mode 100644 tools/__init__.py delete mode 100755 tools/check-up-to-date.sh delete mode 100755 tools/convert_heat_nic_config_to_ansible_j2.py delete mode 100755 tools/convert_policy_yaml_to_heat_template.py delete mode 100755 tools/convert_v1_net_data.py delete mode 100755 tools/make_ceph_disk_list.py delete mode 100755 tools/multi-rhel-container-image-prepare.py delete mode 100755 tools/process-templates.py delete mode 100755 tools/releasenotes_tox.sh delete mode 100755 tools/render-ansible-tasks.py delete mode 100755 tools/roles-data-generate-samples.sh delete mode 100755 tools/roles-data-generate.py delete mode 100755 tools/roles-data-validation.sh delete mode 100644 tools/tests/__init__.py delete mode 100644 tools/tests/nic_config_convert_samples/heat_templates/2-linux-bonds-vlans-controller.yaml delete mode 100644 tools/tests/nic_config_convert_samples/heat_templates/bond-vlans-controller.yaml delete mode 100644 tools/tests/nic_config_convert_samples/heat_templates/complex.yaml delete mode 100644 tools/tests/nic_config_convert_samples/heat_templates/multiple-nics-vlans-controller.yaml delete mode 100644 tools/tests/nic_config_convert_samples/heat_templates/simple.yaml delete mode 100644 
tools/tests/nic_config_convert_samples/heat_templates/single-nic-linux-bridge-vlans-controller.yaml delete mode 100644 tools/tests/nic_config_convert_samples/heat_templates/single-nic-vlans-controller.yaml delete mode 100644 tools/tests/nic_config_convert_samples/j2_references/2-linux-bonds-vlans-controller.j2 delete mode 100644 tools/tests/nic_config_convert_samples/j2_references/bond-vlans-controller.j2 delete mode 100644 tools/tests/nic_config_convert_samples/j2_references/complex_complete.j2 delete mode 100644 tools/tests/nic_config_convert_samples/j2_references/complex_incomplete.j2 delete mode 100644 tools/tests/nic_config_convert_samples/j2_references/multiple-nics-vlans-controller.j2 delete mode 100644 tools/tests/nic_config_convert_samples/j2_references/simple.j2 delete mode 100644 tools/tests/nic_config_convert_samples/j2_references/single-nic-linux-bridge-vlans-controller.j2 delete mode 100644 tools/tests/nic_config_convert_samples/j2_references/single-nic-vlans-controller.j2 delete mode 100644 tools/tests/nic_config_convert_samples/network_file_complex.yaml delete mode 100644 tools/tests/nic_config_convert_samples/networks_file_simple.yaml delete mode 100644 tools/tests/nic_config_convert_samples/stack_env_complex.yaml delete mode 100644 tools/tests/nic_config_convert_samples/stack_env_simple.yaml delete mode 100644 tools/tests/test_convert_heat_nic_config_to_ansible_j2.py delete mode 100755 tools/yaml-diff.py delete mode 100755 tools/yaml-validate.py delete mode 100644 tox.ini delete mode 100644 tripleo_heat_templates/__init__.py delete mode 100755 tripleo_heat_templates/environment_generator.py delete mode 100644 tripleo_heat_templates/tests/__init__.py delete mode 100644 tripleo_heat_templates/tests/test_environment_generator.py delete mode 100644 tripleo_heat_templates/tests/test_tht_ansible_syntax.py delete mode 100644 tripleo_heat_templates/tests/test_tht_ansible_syntax.yml delete mode 100644 tripleo_heat_templates/tests/test_tht_derivce_pci.py delete mode 100644 tripleo_heat_templates/tests/test_yaql.py delete mode 100644 tripleo_heat_templates/tests/tht-role-requirements.yml delete mode 100644 vip_data_default.yaml delete mode 100644 zuul.d/layout.yaml diff --git a/.ansible-lint b/.ansible-lint deleted file mode 100644 index cabc7ce6f9..0000000000 --- a/.ansible-lint +++ /dev/null @@ -1,25 +0,0 @@ -kinds: - - j2: "**/*.j2.yaml" - - playbook: "**/common/*.yml" - - yaml: "roles/*.yaml" -mock_modules: - - tripleo_all_nodes_data -skip_list: - # TODO(zbr): Address in follow-ups - - role-name - - unnamed-task - # yaml ones - - yaml[braces] - - yaml[colons] - - yaml[commas] - - yaml[comments] - - yaml[comments-indentation] - - yaml[empty-lines] - - yaml[indentation] - - yaml[line-length] - - yaml[new-line-at-end-of-file] - - yaml[trailing-spaces] - - yaml[truthy] - - yaml[brackets] - - yaml[hyphens] - - experimental diff --git a/.gitignore b/.gitignore deleted file mode 100644 index fb10b0cc60..0000000000 --- a/.gitignore +++ /dev/null @@ -1,191 +0,0 @@ -# Add patterns in here to exclude files created by tools integrated with this -# repository, such as test frameworks from the project's recommended workflow, -# rendered documentation and package builds. -# -# Don't add patterns to exclude files created by preferred personal tools -# (editors, IDEs, your operating system itself even). 
These should instead be -# maintained outside the repository, for example in a ~/.gitignore file added -# with: -# -# git config --global core.excludesfile '~/.gitignore' - -# Bytecompiled Python -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg -*.egg-info -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -.coverage -.tox -nosetests.xml -.testrepository -.stestr/* -cover/* -MagicMock/* - -# Translations -*.mo - -*.bundle -Gemfile.lock - -# Built by pbr (python setup.py sdist): -AUTHORS -ChangeLog - -# Sphinx -doc/build - -# Editors -*~ -.*.swp -.*sw? - -# Leftovers from other commants like tox -e pep8,tht -.cache -common/post.yaml -common/services/blockstorage-role.yaml -common/services/cephstorage-role.yaml -common/services/compute-role.yaml -common/services/controller-role.yaml -common/services/objectstorage-role.yaml -deployment/apache/apache-baremetal-puppet.yaml -deployment/apache/apache-baremetal-ansible.yaml -deployment/container-image-prepare/container-image-prepare-baremetal-ansible.yaml -deployment/haproxy/haproxy-internal-tls-certmonger.yaml -deployment/octavia/octavia-deployment-config.yaml -environments/deployed-ports.yaml -environments/deployed-network-environment.yaml -environments/deployed-server-environment.yaml -environments/net-2-linux-bonds-with-vlans.yaml -environments/net-bond-with-vlans-no-external.yaml -environments/net-bond-with-vlans.yaml -environments/net-dpdkbond-with-vlans.yaml -environments/net-multiple-nics-vlans.yaml -environments/net-multiple-nics.yaml -environments/net-noop.yaml -environments/net-single-nic-linux-bridge-with-vlans.yaml -environments/net-single-nic-with-vlans-no-external.yaml -environments/net-single-nic-with-vlans.yaml -environments/network-environment-v6-all.yaml -environments/network-environment-v6.yaml -environments/network-environment.yaml -environments/network-isolation-no-tunneling.yaml -environments/network-isolation-v6-all.yaml -environments/network-isolation-v6.yaml -environments/network-isolation.yaml -environments/networks-disable.yaml -environments/overcloud-baremetal.yaml -environments/split-stack-consistent-hostname-format.yaml -environments/ssl/enable-internal-tls.yaml -extraconfig/all_nodes/swap-partition.yaml -extraconfig/all_nodes/swap.yaml -extraconfig/nova_metadata/krb-service-principals/blockstorage-role.yaml -extraconfig/nova_metadata/krb-service-principals/cephstorage-role.yaml -extraconfig/nova_metadata/krb-service-principals/compute-role.yaml -extraconfig/nova_metadata/krb-service-principals/controller-role.yaml -extraconfig/nova_metadata/krb-service-principals/objectstorage-role.yaml -extraconfig/tasks/major_upgrade_pacemaker_init.yaml -network/external.yaml -network/external_v6.yaml -network/internal_api.yaml -network/internal_api_v6.yaml -network/management.yaml -network/management_v6.yaml -network/networks.yaml -network/ports/deployed_external.yaml -network/ports/deployed_internal_api.yaml -network/ports/deployed_management.yaml -network/ports/deployed_storage.yaml -network/ports/deployed_storage_mgmt.yaml -network/ports/deployed_tenant.yaml -network/ports/deployed_vip_external.yaml -network/ports/deployed_vip_internal_api.yaml -network/ports/deployed_vip_management.yaml -network/ports/deployed_vip_storage.yaml -network/ports/deployed_vip_storage_mgmt.yaml -network/ports/deployed_vip_tenant.yaml -network/ports/external.yaml -network/ports/external_from_pool.yaml -network/ports/external_from_pool_v6.yaml 
-network/ports/external_resource_external.yaml -network/ports/external_resource_external_v6.yaml -network/ports/external_resource_internal_api.yaml -network/ports/external_resource_internal_api_v6.yaml -network/ports/external_resource_management.yaml -network/ports/external_resource_management_v6.yaml -network/ports/external_resource_storage.yaml -network/ports/external_resource_storage_mgmt.yaml -network/ports/external_resource_storage_mgmt_v6.yaml -network/ports/external_resource_storage_v6.yaml -network/ports/external_resource_tenant.yaml -network/ports/external_resource_tenant_v6.yaml -network/ports/external_v6.yaml -network/ports/internal_api.yaml -network/ports/internal_api_from_pool.yaml -network/ports/internal_api_from_pool_v6.yaml -network/ports/internal_api_v6.yaml -network/ports/management.yaml -network/ports/management_from_pool.yaml -network/ports/management_from_pool_v6.yaml -network/ports/management_v6.yaml -network/ports/net_ip_list_map.yaml -network/ports/net_ip_map.yaml -network/ports/net_vip_map_external.yaml -network/ports/net_vip_map_external_v6.yaml -network/ports/storage.yaml -network/ports/storage_from_pool.yaml -network/ports/storage_from_pool_v6.yaml -network/ports/storage_mgmt.yaml -network/ports/storage_mgmt_from_pool.yaml -network/ports/storage_mgmt_from_pool_v6.yaml -network/ports/storage_mgmt_v6.yaml -network/ports/storage_v6.yaml -network/ports/tenant.yaml -network/ports/tenant_from_pool.yaml -network/ports/tenant_from_pool_v6.yaml -network/ports/tenant_v6.yaml -network/service_net_map.yaml -network/storage.yaml -network/storage_mgmt.yaml -network/storage_mgmt_v6.yaml -network/storage_v6.yaml -network/tenant.yaml -network/tenant_v6.yaml -overcloud-resource-registry-puppet.yaml -overcloud.yaml -puppet/blockstorage-config.yaml -puppet/blockstorage-role.yaml -puppet/cephstorage-config.yaml -puppet/cephstorage-role.yaml -puppet/compute-config.yaml -puppet/compute-role.yaml -puppet/controller-config.yaml -puppet/controller-role.yaml -puppet/objectstorage-config.yaml -puppet/objectstorage-role.yaml -puppet/post.yaml -tripleo_heat_templates/tests/ansible-pacemaker -tripleo_heat_templates/tests/tripleo-ansible - -# Files created by releasenotes build -releasenotes/build diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index a089b409c4..0000000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=${TEST_PATH:-./} -top_dir=./ diff --git a/LICENSE b/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/README.rst b/README.rst index bb3d5d2a50..4ee2c5f138 100644 --- a/README.rst +++ b/README.rst @@ -1,134 +1,10 @@ -======================== -Team and repository tags -======================== +This project is no longer maintained. -.. 
image:: https://governance.openstack.org/tc/badges/tripleo-heat-templates.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -.. Change things from this point on - -====================== -tripleo-heat-templates -====================== - -Heat templates to deploy OpenStack using OpenStack. - -* Free software: Apache License (2.0) -* Documentation: https://docs.openstack.org/tripleo-docs/latest/ -* Source: https://opendev.org/openstack/tripleo-heat-templates -* Bugs: https://bugs.launchpad.net/tripleo -* Release notes: https://docs.openstack.org/releasenotes/tripleo-heat-templates/ - -Features --------- - -The ability to deploy a multi-node, role based OpenStack deployment using -OpenStack Heat. Notable features include: - - * Choice of deployment/configuration tooling: puppet, (soon) docker - - * Role based deployment: roles for the controller, compute, ceph, swift, - and cinder storage - - * physical network configuration: support for isolated networks, bonding, - and standard ctlplane networking - -Directories ------------ - -A description of the directory layout in TripleO Heat Templates. - - * environments: contains heat environment files that can be used with -e - on the command like to enable features, etc. - - * extraconfig: templates used to enable 'extra' functionality. Includes - functionality for distro specific registration and upgrades. - - * firstboot: example first_boot scripts that can be used when initially - creating instances. - - * network: heat templates to help create isolated networks and ports - - * puppet: templates mostly driven by configuration with puppet. To use these - templates you can use the overcloud-resource-registry-puppet.yaml. - - * validation-scripts: validation scripts useful to all deployment - configurations - - * roles: example roles that can be used with the tripleoclient to generate - a roles_data.yaml for a deployment See the - `roles/README.rst `_ for additional details. 
- -Service testing matrix ----------------------- - -The configuration for the CI scenarios will be defined in `tripleo-heat-templates/ci/` -and should be executed according to the following table: - -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| - | scn000 | scn001 | scn002 | scn003 | scn004 | scn006 | scn007 | scn009 | scn010 | scn013 | non-ha | ovh-ha | -+================+========+========+========+========+========+========+========+========+========+========+========+========+ -| keystone | X | X | X | X | X | X | X | | X | X | X | X | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| glance | | rbd | swift | file | rgw | file | file | | rbd | file | file | file | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| cinder | | rbd | iscsi | | | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| heat | | X | X | | | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| ironic | | | | | | X | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| mysql | X | X | X | X | X | X | X | | X | X | X | X | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| neutron | | ovn | ovn | ovn | ovn | ovn | ovs | | ovn | ovn | ovn | ovn | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| neutron-bgpvpn | | | | | wip | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| neutron-l2gw | | | | | wip | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| om-rpc | | rabbit | rabbit | amqp1 | rabbit | rabbit | rabbit | | rabbit | rabbit | rabbit | rabbit | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| om-notify | | rabbit | rabbit | rabbit | rabbit | rabbit | rabbit | | rabbit | rabbit | rabbit | rabbit | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| redis | | X | X | | | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| haproxy | | X | X | X | X | X | X | | X | X | X | X | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| memcached | | X | X | X | X | X | X | | X | X | X | X | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| pacemaker | | X | X | X | X | X | X | | X | X | X | X | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| nova | | qemu | qemu | qemu | qemu | ironic | qemu | | qemu | qemu | qemu | qemu | 
-+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| placement | | X | X | X | X | X | X | | X | X | X | X | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| ntp | X | X | X | X | X | X | X | X | X | X | X | X | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| snmp | X | X | X | X | X | X | X | X | X | X | X | X | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| timezone | X | X | X | X | X | X | X | X | X | X | X | X | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| swift | | | X | | | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| aodh | | X | X | | | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| ceilometer | | X | X | | | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| gnocchi | | rbd | swift | | | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| barbican | | | X | | | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| cephrgw | | | | | X | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| cephmds | | | | | X | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| manila | | | | | X | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| collectd | | X | | | | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| designate | | | | X | | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| octavia | | | | | | | | | X | X | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ -| Extra Firewall | | | | X | | | | | | | | | -+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +OFTC. 
diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index efceab818b..0000000000 --- a/babel.cfg +++ /dev/null @@ -1 +0,0 @@ -[python: **.py] diff --git a/baremetal-samples/baremetal_deployment.yaml b/baremetal-samples/baremetal_deployment.yaml deleted file mode 100644 index 9f10c90359..0000000000 --- a/baremetal-samples/baremetal_deployment.yaml +++ /dev/null @@ -1,122 +0,0 @@ -- name: Controller - count: 3 - defaults: - network_config: - template: templates/net_config_bridge.j2 - default_route_network: - - external - instances: - - hostname: overcloud-controller-0 - managed: true - networks: - - network: ctlplane - fixed_ip: 192.168.24.10 - - network: external - subnet: external_subnet - fixed_ip: 172.20.12.80 - - network: internal_api - subnet: internal_api_subnet - fixed_ip: 172.16.2.85 - - network: storage - subnet: storage_subnet - fixed_ip: 172.16.1.148 - - network: storage_mgmt - subnet: storage_mgmt_subnet - - network: tenant - subnet: tenant_subnet - fixed_ip: 172.16.0.244 - - hostname: overcloud-controller-1 - managed: true - networks: - - network: ctlplane - fixed_ip: 192.168.24.11 - - network: external - subnet: external_subnet - fixed_ip: 172.20.12.81 - - network: internal_api - subnet: internal_api_subnet - fixed_ip: 172.16.2.86 - - network: storage - subnet: storage_subnet - fixed_ip: 172.16.1.149 - - network: storage_mgmt - subnet: storage_mgmt_subnet - - network: tenant - subnet: tenant_subnet - fixed_ip: 172.16.0.245 - - hostname: overcloud-controller-2 - managed: true - networks: - - network: ctlplane - fixed_ip: 192.168.24.12 - - network: external - subnet: external_subnet - fixed_ip: 172.20.12.82 - - network: internal_api - subnet: internal_api_subnet - fixed_ip: 172.16.2.87 - - network: storage - subnet: storage_subnet - fixed_ip: 172.16.1.150 - - network: storage_mgmt - subnet: storage_mgmt_subnet - - network: tenant - subnet: tenant_subnet - fixed_ip: 172.16.0.246 -- name: Compute - count: 3 - defaults: - network_config: - template: "" - instances: - - hostname: overcloud-compute-0 - managed: true - networks: - - network: ctlplane - fixed_ip: 192.168.24.20 - - network: external - subnet: external_subnet - fixed_ip: 172.20.12.90 - - network: internal_api - subnet: internal_api_subnet - fixed_ip: 172.16.2.95 - - network: storage - subnet: storage_subnet - fixed_ip: 172.16.1.158 - - network: tenant - subnet: tenant_subnet - fixed_ip: 172.16.0.144 - - hostname: overcloud-compute-1 - managed: true - networks: - - network: ctlplane - fixed_ip: 192.168.24.21 - - network: external - subnet: external_subnet - fixed_ip: 172.20.12.91 - - network: internal_api - subnet: internal_api_subnet - fixed_ip: 172.16.2.96 - - network: storage - subnet: storage_subnet - fixed_ip: 172.16.1.159 - - network: tenant - subnet: tenant_subnet - fixed_ip: 172.16.0.145 - - hostname: overcloud-compute-2 - managed: true - networks: - - network: ctlplane - fixed_ip: 192.168.24.22 - - network: external - subnet: external_subnet - fixed_ip: 172.20.12.92 - - network: internal_api - subnet: internal_api_subnet - fixed_ip: 172.16.2.98 - - network: storage - subnet: storage_subnet - fixed_ip: 172.16.1.160 - - network: tenant - subnet: tenant_subnet - fixed_ip: 172.16.0.146 \ No newline at end of file diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 19bddede03..0000000000 --- a/bindep.txt +++ /dev/null @@ -1,11 +0,0 @@ -# This is a cross-platform list tracking distribution packages needed by tests; -# see https://docs.openstack.org/infra/bindep/ for additional information. 
-libssl-dev [platform:dpkg] -openssl-devel [platform:rpm] -gcc [platform:rpm] -python36-devel [platform:rhel8 platform:centos-8] -python3-devel [platform:rpm !platform:rhel8 !platform:centos-8] - -# For SELinux -python3-libselinux [platform:rpm] -python3-libsemanage [platform:rpm] diff --git a/ci/README.rst b/ci/README.rst deleted file mode 100644 index 666e711026..0000000000 --- a/ci/README.rst +++ /dev/null @@ -1,11 +0,0 @@ -======================= -TripleO CI environments -======================= - -TripleO CI environments are exclusively used for Continuous Integration -purpose or for development usage. -They should not be used in production and we don't guarantee they work outside -TripleO CI. - -For more information about TripleO CI, please look: -https://github.com/openstack-infra/tripleo-ci diff --git a/ci/common/ironic_standalone_post.yaml b/ci/common/ironic_standalone_post.yaml deleted file mode 100644 index ddf44ce8d8..0000000000 --- a/ci/common/ironic_standalone_post.yaml +++ /dev/null @@ -1,17 +0,0 @@ -heat_template_version: wallaby - -parameters: - servers: - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - IronicVBMCSetup: - type: vbmc_setup.yaml - properties: - servers: {get_param: servers} - diff --git a/ci/common/vbmc_setup.yaml b/ci/common/vbmc_setup.yaml deleted file mode 100644 index 6fbf1bb531..0000000000 --- a/ci/common/vbmc_setup.yaml +++ /dev/null @@ -1,64 +0,0 @@ -heat_template_version: wallaby - -parameters: - servers: - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - ExtraConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: | - #!/bin/bash - set -e - dnf install -y python3-virtualbmc libvirt-client libvirt-daemon libvirt qemu-kvm - systemctl start libvirtd virtualbmc - git clone https://opendev.org/openstack/ironic /tmp/ironic - mkdir -p /var/log/bmlogs - chmod 777 /var/log/bmlogs - - # Create a ssh keypair and place the private key somewhere ansible inside the - # neutron_api container can read it. - ssh-keygen -P "" -f /etc/puppet/ci-key - chmod 644 /etc/puppet/ci-key - cat /etc/puppet/ci-key.pub >> /root/.ssh/authorized_keys - - LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} - LIBVIRT_STORAGE_POOL_PATH=${LIBVIRT_STORAGE_POOL_PATH:-/var/lib/libvirt/images} - - mkdir -p $LIBVIRT_STORAGE_POOL_PATH - - if ! 
virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then - virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir --target $LIBVIRT_STORAGE_POOL_PATH - virsh pool-autostart $LIBVIRT_STORAGE_POOL - virsh pool-start $LIBVIRT_STORAGE_POOL - fi - - pool_state=$(virsh pool-info $LIBVIRT_STORAGE_POOL | grep State | awk '{ print $2 }') - if [ "$pool_state" != "running" ] ; then - virsh pool-start $LIBVIRT_STORAGE_POOL - fi - - /tmp/ironic/devstack/tools/ironic/scripts/create-node.sh -n node1 -c 1 -m 3072 -d 10 -b br-ex -p 1161 -M 1350 -f qcow2 -a x86_64 -E qemu -l /var/log/bmlogs -A 66:0d:1d:d8:0b:11 > /var/log/bmlogs/create-node-1.log 2>&1 < /dev/null - /tmp/ironic/devstack/tools/ironic/scripts/create-node.sh -n node2 -c 1 -m 3072 -d 10 -b br-ex -p 1162 -M 1350 -f qcow2 -a x86_64 -E qemu -l /var/log/bmlogs -A 66:0d:1d:d8:0b:22 > /var/log/bmlogs/create-node-2.log 2>&1 < /dev/null - - vbmc --no-daemon add node1 --port 1161 - vbmc --no-daemon start node1 - vbmc --no-daemon add node2 --port 1162 - vbmc --no-daemon start node2 - - disown -a - ExtraDeployments: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: servers} - config: {get_resource: ExtraConfig} - actions: ['CREATE'] # Only do this on CREATE - name: VirtNodeExtraConfig diff --git a/ci/custom_ci_roles_data.yaml b/ci/custom_ci_roles_data.yaml deleted file mode 100644 index 34caa2b91c..0000000000 --- a/ci/custom_ci_roles_data.yaml +++ /dev/null @@ -1,254 +0,0 @@ -############################################################################### -# File generated by TripleO -############################################################################### -############################################################################### -# Role: Controller # -############################################################################### -- name: Controller - description: | - Controller role that has all the controller services loaded and handles - Database, Messaging and Network functions. - CountDefault: 1 - tags: - - primary - - controller - # Create external Neutron bridge for SNAT (and floating IPs when using - # ML2/OVS without DVR) - - external_bridge - networks: - ExternalCloud1: - subnet: external_cloud_1_subnet - InternalApiCloud1: - subnet: internal_api_cloud_1_subnet - StorageCloud1: - subnet: storage_cloud_1_subnet - StorageMgmtCloud1: - subnet: storage_mgmt_cloud_1_subnet - TenantCloud1: - subnet: tenant_cloud_1_subnet - # For systems with both IPv4 and IPv6, you may specify a gateway network for - # each, such as ['ControlPlane', 'External'] - default_route_networks: ['ExternalCloud1'] - HostnameFormatDefault: '%stackname%-controller-%index%' - RoleParametersDefault: - OVNCMSOptions: "enable-chassis-as-gw" - # Deprecated & backward-compatible values (FIXME: Make parameters consistent) - # Set uses_deprecated_params to True if any deprecated params are used. 
- uses_deprecated_params: True - deprecated_param_extraconfig: 'controllerExtraConfig' - update_serial: 1 - ServicesDefault: - - OS::TripleO::Services::Aide - - OS::TripleO::Services::AodhApi - - OS::TripleO::Services::AodhEvaluator - - OS::TripleO::Services::AodhListener - - OS::TripleO::Services::AodhNotifier - - OS::TripleO::Services::AuditD - - OS::TripleO::Services::BarbicanApi - - OS::TripleO::Services::BarbicanBackendSimpleCrypto - - OS::TripleO::Services::BarbicanBackendDogtag - - OS::TripleO::Services::BarbicanBackendKmip - - OS::TripleO::Services::BarbicanBackendPkcs11Crypto - - OS::TripleO::Services::BootParams - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::CeilometerAgentCentral - - OS::TripleO::Services::CeilometerAgentNotification - - OS::TripleO::Services::CephClient - - OS::TripleO::Services::CephExternal - - OS::TripleO::Services::CephGrafana - - OS::TripleO::Services::CephMds - - OS::TripleO::Services::CephMgr - - OS::TripleO::Services::CephMon - - OS::TripleO::Services::CephRbdMirror - - OS::TripleO::Services::CephRgw - - OS::TripleO::Services::CinderApi - - OS::TripleO::Services::CinderBackendDellSc - - OS::TripleO::Services::CinderBackendDellEMCPowerFlex - - OS::TripleO::Services::CinderBackendDellEMCPowermax - - OS::TripleO::Services::CinderBackendDellEMCPowerStore - - OS::TripleO::Services::CinderBackendDellEMCSc - - OS::TripleO::Services::CinderBackendDellEMCUnity - - OS::TripleO::Services::CinderBackendDellEMCVNX - - OS::TripleO::Services::CinderBackendDellEMCXtremio - - OS::TripleO::Services::CinderBackendIBMSvf - - OS::TripleO::Services::CinderBackendNetApp - - OS::TripleO::Services::CinderBackendNfs - - OS::TripleO::Services::CinderBackendNVMeOF - - OS::TripleO::Services::CinderBackendPure - - OS::TripleO::Services::CinderBackendRbd - - OS::TripleO::Services::CinderBackup - - OS::TripleO::Services::CinderScheduler - - OS::TripleO::Services::CinderVolume - - OS::TripleO::Services::Clustercheck - - OS::TripleO::Services::Collectd - - OS::TripleO::Services::ContainerImagePrepare - - OS::TripleO::Services::DesignateApi - - OS::TripleO::Services::DesignateCentral - - OS::TripleO::Services::DesignateProducer - - OS::TripleO::Services::DesignateWorker - - OS::TripleO::Services::DesignateMDNS - - OS::TripleO::Services::DesignateSink - - OS::TripleO::Services::DesignateBind - - OS::TripleO::Services::Etcd - - OS::TripleO::Services::ExternalSwiftProxy - - OS::TripleO::Services::Frr - - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceApiInternal - - OS::TripleO::Services::GnocchiApi - - OS::TripleO::Services::GnocchiMetricd - - OS::TripleO::Services::GnocchiStatsd - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::HeatApi - - OS::TripleO::Services::HeatApiCfn - - OS::TripleO::Services::HeatEngine - - OS::TripleO::Services::Horizon - - OS::TripleO::Services::IpaClient - - OS::TripleO::Services::Ipsec - - OS::TripleO::Services::IronicApi - - OS::TripleO::Services::IronicConductor - - OS::TripleO::Services::IronicInspector - - OS::TripleO::Services::IronicPxe - - OS::TripleO::Services::IronicNeutronAgent - - OS::TripleO::Services::Iscsid - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::Keystone - - OS::TripleO::Services::LoginDefs - - OS::TripleO::Services::ManilaApi - - OS::TripleO::Services::ManilaBackendCephFs - - OS::TripleO::Services::ManilaBackendFlashBlade - - OS::TripleO::Services::ManilaBackendIsilon - - OS::TripleO::Services::ManilaBackendNetapp - - OS::TripleO::Services::ManilaBackendPowerMax - - 
OS::TripleO::Services::ManilaBackendUnity - - OS::TripleO::Services::ManilaBackendVNX - - OS::TripleO::Services::ManilaScheduler - - OS::TripleO::Services::ManilaShare - - OS::TripleO::Services::Memcached - - OS::TripleO::Services::MetricsQdr - - OS::TripleO::Services::Multipathd - - OS::TripleO::Services::MySQL - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::NeutronApi - - OS::TripleO::Services::NeutronBgpVpnApi - - OS::TripleO::Services::NeutronSfcApi - - OS::TripleO::Services::NeutronCorePlugin - - OS::TripleO::Services::NeutronDhcpAgent - - OS::TripleO::Services::NeutronL2gwAgent - - OS::TripleO::Services::NeutronL2gwApi - - OS::TripleO::Services::NeutronL3Agent - - OS::TripleO::Services::NeutronLinuxbridgeAgent - - OS::TripleO::Services::NeutronMetadataAgent - - OS::TripleO::Services::NeutronOvsAgent - - OS::TripleO::Services::NeutronAgentsIBConfig - - OS::TripleO::Services::NovaApi - - OS::TripleO::Services::NovaConductor - - OS::TripleO::Services::NovaIronic - - OS::TripleO::Services::NovaMetadata - - OS::TripleO::Services::NovaScheduler - - OS::TripleO::Services::NovaVncProxy - - OS::TripleO::Services::ContainersLogrotateCrond - - OS::TripleO::Services::OctaviaApi - - OS::TripleO::Services::OctaviaDeploymentConfig - - OS::TripleO::Services::OctaviaHealthManager - - OS::TripleO::Services::OctaviaHousekeeping - - OS::TripleO::Services::OctaviaWorker - - OS::TripleO::Services::OpenStackClients - - OS::TripleO::Services::OVNDBs - - OS::TripleO::Services::OVNController - - OS::TripleO::Services::Pacemaker - - OS::TripleO::Services::PlacementApi - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::OsloMessagingNotify - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Redis - - OS::TripleO::Services::Rhsm - - OS::TripleO::Services::Rsyslog - - OS::TripleO::Services::RsyslogSidecar - - OS::TripleO::Services::Securetty - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::SwiftProxy - - OS::TripleO::Services::SwiftDispersion - - OS::TripleO::Services::SwiftRingBuilder - - OS::TripleO::Services::SwiftStorage - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::Tuned - - OS::TripleO::Services::Unbound -############################################################################### -# Role: Compute # -############################################################################### -- name: Compute - description: | - Basic Compute Node role - CountDefault: 1 - # Create external Neutron bridge (unset if using ML2/OVS without DVR) - tags: - - compute - - external_bridge - networks: - InternalApiCloud1: - subnet: internal_api_cloud_1_subnet - TenantCloud1: - subnet: tenant_cloud_1_subnet - StorageCloud1: - subnet: storage_cloud_1_subnet - HostnameFormatDefault: '%stackname%-novacompute-%index%' - RoleParametersDefault: - TunedProfileName: "virtual-host" - # Deprecated & backward-compatible values (FIXME: Make parameters consistent) - # Set uses_deprecated_params to True if any deprecated params are used. - # These deprecated_params only need to be used for existing roles and not for - # composable roles. 
- uses_deprecated_params: True - deprecated_param_extraconfig: 'NovaComputeExtraConfig' - deprecated_param_metadata: 'NovaComputeServerMetadata' - deprecated_param_ips: 'NovaComputeIPs' - deprecated_server_resource_name: 'NovaCompute' - update_serial: 25 - ServicesDefault: - - OS::TripleO::Services::Aide - - OS::TripleO::Services::AuditD - - OS::TripleO::Services::BootParams - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::CephClient - - OS::TripleO::Services::CephExternal - - OS::TripleO::Services::Collectd - - OS::TripleO::Services::ComputeCeilometerAgent - - OS::TripleO::Services::ComputeNeutronCorePlugin - - OS::TripleO::Services::ComputeNeutronL3Agent - - OS::TripleO::Services::ComputeNeutronMetadataAgent - - OS::TripleO::Services::ComputeNeutronOvsAgent - - OS::TripleO::Services::Frr - - OS::TripleO::Services::IpaClient - - OS::TripleO::Services::Ipsec - - OS::TripleO::Services::Iscsid - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::LoginDefs - - OS::TripleO::Services::MetricsQdr - - OS::TripleO::Services::Multipathd - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::NeutronBgpVpnBagpipe - - OS::TripleO::Services::NeutronLinuxbridgeAgent - - OS::TripleO::Services::NovaAZConfig - - OS::TripleO::Services::NovaCompute - - OS::TripleO::Services::NovaLibvirt - - OS::TripleO::Services::NovaLibvirtGuests - - OS::TripleO::Services::NovaMigrationTarget - - OS::TripleO::Services::ContainersLogrotateCrond - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Rhsm - - OS::TripleO::Services::Rsyslog - - OS::TripleO::Services::RsyslogSidecar - - OS::TripleO::Services::Securetty - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::Tuned - - OS::TripleO::Services::OVNController - - OS::TripleO::Services::OVNMetadataAgent diff --git a/ci/custom_names_network_data.yaml b/ci/custom_names_network_data.yaml deleted file mode 100644 index 10c085b2ab..0000000000 --- a/ci/custom_names_network_data.yaml +++ /dev/null @@ -1,60 +0,0 @@ -- name: ExternalCloud1 - name_lower: external_cloud_1 - mtu: 1350 - vip: true - dns_domain: externalcloud1.ooo.test. - service_net_map_replace: external - subnets: - external_cloud_1_subnet: - ip_subnet: 10.0.0.0/24 - gateway_ip: 10.0.0.1 - allocation_pools: - - start: 10.0.0.10 - end: 10.0.0.50 -- name: InternalApiCloud1 - name_lower: internal_api_cloud_1 - mtu: 1350 - vip: true - dns_domain: internalapicloud1.ooo.test. - service_net_map_replace: internal_api - subnets: - internal_api_cloud_1_subnet: - ip_subnet: 172.17.0.0/24 - allocation_pools: - - start: 172.17.0.10 - end: 172.17.0.250 -- name: StorageCloud1 - name_lower: storage_cloud_1 - mtu: 1350 - vip: true - dns_domain: storagecloud1.ooo.test. - service_net_map_replace: storage - subnets: - storage_cloud_1_subnet: - ip_subnet: 172.18.0.0/24 - allocation_pools: - - start: 172.18.0.10 - end: 172.18.0.250 -- name: StorageMgmtCloud1 - name_lower: storage_mgmt_cloud_1 - mtu: 1350 - vip: true - dns_domain: storagemgmtcloud1.ooo.test. - service_net_map_replace: storage_mgmt - subnets: - storage_mgmt_cloud_1_subnet: - ip_subnet: 172.19.0.0/24 - allocation_pools: - - start: 172.19.0.10 - end: 172.19.0.250 -- name: TenantCloud1 - name_lower: tenant_cloud_1 - mtu: 1350 - dns_domain: tenantcloud1.ooo.test. 
- service_net_map_replace: tenant - subnets: - tenant_cloud_1_subnet: - ip_subnet: 172.16.0.0/24 - allocation_pools: - - start: 172.16.0.10 - end: 172.16.0.250 diff --git a/ci/custom_names_vip_data.yaml b/ci/custom_names_vip_data.yaml deleted file mode 100644 index 402c314e0c..0000000000 --- a/ci/custom_names_vip_data.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- network: storage_mgmt_cloud_1 -- network: internal_api_cloud_1 -- network: storage_cloud_1 -- ip_address: 10.0.0.5 # CI jobs rely on this fixed ip for the external VIP - network: external_cloud_1 -- network: ctlplane diff --git a/ci/environments/README.rst b/ci/environments/README.rst deleted file mode 100644 index 4a3cb9d97f..0000000000 --- a/ci/environments/README.rst +++ /dev/null @@ -1,4 +0,0 @@ -This directory contains environments that are used in tripleo-ci. They may change from -release to release or within a release, and should not be relied upon in a production -environment. The top-level ``environments`` directory in tripleo-heat-templates -contains the production-ready environment files. diff --git a/ci/environments/ceph-min-osds.yaml b/ci/environments/ceph-min-osds.yaml deleted file mode 100644 index 4e72d31358..0000000000 --- a/ci/environments/ceph-min-osds.yaml +++ /dev/null @@ -1,2 +0,0 @@ -parameter_defaults: - CephPoolDefaultSize: 1 diff --git a/ci/environments/multinode-containers.yaml b/ci/environments/multinode-containers.yaml deleted file mode 100644 index 5ce898501f..0000000000 --- a/ci/environments/multinode-containers.yaml +++ /dev/null @@ -1,52 +0,0 @@ -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2' - ControllerServices: - - OS::TripleO::Services::AuditD - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::ContainerImagePrepare - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::Keystone - - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceApiInternal - - OS::TripleO::Services::MySQL - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::NeutronApi - - OS::TripleO::Services::NeutronCorePlugin - - OS::TripleO::Services::OVNDBs - - OS::TripleO::Services::OVNController - - OS::TripleO::Services::OVNMetadataAgent - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::OsloMessagingNotify - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::Memcached - - OS::TripleO::Services::NovaConductor - - OS::TripleO::Services::NovaApi - - OS::TripleO::Services::PlacementApi - - OS::TripleO::Services::NovaMetadata - - OS::TripleO::Services::NovaScheduler - - OS::TripleO::Services::ContainersLogrotateCrond - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::NovaCompute - - OS::TripleO::Services::NovaLibvirt - - OS::TripleO::Services::Horizon - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::NovaMigrationTarget - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::Iscsid - - OS::TripleO::Services::Multipathd - - OS::TripleO::Services::Pacemaker - - OS::TripleO::Services::Clustercheck - - OS::TripleO::Services::NovaManager - ControllerExtraConfig: - nova::compute::libvirt::services::libvirt_virt_type: qemu - nova::compute::libvirt::virt_type: qemu - Debug: True - DockerPuppetDebug: True - NotificationDriver: 'noop' - GlanceBackend: 'file' - ContainerCli: podman diff --git 
a/ci/environments/multinode-core.yaml b/ci/environments/multinode-core.yaml deleted file mode 100644 index 038ebe755c..0000000000 --- a/ci/environments/multinode-core.yaml +++ /dev/null @@ -1,34 +0,0 @@ -heat_template_version: wallaby - -description: > - OpenStack Core Service - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. Use - parameter_merge_strategies to merge it with the defaults. - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - Debug: - type: boolean - default: false - description: Set to True to enable debugging on all services. - -resources: - -outputs: - role_data: - description: Role data for the multinode firewall configuration - value: - service_name: multinode_core - firewall_rules: - '999 core': - proto: 'udp' - dport: - - 4789 diff --git a/ci/environments/network/multiple-nics-ipv6/README b/ci/environments/network/multiple-nics-ipv6/README deleted file mode 100644 index 189bfb58ff..0000000000 --- a/ci/environments/network/multiple-nics-ipv6/README +++ /dev/null @@ -1,23 +0,0 @@ -Generated Network Isolation Templates -------------------------------------- -These templates were generated by the UI tool at -https://github.com/cybertron/tripleo-scripts#net-iso-genpy - -ui-settings.pickle is specific to the tool. TripleO will not use it when -doing deployments with these templates, but it is needed to be able to -load the templates into the UI again. Note that the UI only reads this file, -so any changes made by hand to the templates will not be reflected in the UI. - -The network-isolation.yaml file needs to reference the port files shipped with -tripleo-heat-templates, so by default the tool generates the paths assuming -network-isolation.yaml will be copied into the environments/ directory of -tripleo-heat-templates. - -If the standard tripleo-heat-templates are in use, then the -network-isolation-absolute.yaml file can be used instead. It has hard-coded -references to the port files in /usr/share/openstack-tripleo-heat-templates. 
- -If the generated network isolation templates are at ~/generated-templates, an -example deployment command would look like: - -openstack overcloud deploy --templates -e ~/generated-templates/network-isolation-absolute.yaml -e ~/generated-templates/network-environment.yaml diff --git a/ci/environments/network/multiple-nics-ipv6/network-environment.yaml b/ci/environments/network/multiple-nics-ipv6/network-environment.yaml deleted file mode 100644 index 818a1ddbc4..0000000000 --- a/ci/environments/network/multiple-nics-ipv6/network-environment.yaml +++ /dev/null @@ -1,18 +0,0 @@ -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2' - BlockStorageNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2' - CellControllerNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2' - ObjectStorageNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2' - CephStorageNetworkConfigTemplate: 'templates/ci/multiple_nics_ipv6.j2' - ExternalNetCidr: 2001:db8:fd00:1000::/64 - ExternalAllocationPools: [{"start": "2001:db8:fd00:1000::10", "end": "2001:db8:fd00:1000:ffff:ffff:ffff:fffe"}] - ExternalInterfaceDefaultRoute: 2001:db8:fd00:1000::1 - InternalApiNetCidr: fd00:fd00:fd00:2000::/64 - InternalApiAllocationPools: [{"start": "fd00:fd00:fd00:2000::10", "end": "fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe"}] - StorageNetCidr: fd00:fd00:fd00:3000::/64 - StorageAllocationPools: [{"start": "fd00:fd00:fd00:3000::10", "end": "fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe"}] - StorageMgmtNetCidr: fd00:fd00:fd00:4000::/64 - StorageMgmtAllocationPools: [{"start": "fd00:fd00:fd00:4000::10", "end": "fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe"}] - TenantNetCidr: 172.16.0.0/24 - TenantAllocationPools: [{"start": "172.16.0.10", "end": "172.16.0.250"}] diff --git a/ci/environments/network/multiple-nics-ipv6/network-isolation-absolute.yaml b/ci/environments/network/multiple-nics-ipv6/network-isolation-absolute.yaml deleted file mode 100644 index 216e73789d..0000000000 --- a/ci/environments/network/multiple-nics-ipv6/network-isolation-absolute.yaml +++ /dev/null @@ -1,29 +0,0 @@ -resource_registry: - # External - OS::TripleO::Network::External: /usr/share/openstack-tripleo-heat-templates/network/external_v6.yaml - OS::TripleO::Network::Ports::ExternalVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external_v6.yaml - OS::TripleO::Controller::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external_v6.yaml - # InternalApi - OS::TripleO::Network::InternalApi: /usr/share/openstack-tripleo-heat-templates/network/internal_api_v6.yaml - OS::TripleO::Network::Ports::InternalApiVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api_v6.yaml - OS::TripleO::Controller::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api_v6.yaml - OS::TripleO::Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api_v6.yaml - # Storage - OS::TripleO::Network::Storage: /usr/share/openstack-tripleo-heat-templates/network/storage_v6.yaml - OS::TripleO::Network::Ports::StorageVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_v6.yaml - OS::TripleO::Controller::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_v6.yaml - OS::TripleO::Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_v6.yaml - 
OS::TripleO::CephStorage::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_v6.yaml - # StorageMgmt - OS::TripleO::Network::StorageMgmt: /usr/share/openstack-tripleo-heat-templates/network/storage_mgmt_v6.yaml - OS::TripleO::Network::Ports::StorageMgmtVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_v6.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_v6.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_v6.yaml - # Tenant - OS::TripleO::Network::Tenant: /usr/share/openstack-tripleo-heat-templates/network/tenant.yaml - OS::TripleO::Network::Ports::TenantVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::Controller::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml -parameter_defaults: - ManilaIPv6: True - diff --git a/ci/environments/network/multiple-nics-ipv6/network-isolation.yaml b/ci/environments/network/multiple-nics-ipv6/network-isolation.yaml deleted file mode 100644 index 6232f3ef4b..0000000000 --- a/ci/environments/network/multiple-nics-ipv6/network-isolation.yaml +++ /dev/null @@ -1,28 +0,0 @@ -resource_registry: - # External - OS::TripleO::Network::External: ../../../../network/external_v6.yaml - OS::TripleO::Network::Ports::ExternalVipPort: ../../../../network/ports/external_v6.yaml - OS::TripleO::Controller::Ports::ExternalPort: ../../../../network/ports/external_v6.yaml - # InternalApi - OS::TripleO::Network::InternalApi: ../../../../network/internal_api_v6.yaml - OS::TripleO::Network::Ports::InternalApiVipPort: ../../../../network/ports/internal_api_v6.yaml - OS::TripleO::Controller::Ports::InternalApiPort: ../../../../network/ports/internal_api_v6.yaml - OS::TripleO::Compute::Ports::InternalApiPort: ../../../../network/ports/internal_api_v6.yaml - # Storage - OS::TripleO::Network::Storage: ../../../../network/storage_v6.yaml - OS::TripleO::Network::Ports::StorageVipPort: ../../../../network/ports/storage_v6.yaml - OS::TripleO::Controller::Ports::StoragePort: ../../../../network/ports/storage_v6.yaml - OS::TripleO::Compute::Ports::StoragePort: ../../../../network/ports/storage_v6.yaml - OS::TripleO::CephStorage::Ports::StoragePort: ../../../../network/ports/storage_v6.yaml - # StorageMgmt - OS::TripleO::Network::StorageMgmt: ../../../../network/storage_mgmt_v6.yaml - OS::TripleO::Network::Ports::StorageMgmtVipPort: ../../../../network/ports/storage_mgmt_v6.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt_v6.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt_v6.yaml - # Tenant - OS::TripleO::Network::Tenant: ../../../../network/tenant.yaml - OS::TripleO::Network::Ports::TenantVipPort: ../../../../network/ports/tenant.yaml - OS::TripleO::Controller::Ports::TenantPort: ../../../../network/ports/tenant.yaml - OS::TripleO::Compute::Ports::TenantPort: ../../../../network/ports/tenant.yaml -parameter_defaults: - ManilaIPv6: True diff --git a/ci/environments/network/multiple-nics-ipv6/ui-settings.pickle b/ci/environments/network/multiple-nics-ipv6/ui-settings.pickle deleted file mode 100644 index 02161a4b57..0000000000 --- a/ci/environments/network/multiple-nics-ipv6/ui-settings.pickle 
+++ /dev/null @@ -1,732 +0,0 @@ -(dp0 -S'global_data' -p1 -(dp2 -S'control' -p3 -(dp4 -S'route' -p5 -V192.168.24.1 -p6 -sS'mask' -p7 -I24 -sS'ec2' -p8 -V192.168.24.1 -p9 -ssS'major' -p10 -I1 -sS'management' -p11 -(dp12 -S'start' -p13 -V172.20.0.10 -p14 -sS'cidr' -p15 -V172.20.0.0/24 -p16 -sS'vlan' -p17 -I6 -sS'end' -p18 -V172.20.0.250 -p19 -ssS'dns2' -p20 -V8.8.4.4 -p21 -sS'dns1' -p22 -V8.8.8.8 -p23 -sS'storage' -p24 -(dp25 -g13 -Vfd00:fd00:fd00:3000::10 -p26 -sg15 -Vfd00:fd00:fd00:3000::/64 -p27 -sg17 -I3 -sg18 -Vfd00:fd00:fd00:3000:ffff:ffff:ffff:fffe -p28 -ssS'auto_routes' -p29 -I00 -sS'bond_options' -p30 -V -p31 -sS'version' -p32 -I2 -sS'external' -p33 -(dp34 -S'bridge' -p35 -V'' -p36 -sg18 -V2001:db8:fd00:1000:ffff:ffff:ffff:fffe -p37 -sg17 -I1 -sg13 -V2001:db8:fd00:1000::10 -p38 -sg15 -V2001:db8:fd00:1000::/64 -p39 -sS'gateway' -p40 -V2001:db8:fd00:1000::1 -p41 -ssS'internal_api' -p42 -(dp43 -g13 -Vfd00:fd00:fd00:2000::10 -p44 -sg15 -Vfd00:fd00:fd00:2000::/64 -p45 -sg17 -I2 -sg18 -Vfd00:fd00:fd00:2000:ffff:ffff:ffff:fffe -p46 -ssS'ipv6' -p47 -I01 -sS'storage_mgmt' -p48 -(dp49 -g13 -Vfd00:fd00:fd00:4000::10 -p50 -sg15 -Vfd00:fd00:fd00:4000::/64 -p51 -sg17 -I4 -sg18 -Vfd00:fd00:fd00:4000:ffff:ffff:ffff:fffe -p52 -ssS'minor' -p53 -I2 -sS'tenant' -p54 -(dp55 -g13 -V172.16.0.10 -p56 -sg15 -V172.16.0.0/24 -p57 -sg17 -I5 -sg18 -V172.16.0.250 -p58 -sssS'data' -p59 -(dp60 -S'cinder-storage.yaml' -p61 -(lp62 -sS'ceph-storage.yaml' -p63 -(lp64 -(dp65 -Vaddresses -p66 -(lp67 -sVnetwork -p68 -VControlPlane -p69 -sVprimary -p70 -I01 -sVmtu -p71 -I1350 -sS'members' -p72 -(lp73 -(dp74 -Vip_netmask -p75 -V0.0.0.0/0 -p76 -sVname -p77 -VRoute -p78 -sVdefault -p79 -I01 -sVnext_hop -p80 -V{get_param: ControlPlaneDefaultRoute} -p81 -sg72 -(lp82 -sVtype -p83 -Vroute -p84 -sasVroutes -p85 -(lp86 -sVuse_dhcp -p87 -I00 -sVtype -p88 -Vinterface -p89 -sVname -p90 -Vnic1 -p91 -sa(dp92 -Vaddresses -p93 -(lp94 -sVnetwork -p95 -VStorage -p96 -sVprimary -p97 -I01 -sVmtu -p98 -I1350 -sg72 -(lp99 -sVroutes -p100 -(lp101 -sVuse_dhcp -p102 -I00 -sVtype -p103 -Vinterface -p104 -sVname -p105 -Vnic4 -p106 -sa(dp107 -Vaddresses -p108 -(lp109 -sVnetwork -p110 -VStorageMgmt -p111 -sVprimary -p112 -I01 -sVmtu -p113 -I1350 -sg72 -(lp114 -sVroutes -p115 -(lp116 -sVuse_dhcp -p117 -I00 -sVtype -p118 -Vinterface -p119 -sVname -p120 -Vnic5 -p121 -sasS'controller.yaml' -p122 -(lp123 -(dp124 -Vaddresses -p125 -(lp126 -sVnetwork -p127 -VControlPlane -p128 -sVprimary -p129 -I01 -sVmtu -p130 -I1350 -sg72 -(lp131 -(dp132 -Vip_netmask -p133 -V0.0.0.0/0 -p134 -sVname -p135 -VRoute -p136 -sVdefault -p137 -I01 -sVnext_hop -p138 -V{get_param: ControlPlaneDefaultRoute} -p139 -sg72 -(lp140 -sVtype -p141 -Vroute -p142 -sasVroutes -p143 -(lp144 -sVuse_dhcp -p145 -I00 -sVtype -p146 -Vinterface -p147 -sVname -p148 -Vnic1 -p149 -sa(dp150 -Vdns_servers -p151 -V{get_param: DnsServers} -p152 -sVaddresses -p153 -(lp154 -sVnetwork -p155 -VExternal -p156 -sVmtu -p157 -I-1 -sg72 -(lp158 -(dp159 -Vaddresses -p160 -(lp161 -sVnetwork -p162 -VNone -p163 -sVprimary -p164 -I01 -sVmtu -p165 -I1350 -sg72 -(lp166 -sVroutes -p167 -(lp168 -sVuse_dhcp -p169 -I00 -sVtype -p170 -Vinterface -p171 -sVname -p172 -Vnic2 -p173 -sa(dp174 -Vip_netmask -p175 -V0.0.0.0/0 -p176 -sVname -p177 -VRoute -p178 -sVdefault -p179 -I01 -sVnext_hop -p180 -V{get_param: ExternalInterfaceDefaultRoute} -p181 -sg72 -(lp182 -sVtype -p183 -Vroute -p184 -sasVroutes -p185 -(lp186 -sVuse_dhcp -p187 -I00 -sVtype -p188 -Vovs_bridge -p189 -sVname -p190 -Vbr-ex -p191 -sa(dp192 -Vaddresses -p193 -(lp194 
-sVnetwork -p195 -VInternalApi -p196 -sVprimary -p197 -I01 -sVmtu -p198 -I1350 -sg72 -(lp199 -sVroutes -p200 -(lp201 -sVuse_dhcp -p202 -I00 -sVtype -p203 -Vinterface -p204 -sVname -p205 -Vnic3 -p206 -sa(dp207 -Vaddresses -p208 -(lp209 -sVnetwork -p210 -VStorage -p211 -sVprimary -p212 -I01 -sVmtu -p213 -I1350 -sg72 -(lp214 -sVroutes -p215 -(lp216 -sVuse_dhcp -p217 -I00 -sVtype -p218 -Vinterface -p219 -sVname -p220 -Vnic4 -p221 -sa(dp222 -Vaddresses -p223 -(lp224 -sVnetwork -p225 -VStorageMgmt -p226 -sVprimary -p227 -I01 -sVmtu -p228 -I1350 -sg72 -(lp229 -sVroutes -p230 -(lp231 -sVuse_dhcp -p232 -I00 -sVtype -p233 -Vinterface -p234 -sVname -p235 -Vnic5 -p236 -sa(dp237 -Vdns_servers -p238 -V{get_param: DnsServers} -p239 -sVaddresses -p240 -(lp241 -sVnetwork -p242 -VTenant -p243 -sVmtu -p244 -I-1 -sg72 -(lp245 -(dp246 -Vaddresses -p247 -(lp248 -sVnetwork -p249 -VNone -p250 -sVprimary -p251 -I01 -sVmtu -p252 -I1350 -sg72 -(lp253 -sVroutes -p254 -(lp255 -sVuse_dhcp -p256 -I00 -sVtype -p257 -Vinterface -p258 -sVname -p259 -Vnic6 -p260 -sasVroutes -p261 -(lp262 -sVuse_dhcp -p263 -I00 -sVtype -p264 -Vovs_bridge -p265 -sVname -p266 -Vbr-tenant -p267 -sasS'swift-storage.yaml' -p268 -(lp269 -sS'compute.yaml' -p270 -(lp271 -(dp272 -Vaddresses -p273 -(lp274 -sVnetwork -p275 -VControlPlane -p276 -sVprimary -p277 -I01 -sVmtu -p278 -I1350 -sg72 -(lp279 -(dp280 -Vip_netmask -p281 -V0.0.0.0/0 -p282 -sVname -p283 -VRoute -p284 -sVdefault -p285 -I01 -sVnext_hop -p286 -V{get_param: ControlPlaneDefaultRoute} -p287 -sg72 -(lp288 -sVtype -p289 -Vroute -p290 -sasVroutes -p291 -(lp292 -sVuse_dhcp -p293 -I00 -sVtype -p294 -Vinterface -p295 -sVname -p296 -Vnic1 -p297 -sa(dp298 -Vaddresses -p299 -(lp300 -sVnetwork -p301 -VInternalApi -p302 -sVprimary -p303 -I01 -sVmtu -p304 -I1350 -sg72 -(lp305 -sVroutes -p306 -(lp307 -sVuse_dhcp -p308 -I00 -sVtype -p309 -Vinterface -p310 -sVname -p311 -Vnic3 -p312 -sa(dp313 -Vaddresses -p314 -(lp315 -sVnetwork -p316 -VStorage -p317 -sVprimary -p318 -I01 -sVmtu -p319 -I1350 -sg72 -(lp320 -sVroutes -p321 -(lp322 -sVuse_dhcp -p323 -I00 -sVtype -p324 -Vinterface -p325 -sVname -p326 -Vnic4 -p327 -sa(dp328 -Vdns_servers -p329 -V{get_param: DnsServers} -p330 -sVaddresses -p331 -(lp332 -sVnetwork -p333 -VTenant -p334 -sVmtu -p335 -I-1 -sg72 -(lp336 -(dp337 -Vaddresses -p338 -(lp339 -sVnetwork -p340 -VNone -p341 -sVprimary -p342 -I01 -sVmtu -p343 -I1350 -sg72 -(lp344 -sVroutes -p345 -(lp346 -sVuse_dhcp -p347 -I00 -sVtype -p348 -Vinterface -p349 -sVname -p350 -Vnic6 -p351 -sasVroutes -p352 -(lp353 -sVuse_dhcp -p354 -I00 -sVtype -p355 -Vovs_bridge -p356 -sVname -p357 -Vbr-tenant -p358 -sass. \ No newline at end of file diff --git a/ci/environments/network/multiple-nics/README b/ci/environments/network/multiple-nics/README deleted file mode 100644 index 189bfb58ff..0000000000 --- a/ci/environments/network/multiple-nics/README +++ /dev/null @@ -1,23 +0,0 @@ -Generated Network Isolation Templates -------------------------------------- -These templates were generated by the UI tool at -https://github.com/cybertron/tripleo-scripts#net-iso-genpy - -ui-settings.pickle is specific to the tool. TripleO will not use it when -doing deployments with these templates, but it is needed to be able to -load the templates into the UI again. Note that the UI only reads this file, -so any changes made by hand to the templates will not be reflected in the UI. 
- -The network-isolation.yaml file needs to reference the port files shipped with -tripleo-heat-templates, so by default the tool generates the paths assuming -network-isolation.yaml will be copied into the environments/ directory of -tripleo-heat-templates. - -If the standard tripleo-heat-templates are in use, then the -network-isolation-absolute.yaml file can be used instead. It has hard-coded -references to the port files in /usr/share/openstack-tripleo-heat-templates. - -If the generated network isolation templates are at ~/generated-templates, an -example deployment command would look like: - -openstack overcloud deploy --templates -e ~/generated-templates/network-isolation-absolute.yaml -e ~/generated-templates/network-environment.yaml diff --git a/ci/environments/network/multiple-nics/custom-network-environment.yaml b/ci/environments/network/multiple-nics/custom-network-environment.yaml deleted file mode 100644 index 841b2cdf5d..0000000000 --- a/ci/environments/network/multiple-nics/custom-network-environment.yaml +++ /dev/null @@ -1,27 +0,0 @@ -parameter_merge_strategies: - ServiceNetMap: merge -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/custom_network_multiple_nics.j2' - ComputeNetworkConfigTemplate: 'templates/ci/custom_network_multiple_nics.j2' - CloudNameExternalCloud1: overcloud1.ooo.test - CloudNameInternalApiCloud1: overcloud1.internalapicloud1.ooo.test - CloudNameStorageCloud1: overcloud1.storagecloud1.ooo.test - CloudNameStorageMgmtCloud1: overcloud1.storagemgmtcloud1.ooo.test - CloudNameCtlplane: overcloud1.ctlplane.ooo.test - CloudDomain: ooo.test - VipSubnetMap: - ctlplane: ctlplane-subnet - InternalApiCloud1: internal_api_cloud_1_subnet - StorageCloud1: storage_cloud_1_subnet - StorageMgmtCloud1: storage_mgmt_cloud_1_subnet - TenantCloud1: tenant_cloud_1_subnet - ExternalCloud1: external_cloud_1_subnet - redis: internal_api_cloud_1_subnet - ovn_dbs: internal_api_cloud_1_subnet - ServiceNetMap: - ComputeHostnameResolveNetwork: internal_api_cloud_1 - ControllerHostnameResolveNetwork: internal_api_cloud_1 - CephStorageHostnameResolveNetwork: storage_cloud_1 - ControllerMetricsQdrNetwork: internal_api_cloud_1 - ComputeMetricsQdrNetwork: internal_api_cloud_1 - CephStorageMetricsQdrNetwork: storage_cloud_1 diff --git a/ci/environments/network/multiple-nics/network-environment.yaml b/ci/environments/network/multiple-nics/network-environment.yaml deleted file mode 100644 index a57fa59258..0000000000 --- a/ci/environments/network/multiple-nics/network-environment.yaml +++ /dev/null @@ -1,19 +0,0 @@ -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multiple_nics.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multiple_nics.j2' - BlockStorageNetworkConfigTemplate: 'templates/ci/multiple_nics.j2' - CellControllerNetworkConfigTemplate: 'templates/ci/multiple_nics.j2' - ObjectStorageNetworkConfigTemplate: 'templates/ci/multiple_nics.j2' - CephStorageNetworkConfigTemplate: 'templates/ci/multiple_nics.j2' - ExternalNetCidr: 10.0.0.0/24 - ExternalAllocationPools: [{"start": "10.0.0.10", "end": "10.0.0.50"}] - ExternalInterfaceDefaultRoute: 10.0.0.1 - PublicVirtualFixedIPs: [{ "ip_address": "10.0.0.5" }] - InternalApiNetCidr: 172.17.0.0/24 - InternalApiAllocationPools: [{"start": "172.17.0.10", "end": "172.17.0.250"}] - StorageNetCidr: 172.18.0.0/24 - StorageAllocationPools: [{"start": "172.18.0.10", "end": "172.18.0.250"}] - StorageMgmtNetCidr: 172.19.0.0/24 - StorageMgmtAllocationPools: [{"start": "172.19.0.10", "end": "172.19.0.250"}] - 
TenantNetCidr: 172.16.0.0/24 - TenantAllocationPools: [{"start": "172.16.0.10", "end": "172.16.0.250"}] diff --git a/ci/environments/network/multiple-nics/network-isolation-absolute.yaml b/ci/environments/network/multiple-nics/network-isolation-absolute.yaml deleted file mode 100644 index b925631c3a..0000000000 --- a/ci/environments/network/multiple-nics/network-isolation-absolute.yaml +++ /dev/null @@ -1,32 +0,0 @@ -resource_registry: - # External - OS::TripleO::Network::External: /usr/share/openstack-tripleo-heat-templates/network/external.yaml - OS::TripleO::Network::Ports::ExternalVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml - OS::TripleO::Controller::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml - OS::TripleO::CellController::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml - # InternalApi - OS::TripleO::Network::InternalApi: /usr/share/openstack-tripleo-heat-templates/network/internal_api.yaml - OS::TripleO::Network::Ports::InternalApiVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - OS::TripleO::Controller::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - OS::TripleO::CellController::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - OS::TripleO::Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - # Storage - OS::TripleO::Network::Storage: /usr/share/openstack-tripleo-heat-templates/network/storage.yaml - OS::TripleO::Network::Ports::StorageVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::Controller::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::CellController::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::CephStorage::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - # StorageMgmt - OS::TripleO::Network::StorageMgmt: /usr/share/openstack-tripleo-heat-templates/network/storage_mgmt.yaml - OS::TripleO::Network::Ports::StorageMgmtVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - OS::TripleO::CellController::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - OS::TripleO::Compute::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - # Tenant - OS::TripleO::Network::Tenant: /usr/share/openstack-tripleo-heat-templates/network/tenant.yaml - OS::TripleO::Network::Ports::TenantVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::Controller::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::CellController::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::Compute::Ports::TenantPort: 
/usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml diff --git a/ci/environments/network/multiple-nics/network-isolation.yaml b/ci/environments/network/multiple-nics/network-isolation.yaml deleted file mode 100644 index fd05d7cd0c..0000000000 --- a/ci/environments/network/multiple-nics/network-isolation.yaml +++ /dev/null @@ -1,27 +0,0 @@ -resource_registry: - # External - OS::TripleO::Network::External: ../../../../network/external.yaml - OS::TripleO::Network::Ports::ExternalVipPort: ../../../../network/ports/external.yaml - OS::TripleO::Controller::Ports::ExternalPort: ../../../../network/ports/external.yaml - # InternalApi - OS::TripleO::Network::InternalApi: ../../../../network/internal_api.yaml - OS::TripleO::Network::Ports::InternalApiVipPort: ../../../../network/ports/internal_api.yaml - OS::TripleO::Controller::Ports::InternalApiPort: ../../../../network/ports/internal_api.yaml - OS::TripleO::Compute::Ports::InternalApiPort: ../../../../network/ports/internal_api.yaml - # Storage - OS::TripleO::Network::Storage: ../../../../network/storage.yaml - OS::TripleO::Network::Ports::StorageVipPort: ../../../../network/ports/storage.yaml - OS::TripleO::Controller::Ports::StoragePort: ../../../../network/ports/storage.yaml - OS::TripleO::Compute::Ports::StoragePort: ../../../../network/ports/storage.yaml - OS::TripleO::CephStorage::Ports::StoragePort: ../../../../network/ports/storage.yaml - # StorageMgmt - OS::TripleO::Network::StorageMgmt: ../../../../network/storage_mgmt.yaml - OS::TripleO::Network::Ports::StorageMgmtVipPort: ../../../../network/ports/storage_mgmt.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml - OS::TripleO::Compute::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml - # Tenant - OS::TripleO::Network::Tenant: ../../../../network/tenant.yaml - OS::TripleO::Network::Ports::TenantVipPort: ../../../../network/ports/tenant.yaml - OS::TripleO::Controller::Ports::TenantPort: ../../../../network/ports/tenant.yaml - OS::TripleO::Compute::Ports::TenantPort: ../../../../network/ports/tenant.yaml diff --git a/ci/environments/network/multiple-nics/ui-settings.pickle b/ci/environments/network/multiple-nics/ui-settings.pickle deleted file mode 100644 index 32b4be3463..0000000000 --- a/ci/environments/network/multiple-nics/ui-settings.pickle +++ /dev/null @@ -1,716 +0,0 @@ -(dp0 -S'global_data' -p1 -(dp2 -S'control' -p3 -(dp4 -S'route' -p5 -V192.168.24.1 -p6 -sS'mask' -p7 -I24 -sS'ec2' -p8 -V192.168.24.1 -p9 -ssS'major' -p10 -I1 -sS'management' -p11 -(dp12 -S'start' -p13 -V172.20.0.10 -p14 -sS'cidr' -p15 -V172.20.0.0/24 -p16 -sS'vlan' -p17 -I6 -sS'end' -p18 -V172.20.0.250 -p19 -ssS'dns2' -p20 -V8.8.4.4 -p21 -sS'dns1' -p22 -V8.8.8.8 -p23 -sS'storage' -p24 -(dp25 -g13 -V172.18.0.10 -p26 -sg15 -V172.18.0.0/24 -p27 -sg17 -I3 -sg18 -V172.18.0.250 -p28 -ssS'auto_routes' -p29 -I01 -sS'bond_options' -p30 -V -p31 -sS'version' -p32 -I1 -sS'external' -p33 -(dp34 -S'bridge' -p35 -V'' -p36 -sg18 -V10.0.0.50 -p37 -sg17 -I1 -sg13 -V10.0.0.10 -p38 -sg15 -V10.0.0.0/24 -p39 -sS'gateway' -p40 -V10.0.0.1 -p41 -ssS'internal_api' -p42 -(dp43 -g13 -V172.17.0.10 -p44 -sg15 -V172.17.0.0/24 -p45 -sg17 -I2 -sg18 -V172.17.0.250 -p46 -ssS'ipv6' -p47 -I00 -sS'storage_mgmt' -p48 -(dp49 -g13 -V172.19.0.10 -p50 -sg15 -V172.19.0.0/24 -p51 -sg17 -I4 -sg18 -V172.19.0.250 -p52 -ssS'minor' -p53 -I2 -sS'tenant' -p54 -(dp55 -g13 -V172.16.0.10 
-p56 -sg15 -V172.16.0.0/24 -p57 -sg17 -I5 -sg18 -V172.16.0.250 -p58 -sssS'data' -p59 -(dp60 -S'cinder-storage.yaml' -p61 -(lp62 -sS'ceph-storage.yaml' -p63 -(lp64 -(dp65 -Vaddresses -p66 -(lp67 -sVnetwork -p68 -VControlPlane -p69 -sVprimary -p70 -I01 -sVmtu -p71 -I1350 -sVport_name -p72 -g31 -sS'members' -p73 -(lp74 -sVroutes -p75 -(lp76 -sVuse_dhcp -p77 -I00 -sVtype -p78 -Vinterface -p79 -sVname -p80 -Vnic1 -p81 -sa(dp82 -Vaddresses -p83 -(lp84 -sVnetwork -p85 -VStorage -p86 -sVprimary -p87 -I01 -sVmtu -p88 -I1350 -sVport_name -p89 -g31 -sg73 -(lp90 -sVroutes -p91 -(lp92 -sVuse_dhcp -p93 -I00 -sVtype -p94 -Vinterface -p95 -sVname -p96 -Vnic4 -p97 -sa(dp98 -Vaddresses -p99 -(lp100 -sVnetwork -p101 -VStorageMgmt -p102 -sVprimary -p103 -I01 -sVmtu -p104 -I1350 -sVport_name -p105 -g31 -sg73 -(lp106 -sVroutes -p107 -(lp108 -sVuse_dhcp -p109 -I00 -sVtype -p110 -Vinterface -p111 -sVname -p112 -Vnic5 -p113 -sasS'controller.yaml' -p114 -(lp115 -(dp116 -Vaddresses -p117 -(lp118 -sVnetwork -p119 -VControlPlane -p120 -sVprimary -p121 -I01 -sVmtu -p122 -I1350 -sVport_name -p123 -g31 -sg73 -(lp124 -sVroutes -p125 -(lp126 -sVuse_dhcp -p127 -I00 -sVtype -p128 -Vinterface -p129 -sVname -p130 -Vnic1 -p131 -sa(dp132 -Vdns_servers -p133 -V{get_param: DnsServers} -p134 -sVaddresses -p135 -(lp136 -sVnetwork -p137 -VExternal -p138 -sVmtu -p139 -I-1 -sg73 -(lp140 -(dp141 -Vaddresses -p142 -(lp143 -sVnetwork -p144 -VNone -p145 -sVprimary -p146 -I01 -sVmtu -p147 -I1350 -sVport_name -p148 -g31 -sg73 -(lp149 -sVroutes -p150 -(lp151 -sVuse_dhcp -p152 -I00 -sVtype -p153 -Vinterface -p154 -sVname -p155 -Vnic2 -p156 -sasVroutes -p157 -(lp158 -sVuse_dhcp -p159 -I00 -sVtype -p160 -Vovs_bridge -p161 -sVname -p162 -Vbr-ex -p163 -sa(dp164 -Vaddresses -p165 -(lp166 -sVnetwork -p167 -VInternalApi -p168 -sVprimary -p169 -I01 -sVmtu -p170 -I1350 -sVport_name -p171 -g31 -sg73 -(lp172 -sVroutes -p173 -(lp174 -sVuse_dhcp -p175 -I00 -sVtype -p176 -Vinterface -p177 -sVname -p178 -Vnic3 -p179 -sa(dp180 -Vaddresses -p181 -(lp182 -sVnetwork -p183 -VStorage -p184 -sVprimary -p185 -I01 -sVmtu -p186 -I1350 -sVport_name -p187 -g31 -sg73 -(lp188 -sVroutes -p189 -(lp190 -sVuse_dhcp -p191 -I00 -sVtype -p192 -Vinterface -p193 -sVname -p194 -Vnic4 -p195 -sa(dp196 -Vaddresses -p197 -(lp198 -sVnetwork -p199 -VStorageMgmt -p200 -sVprimary -p201 -I01 -sVmtu -p202 -I1350 -sVport_name -p203 -g31 -sg73 -(lp204 -sVroutes -p205 -(lp206 -sVuse_dhcp -p207 -I00 -sVtype -p208 -Vinterface -p209 -sVname -p210 -Vnic5 -p211 -sa(dp212 -Vdns_servers -p213 -V{get_param: DnsServers} -p214 -sVaddresses -p215 -(lp216 -sVnetwork -p217 -VTenant -p218 -sVmtu -p219 -I-1 -sg73 -(lp220 -(dp221 -Vaddresses -p222 -(lp223 -sVnetwork -p224 -VNone -p225 -sVprimary -p226 -I01 -sVmtu -p227 -I1350 -sVport_name -p228 -g31 -sg73 -(lp229 -sVroutes -p230 -(lp231 -sVuse_dhcp -p232 -I00 -sVtype -p233 -Vinterface -p234 -sVname -p235 -Vnic6 -p236 -sasVroutes -p237 -(lp238 -sVuse_dhcp -p239 -I00 -sVtype -p240 -Vovs_bridge -p241 -sVname -p242 -Vbr-tenant -p243 -sasS'swift-storage.yaml' -p244 -(lp245 -sS'compute.yaml' -p246 -(lp247 -(dp248 -Vaddresses -p249 -(lp250 -sVnetwork -p251 -VControlPlane -p252 -sVprimary -p253 -I01 -sVmtu -p254 -I1350 -sVport_name -p255 -g31 -sg73 -(lp256 -sVroutes -p257 -(lp258 -sVuse_dhcp -p259 -I00 -sVtype -p260 -Vinterface -p261 -sVname -p262 -Vnic1 -p263 -sa(dp264 -Vaddresses -p265 -(lp266 -sVnetwork -p267 -VInternalApi -p268 -sVprimary -p269 -I01 -sVmtu -p270 -I1350 -sVport_name -p271 -g31 -sg73 -(lp272 -sVroutes -p273 -(lp274 -sVuse_dhcp -p275 -I00 -sVtype 
-p276 -Vinterface -p277 -sVname -p278 -Vnic3 -p279 -sa(dp280 -Vaddresses -p281 -(lp282 -sVnetwork -p283 -VStorage -p284 -sVprimary -p285 -I01 -sVmtu -p286 -I1350 -sVport_name -p287 -g31 -sg73 -(lp288 -sVroutes -p289 -(lp290 -sVuse_dhcp -p291 -I00 -sVtype -p292 -Vinterface -p293 -sVname -p294 -Vnic4 -p295 -sa(dp296 -Vdns_servers -p297 -V{get_param: DnsServers} -p298 -sVaddresses -p299 -(lp300 -sVnetwork -p301 -VTenant -p302 -sVmtu -p303 -I-1 -sg73 -(lp304 -(dp305 -Vaddresses -p306 -(lp307 -sVnetwork -p308 -VNone -p309 -sVprimary -p310 -I01 -sVmtu -p311 -I1350 -sVport_name -p312 -g31 -sg73 -(lp313 -sVroutes -p314 -(lp315 -sVuse_dhcp -p316 -I00 -sVtype -p317 -Vinterface -p318 -sVname -p319 -Vnic6 -p320 -sasVroutes -p321 -(lp322 -sVuse_dhcp -p323 -I00 -sVtype -p324 -Vovs_bridge -p325 -sVname -p326 -Vbr-tenant -p327 -sa(dp328 -Vaddresses -p329 -(lp330 -sVnetwork -p331 -VStorageMgmt -p332 -sVprimary -p333 -I01 -sVmtu -p334 -I1350 -sVport_name -p335 -g31 -sg73 -(lp336 -sVroutes -p337 -(lp338 -sVuse_dhcp -p339 -I00 -sVtype -p340 -Vinterface -p341 -sVname -p342 -Vnic5 -p343 -sass. \ No newline at end of file diff --git a/ci/environments/network/public-bond/README b/ci/environments/network/public-bond/README deleted file mode 100644 index 189bfb58ff..0000000000 --- a/ci/environments/network/public-bond/README +++ /dev/null @@ -1,23 +0,0 @@ -Generated Network Isolation Templates -------------------------------------- -These templates were generated by the UI tool at -https://github.com/cybertron/tripleo-scripts#net-iso-genpy - -ui-settings.pickle is specific to the tool. TripleO will not use it when -doing deployments with these templates, but it is needed to be able to -load the templates into the UI again. Note that the UI only reads this file, -so any changes made by hand to the templates will not be reflected in the UI. - -The network-isolation.yaml file needs to reference the port files shipped with -tripleo-heat-templates, so by default the tool generates the paths assuming -network-isolation.yaml will be copied into the environments/ directory of -tripleo-heat-templates. - -If the standard tripleo-heat-templates are in use, then the -network-isolation-absolute.yaml file can be used instead. It has hard-coded -references to the port files in /usr/share/openstack-tripleo-heat-templates. 
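As an aside to the paragraph above: the two variants differ only in how their resource_registry entries point at the port files. A minimal sketch of the same entry in both forms (the full generated files appear later in this patch):

resource_registry:
  # network-isolation.yaml: path relative to this file's location in the tree
  OS::TripleO::Network::External: ../../../../network/external.yaml
  # network-isolation-absolute.yaml: hard-coded path to the installed templates
  # OS::TripleO::Network::External: /usr/share/openstack-tripleo-heat-templates/network/external.yaml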
- -If the generated network isolation templates are at ~/generated-templates, an -example deployment command would look like: - -openstack overcloud deploy --templates -e ~/generated-templates/network-isolation-absolute.yaml -e ~/generated-templates/network-environment.yaml diff --git a/ci/environments/network/public-bond/network-environment.yaml b/ci/environments/network/public-bond/network-environment.yaml deleted file mode 100644 index 527042f922..0000000000 --- a/ci/environments/network/public-bond/network-environment.yaml +++ /dev/null @@ -1,19 +0,0 @@ -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/public_bond.j2' - ComputeNetworkConfigTemplate: 'templates/ci/public_bond.j2' - BlockStorageNetworkConfigTemplate: 'templates/ci/public_bond.j2' - CellControllerNetworkConfigTemplate: 'templates/ci/public_bond.j2' - ObjectStorageNetworkConfigTemplate: 'templates/ci/public_bond.j2' - CephStorageNetworkConfigTemplate: 'templates/ci/public_bond.j2' - ExternalNetCidr: 10.0.0.0/24 - ExternalAllocationPools: [{"start": "10.0.0.10", "end": "10.0.0.50"}] - ExternalInterfaceDefaultRoute: 10.0.0.1 - InternalApiNetCidr: 172.17.0.0/24 - InternalApiAllocationPools: [{"start": "172.17.0.10", "end": "172.17.0.250"}] - StorageNetCidr: 172.18.0.0/24 - StorageAllocationPools: [{"start": "172.18.0.10", "end": "172.18.0.250"}] - StorageMgmtNetCidr: 172.19.0.0/24 - StorageMgmtAllocationPools: [{"start": "172.19.0.10", "end": "172.19.0.250"}] - TenantNetCidr: 172.16.0.0/24 - TenantAllocationPools: [{"start": "172.16.0.10", "end": "172.16.0.250"}] - BondInterfaceOvsOptions: bond_mode=balance-slb diff --git a/ci/environments/network/public-bond/network-isolation-absolute.yaml b/ci/environments/network/public-bond/network-isolation-absolute.yaml deleted file mode 100644 index 24bae4b2db..0000000000 --- a/ci/environments/network/public-bond/network-isolation-absolute.yaml +++ /dev/null @@ -1,26 +0,0 @@ -resource_registry: - # External - OS::TripleO::Network::External: /usr/share/openstack-tripleo-heat-templates/network/external.yaml - OS::TripleO::Network::Ports::ExternalVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml - OS::TripleO::Controller::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml - # InternalApi - OS::TripleO::Network::InternalApi: /usr/share/openstack-tripleo-heat-templates/network/internal_api.yaml - OS::TripleO::Network::Ports::InternalApiVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - OS::TripleO::Controller::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - OS::TripleO::Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml - # Storage - OS::TripleO::Network::Storage: /usr/share/openstack-tripleo-heat-templates/network/storage.yaml - OS::TripleO::Network::Ports::StorageVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::Controller::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - OS::TripleO::CephStorage::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml - # StorageMgmt - OS::TripleO::Network::StorageMgmt: /usr/share/openstack-tripleo-heat-templates/network/storage_mgmt.yaml - 
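The BondInterfaceOvsOptions value set above (bond_mode=balance-slb) is consumed by the rendered NIC configuration. A rough sketch of how the public-bond layout wires it into an OVS bond, reconstructed from the ui-settings data later in this patch; the interface names and nesting are illustrative, not the literal public_bond.j2 output:

network_config:
  - type: ovs_bridge
    name: br-ex
    use_dhcp: false
    members:
      - type: ovs_bond
        name: bond1
        ovs_options: {get_param: BondInterfaceOvsOptions}
        members:
          - type: interface
            name: nic2
            primary: true
          - type: interface
            name: nic3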
OS::TripleO::Network::Ports::StorageMgmtVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml - # Tenant - OS::TripleO::Network::Tenant: /usr/share/openstack-tripleo-heat-templates/network/tenant.yaml - OS::TripleO::Network::Ports::TenantVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::Controller::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml - OS::TripleO::Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml diff --git a/ci/environments/network/public-bond/network-isolation.yaml b/ci/environments/network/public-bond/network-isolation.yaml deleted file mode 100644 index d59d260dea..0000000000 --- a/ci/environments/network/public-bond/network-isolation.yaml +++ /dev/null @@ -1,26 +0,0 @@ -resource_registry: - # External - OS::TripleO::Network::External: ../../../../network/external.yaml - OS::TripleO::Network::Ports::ExternalVipPort: ../../../../network/ports/external.yaml - OS::TripleO::Controller::Ports::ExternalPort: ../../../../network/ports/external.yaml - # InternalApi - OS::TripleO::Network::InternalApi: ../../../../network/internal_api.yaml - OS::TripleO::Network::Ports::InternalApiVipPort: ../../../../network/ports/internal_api.yaml - OS::TripleO::Controller::Ports::InternalApiPort: ../../../../network/ports/internal_api.yaml - OS::TripleO::Compute::Ports::InternalApiPort: ../../../../network/ports/internal_api.yaml - # Storage - OS::TripleO::Network::Storage: ../../../../network/storage.yaml - OS::TripleO::Network::Ports::StorageVipPort: ../../../../network/ports/storage.yaml - OS::TripleO::Controller::Ports::StoragePort: ../../../../network/ports/storage.yaml - OS::TripleO::Compute::Ports::StoragePort: ../../../../network/ports/storage.yaml - OS::TripleO::CephStorage::Ports::StoragePort: ../../../../network/ports/storage.yaml - # StorageMgmt - OS::TripleO::Network::StorageMgmt: ../../../../network/storage_mgmt.yaml - OS::TripleO::Network::Ports::StorageMgmtVipPort: ../../../../network/ports/storage_mgmt.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../../../../network/ports/storage_mgmt.yaml - # Tenant - OS::TripleO::Network::Tenant: ../../../../network/tenant.yaml - OS::TripleO::Network::Ports::TenantVipPort: ../../../../network/ports/tenant.yaml - OS::TripleO::Controller::Ports::TenantPort: ../../../../network/ports/tenant.yaml - OS::TripleO::Compute::Ports::TenantPort: ../../../../network/ports/tenant.yaml diff --git a/ci/environments/network/public-bond/ui-settings.pickle b/ci/environments/network/public-bond/ui-settings.pickle deleted file mode 100644 index 0927ffeb97..0000000000 --- a/ci/environments/network/public-bond/ui-settings.pickle +++ /dev/null @@ -1,699 +0,0 @@ -(dp0 -S'global_data' -p1 -(dp2 -S'control' -p3 -(dp4 -S'route' -p5 -V192.168.24.1 -p6 -sS'mask' -p7 -I24 -sS'ec2' -p8 -V192.168.24.1 -p9 -ssS'major' -p10 -I1 -sS'management' -p11 -(dp12 -S'start' -p13 -V172.20.0.10 -p14 -sS'cidr' -p15 -V172.20.0.0/24 -p16 -sS'vlan' -p17 -I6 -sS'end' -p18 -V172.20.0.250 -p19 -ssS'dns2' -p20 -V8.8.4.4 -p21 -sS'dns1' -p22 -V8.8.8.8 -p23 -sS'storage' -p24 -(dp25 
-g13 -V172.18.0.10 -p26 -sg15 -V172.18.0.0/24 -p27 -sg17 -I3 -sg18 -V172.18.0.250 -p28 -ssS'auto_routes' -p29 -I01 -sS'bond_options' -p30 -Vbond_mode=balance-slb -p31 -sS'version' -p32 -I2 -sS'external' -p33 -(dp34 -S'bridge' -p35 -V'' -p36 -sg18 -V10.0.0.50 -p37 -sg17 -I1 -sg13 -V10.0.0.10 -p38 -sg15 -V10.0.0.0/24 -p39 -sS'gateway' -p40 -V10.0.0.1 -p41 -ssS'internal_api' -p42 -(dp43 -g13 -V172.17.0.10 -p44 -sg15 -V172.17.0.0/24 -p45 -sg17 -I2 -sg18 -V172.17.0.250 -p46 -ssS'ipv6' -p47 -I00 -sS'storage_mgmt' -p48 -(dp49 -g13 -V172.19.0.10 -p50 -sg15 -V172.19.0.0/24 -p51 -sg17 -I4 -sg18 -V172.19.0.250 -p52 -ssS'minor' -p53 -I2 -sS'tenant' -p54 -(dp55 -g13 -V172.16.0.10 -p56 -sg15 -V172.16.0.0/24 -p57 -sg17 -I5 -sg18 -V172.16.0.250 -p58 -sssS'data' -p59 -(dp60 -S'cinder-storage.yaml' -p61 -(lp62 -sS'ceph-storage.yaml' -p63 -(lp64 -(dp65 -Vaddresses -p66 -(lp67 -sVnetwork -p68 -VControlPlane -p69 -sVprimary -p70 -I01 -sVmtu -p71 -I1350 -sS'members' -p72 -(lp73 -sVroutes -p74 -(lp75 -sVuse_dhcp -p76 -I00 -sVtype -p77 -Vinterface -p78 -sVname -p79 -Vnic1 -p80 -sa(dp81 -Vaddresses -p82 -(lp83 -sVnetwork -p84 -VStorage -p85 -sVprimary -p86 -I01 -sVmtu -p87 -I1350 -sg72 -(lp88 -sVroutes -p89 -(lp90 -sVuse_dhcp -p91 -I00 -sVtype -p92 -Vinterface -p93 -sVname -p94 -Vnic5 -p95 -sa(dp96 -Vaddresses -p97 -(lp98 -sVnetwork -p99 -VStorageMgmt -p100 -sVprimary -p101 -I01 -sVmtu -p102 -I1350 -sg72 -(lp103 -sVroutes -p104 -(lp105 -sVuse_dhcp -p106 -I00 -sVtype -p107 -Vinterface -p108 -sVname -p109 -Vnic6 -p110 -sasS'controller.yaml' -p111 -(lp112 -(dp113 -Vaddresses -p114 -(lp115 -sVnetwork -p116 -VControlPlane -p117 -sVprimary -p118 -I01 -sVmtu -p119 -I1350 -sg72 -(lp120 -sVroutes -p121 -(lp122 -sVuse_dhcp -p123 -I00 -sVtype -p124 -Vinterface -p125 -sVname -p126 -Vnic1 -p127 -sa(dp128 -Vdns_servers -p129 -V{get_param: DnsServers} -p130 -sVaddresses -p131 -(lp132 -sVnetwork -p133 -VExternal -p134 -sVmtu -p135 -I-1 -sg72 -(lp136 -(dp137 -Vnetwork -p138 -VNone -p139 -sVbond_type -p140 -Vovs -p141 -sVovs_options -p142 -V{get_param: BondInterfaceOvsOptions} -p143 -sVmtu -p144 -I-1 -sg72 -(lp145 -(dp146 -Vaddresses -p147 -(lp148 -sVnetwork -p149 -VNone -p150 -sVprimary -p151 -I01 -sVmtu -p152 -I1350 -sVroutes -p153 -(lp154 -sVuse_dhcp -p155 -I00 -sVtype -p156 -Vinterface -p157 -sVname -p158 -Vnic2 -p159 -sa(dp160 -Vaddresses -p161 -(lp162 -sVnetwork -p163 -VNone -p164 -sVprimary -p165 -I00 -sVmtu -p166 -I1350 -sVroutes -p167 -(lp168 -sVuse_dhcp -p169 -I00 -sVtype -p170 -Vinterface -p171 -sVname -p172 -Vnic3 -p173 -sasVroutes -p174 -(lp175 -sVtype -p176 -Vovs_bond -p177 -sVname -p178 -Vbond1 -p179 -sasVroutes -p180 -(lp181 -sVuse_dhcp -p182 -I00 -sVtype -p183 -Vovs_bridge -p184 -sVname -p185 -Vbr-ex -p186 -sa(dp187 -Vaddresses -p188 -(lp189 -sVnetwork -p190 -VInternalApi -p191 -sVprimary -p192 -I01 -sVmtu -p193 -I1350 -sg72 -(lp194 -sVroutes -p195 -(lp196 -sVuse_dhcp -p197 -I00 -sVtype -p198 -Vinterface -p199 -sVname -p200 -Vnic4 -p201 -sa(dp202 -Vaddresses -p203 -(lp204 -sVnetwork -p205 -VStorage -p206 -sVprimary -p207 -I01 -sVmtu -p208 -I1350 -sg72 -(lp209 -sVroutes -p210 -(lp211 -sVuse_dhcp -p212 -I00 -sVtype -p213 -Vinterface -p214 -sVname -p215 -Vnic5 -p216 -sa(dp217 -Vaddresses -p218 -(lp219 -sVnetwork -p220 -VStorageMgmt -p221 -sVprimary -p222 -I01 -sVmtu -p223 -I1350 -sg72 -(lp224 -sVroutes -p225 -(lp226 -sVuse_dhcp -p227 -I00 -sVtype -p228 -Vinterface -p229 -sVname -p230 -Vnic6 -p231 -sa(dp232 -Vdns_servers -p233 -V{get_param: DnsServers} -p234 -sVaddresses -p235 -(lp236 -sVnetwork -p237 -VTenant -p238 
-sVmtu -p239 -I-1 -sg72 -(lp240 -(dp241 -Vaddresses -p242 -(lp243 -sVnetwork -p244 -VNone -p245 -sVprimary -p246 -I01 -sVmtu -p247 -I1350 -sg72 -(lp248 -sVroutes -p249 -(lp250 -sVuse_dhcp -p251 -I00 -sVtype -p252 -Vinterface -p253 -sVname -p254 -Vnic7 -p255 -sasVroutes -p256 -(lp257 -sVuse_dhcp -p258 -I00 -sVtype -p259 -Vovs_bridge -p260 -sVname -p261 -Vbr-tenant -p262 -sasS'swift-storage.yaml' -p263 -(lp264 -sS'compute.yaml' -p265 -(lp266 -(dp267 -Vaddresses -p268 -(lp269 -sVnetwork -p270 -VControlPlane -p271 -sVprimary -p272 -I01 -sVmtu -p273 -I1350 -sg72 -(lp274 -sVroutes -p275 -(lp276 -sVuse_dhcp -p277 -I00 -sVtype -p278 -Vinterface -p279 -sVname -p280 -Vnic1 -p281 -sa(dp282 -Vaddresses -p283 -(lp284 -sVnetwork -p285 -VInternalApi -p286 -sVprimary -p287 -I01 -sVmtu -p288 -I1350 -sg72 -(lp289 -sVroutes -p290 -(lp291 -sVuse_dhcp -p292 -I00 -sVtype -p293 -Vinterface -p294 -sVname -p295 -Vnic4 -p296 -sa(dp297 -Vaddresses -p298 -(lp299 -sVnetwork -p300 -VStorage -p301 -sVprimary -p302 -I01 -sVmtu -p303 -I1350 -sg72 -(lp304 -sVroutes -p305 -(lp306 -sVuse_dhcp -p307 -I00 -sVtype -p308 -Vinterface -p309 -sVname -p310 -Vnic5 -p311 -sa(dp312 -Vdns_servers -p313 -V{get_param: DnsServers} -p314 -sVaddresses -p315 -(lp316 -sVnetwork -p317 -VTenant -p318 -sVmtu -p319 -I-1 -sg72 -(lp320 -(dp321 -Vaddresses -p322 -(lp323 -sVnetwork -p324 -VNone -p325 -sVprimary -p326 -I01 -sVmtu -p327 -I1350 -sg72 -(lp328 -sVroutes -p329 -(lp330 -sVuse_dhcp -p331 -I00 -sVtype -p332 -Vinterface -p333 -sVname -p334 -Vnic7 -p335 -sasVroutes -p336 -(lp337 -sVuse_dhcp -p338 -I00 -sVtype -p339 -Vovs_bridge -p340 -sVname -p341 -Vbr-tenant -p342 -sass. \ No newline at end of file diff --git a/ci/environments/neutron_dns_domain.yaml b/ci/environments/neutron_dns_domain.yaml deleted file mode 100644 index 899e744e29..0000000000 --- a/ci/environments/neutron_dns_domain.yaml +++ /dev/null @@ -1,2 +0,0 @@ -parameter_defaults: - NeutronDnsDomain: 'openstackgate.local' diff --git a/ci/environments/neutron_l3_qos.yaml b/ci/environments/neutron_l3_qos.yaml deleted file mode 100644 index 5a83884231..0000000000 --- a/ci/environments/neutron_l3_qos.yaml +++ /dev/null @@ -1,2 +0,0 @@ -parameter_defaults: - NeutronL3AgentExtensions: 'fip_qos,gateway_ip_qos,port_forwarding' diff --git a/ci/environments/neutron_sec_group_quota.yaml b/ci/environments/neutron_sec_group_quota.yaml deleted file mode 100644 index 62530b0860..0000000000 --- a/ci/environments/neutron_sec_group_quota.yaml +++ /dev/null @@ -1,2 +0,0 @@ -parameter_defaults: - NeutronSecurityGroupQuota: '150' diff --git a/ci/environments/octavia-kvm.yaml b/ci/environments/octavia-kvm.yaml deleted file mode 100644 index c806f3252b..0000000000 --- a/ci/environments/octavia-kvm.yaml +++ /dev/null @@ -1,7 +0,0 @@ -parameter_defaults: - StandaloneExtraConfig: - # Config related to nova libvirt - octavia::controller::connection_retry_interval: 10 - nova::compute::libvirt::services::libvirt_virt_type: kvm - nova::compute::libvirt::virt_type: kvm - nova::compute::libvirt::cpu_mode: 'host-passthrough' diff --git a/ci/environments/ovb-ha.yaml b/ci/environments/ovb-ha.yaml deleted file mode 100644 index 1d3d7150a9..0000000000 --- a/ci/environments/ovb-ha.yaml +++ /dev/null @@ -1,10 +0,0 @@ -parameter_defaults: - ControllerExtraConfig: - nova::compute::libvirt::services::libvirt_virt_type: qemu - nova::compute::libvirt::virt_type: qemu - Debug: true - HideSensitiveLogs: false - DockerPuppetDebug: True - ExtraConfig: - tripleo::profile::pacemaker::database::mysql_bundle::provider_options: 
'evs.suspect_timeout=PT30S' - CorosyncTokenTimeout: 30000 diff --git a/ci/environments/scenario000-multinode-containers.yaml b/ci/environments/scenario000-multinode-containers.yaml deleted file mode 100644 index 717f0f9918..0000000000 --- a/ci/environments/scenario000-multinode-containers.yaml +++ /dev/null @@ -1,27 +0,0 @@ -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2' - ControllerServices: - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::ContainerImagePrepare - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::Keystone - - OS::TripleO::Services::MySQL - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::OsloMessagingNotify - - OS::TripleO::Services::Memcached - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::Pacemaker - - OS::TripleO::Services::Clustercheck - Debug: true - DockerPuppetDebug: True - NotificationDriver: 'noop' - ContainerCli: podman diff --git a/ci/environments/scenario000-standalone.yaml b/ci/environments/scenario000-standalone.yaml deleted file mode 100644 index 2692431cbb..0000000000 --- a/ci/environments/scenario000-standalone.yaml +++ /dev/null @@ -1,164 +0,0 @@ -# This scenario only install keystone and the required services to run it. -resource_registry: - OS::TripleO::Services::AodhApi: OS::Heat::None - OS::TripleO::Services::AodhEvaluator: OS::Heat::None - OS::TripleO::Services::AodhListener: OS::Heat::None - OS::TripleO::Services::AodhNotifier: OS::Heat::None - OS::TripleO::Services::Apache: OS::Heat::None - OS::TripleO::Services::AuditD: OS::Heat::None - OS::TripleO::Services::BarbicanApi: OS::Heat::None - OS::TripleO::Services::BarbicanBackendDogtag: OS::Heat::None - OS::TripleO::Services::BarbicanBackendKmip: OS::Heat::None - OS::TripleO::Services::BarbicanBackendPkcs11Crypto: OS::Heat::None - OS::TripleO::Services::BarbicanBackendSimpleCrypto: OS::Heat::None - OS::TripleO::Services::BlockStorageCinderVolume: OS::Heat::None - OS::TripleO::Services::BootParams: OS::Heat::None - OS::TripleO::Services::CeilometerAgentCentral: OS::Heat::None - OS::TripleO::Services::CeilometerAgentIpmi: OS::Heat::None - OS::TripleO::Services::CeilometerAgentNotification: OS::Heat::None - OS::TripleO::Services::CephClient: OS::Heat::None - OS::TripleO::Services::CephExternal: OS::Heat::None - OS::TripleO::Services::CephMds: OS::Heat::None - OS::TripleO::Services::CephMgr: OS::Heat::None - OS::TripleO::Services::CephMon: OS::Heat::None - OS::TripleO::Services::CephNfs: OS::Heat::None - OS::TripleO::Services::CephOSD: OS::Heat::None - OS::TripleO::Services::CephRbdMirror: OS::Heat::None - OS::TripleO::Services::CephRgw: OS::Heat::None - OS::TripleO::Services::Chrony: OS::Heat::None - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderBackup: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::Clustercheck: OS::Heat::None - OS::TripleO::Services::Collectd: OS::Heat::None - OS::TripleO::Services::ComputeCeilometerAgent: OS::Heat::None - OS::TripleO::Services::ComputeInstanceHA: OS::Heat::None - 
OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None - OS::TripleO::Services::ComputeNeutronL3Agent: OS::Heat::None - OS::TripleO::Services::ComputeNeutronMetadataAgent: OS::Heat::None - OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None - OS::TripleO::Services::ComputeNeutronOvsDpdk: OS::Heat::None - OS::TripleO::Services::ContainerImagePrepare: ../../deployment/container-image-prepare/container-image-prepare-baremetal-ansible.yaml - OS::TripleO::Services::DesignateApi: OS::Heat::None - OS::TripleO::Services::DesignateCentral: OS::Heat::None - OS::TripleO::Services::DesignateMDNS: OS::Heat::None - OS::TripleO::Services::DesignateProducer: OS::Heat::None - OS::TripleO::Services::DesignateSink: OS::Heat::None - OS::TripleO::Services::DesignateWorker: OS::Heat::None - OS::TripleO::Services::DesignateBind: OS::Heat::None - OS::TripleO::Services::Etcd: OS::Heat::None - OS::TripleO::Services::ExternalSwiftProxy: OS::Heat::None - OS::TripleO::Services::GlanceApi: OS::Heat::None - OS::TripleO::Services::GlanceApiInternal: OS::Heat::None - OS::TripleO::Services::GnocchiApi: OS::Heat::None - OS::TripleO::Services::GnocchiMetricd: OS::Heat::None - OS::TripleO::Services::GnocchiStatsd: OS::Heat::None - OS::TripleO::Services::HAProxyInternalTLS: OS::Heat::None - OS::TripleO::Services::HAProxyPublicTLS: OS::Heat::None - OS::TripleO::Services::HAproxy: OS::Heat::None - OS::TripleO::Services::HeatApi: OS::Heat::None - OS::TripleO::Services::HeatApiCfn: OS::Heat::None - OS::TripleO::Services::HeatEngine: OS::Heat::None - OS::TripleO::Services::Horizon: OS::Heat::None - OS::TripleO::Services::IpaClient: OS::Heat::None - OS::TripleO::Services::Ipsec: OS::Heat::None - OS::TripleO::Services::IronicApi: OS::Heat::None - OS::TripleO::Services::IronicConductor: OS::Heat::None - OS::TripleO::Services::IronicInspector: OS::Heat::None - OS::TripleO::Services::IronicNeutronAgent: OS::Heat::None - OS::TripleO::Services::IronicPxe: OS::Heat::None - OS::TripleO::Services::Iscsid: OS::Heat::None - # OS::TripleO::Services::Keystone: OS::Heat::None - OS::TripleO::Services::Logging::BarbicanApi: OS::Heat::None - OS::TripleO::Services::Logging::GlanceApi: OS::Heat::None - OS::TripleO::Services::Logging::HAProxy: OS::Heat::None - OS::TripleO::Services::Logging::HeatApi: OS::Heat::None - OS::TripleO::Services::Logging::HeatApiCfn: OS::Heat::None - # OS::TripleO::Services::Logging::Keystone: OS::Heat::None - OS::TripleO::Services::Logging::NeutronApi: OS::Heat::None - OS::TripleO::Services::Logging::NeutronCommon: OS::Heat::None - OS::TripleO::Services::Logging::NovaApi: OS::Heat::None - OS::TripleO::Services::Logging::NovaCommon: OS::Heat::None - OS::TripleO::Services::Logging::NovaLibvirt: OS::Heat::None - OS::TripleO::Services::Logging::NovaMetadata: OS::Heat::None - OS::TripleO::Services::Logging::PlacementApi: OS::Heat::None - OS::TripleO::Services::ManilaApi: OS::Heat::None - OS::TripleO::Services::ManilaBackendCephFs: OS::Heat::None - OS::TripleO::Services::ManilaBackendFlashBlade: OS::Heat::None - OS::TripleO::Services::ManilaBackendIsilon: OS::Heat::None - OS::TripleO::Services::ManilaBackendNetapp: OS::Heat::None - OS::TripleO::Services::ManilaBackendPowerMax: OS::Heat::None - OS::TripleO::Services::ManilaBackendUnity: OS::Heat::None - OS::TripleO::Services::ManilaBackendVNX: OS::Heat::None - OS::TripleO::Services::ManilaScheduler: OS::Heat::None - OS::TripleO::Services::ManilaShare: OS::Heat::None - OS::TripleO::Services::MasqueradeNetworks: OS::Heat::None - OS::TripleO::Services::Memcached: 
OS::Heat::None - OS::TripleO::Services::MetricsQdr: OS::Heat::None - OS::TripleO::Services::Multipathd: OS::Heat::None - # OS::TripleO::Services::MySQL: OS::Heat::None - OS::TripleO::Services::NeutronApi: OS::Heat::None - OS::TripleO::Services::NeutronBgpVpnApi: OS::Heat::None - OS::TripleO::Services::NeutronBgpVpnBagpipe: OS::Heat::None - OS::TripleO::Services::NeutronCorePlugin: OS::Heat::None - OS::TripleO::Services::NeutronCorePluginMLNXSDN: OS::Heat::None - OS::TripleO::Services::NeutronCorePluginVTS: OS::Heat::None - OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None - OS::TripleO::Services::NeutronL2gwAgent: OS::Heat::None - OS::TripleO::Services::NeutronL2gwApi: OS::Heat::None - OS::TripleO::Services::NeutronL3Agent: OS::Heat::None - OS::TripleO::Services::NeutronLinuxbridgeAgent: OS::Heat::None - OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None - OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None - OS::TripleO::Services::NeutronSfcApi: OS::Heat::None - OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None - OS::TripleO::Services::NovaAZConfig: OS::Heat::None - OS::TripleO::Services::NovaApi: OS::Heat::None - OS::TripleO::Services::NovaCompute: OS::Heat::None - OS::TripleO::Services::NovaConductor: OS::Heat::None - OS::TripleO::Services::NovaIronic: OS::Heat::None - OS::TripleO::Services::NovaLibvirt: OS::Heat::None - OS::TripleO::Services::NovaLibvirtGuests: OS::Heat::None - OS::TripleO::Services::NovaMetadata: OS::Heat::None - OS::TripleO::Services::NovaMigrationTarget: OS::Heat::None - OS::TripleO::Services::NovaScheduler: OS::Heat::None - OS::TripleO::Services::NovaVncProxy: OS::Heat::None - OS::TripleO::Services::OVNController: OS::Heat::None - OS::TripleO::Services::OVNDBs: OS::Heat::None - OS::TripleO::Services::OVNMetadataAgent: OS::Heat::None - OS::TripleO::Services::OctaviaApi: OS::Heat::None - OS::TripleO::Services::OctaviaDeploymentConfig: OS::Heat::None - OS::TripleO::Services::OctaviaHealthManager: OS::Heat::None - OS::TripleO::Services::OctaviaHousekeeping: OS::Heat::None - OS::TripleO::Services::OctaviaWorker: OS::Heat::None - # OS::TripleO::Services::OsloMessagingNotify: OS::Heat::None - # OS::TripleO::Services::OsloMessagingRpc: OS::Heat::None - OS::TripleO::Services::Pacemaker: OS::Heat::None - OS::TripleO::Services::PacemakerRemote: OS::Heat::None - OS::TripleO::Services::PlacementApi: OS::Heat::None - OS::TripleO::Services::Podman: ../../deployment/podman/podman-baremetal-ansible.yaml - OS::TripleO::Services::Ptp: OS::Heat::None - OS::TripleO::Services::Qdr: OS::Heat::None - # OS::TripleO::Services::RabbitMQ: OS::Heat::None - OS::TripleO::Services::Redis: OS::Heat::None - OS::TripleO::Services::RsyslogSidecar: OS::Heat::None - OS::TripleO::Services::SELinux: OS::Heat::None - OS::TripleO::Services::Securetty: OS::Heat::None - OS::TripleO::Services::Snmp: OS::Heat::None - OS::TripleO::Services::SwiftDispersion: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - OS::TripleO::Services::TLSProxyBase: OS::Heat::None - OS::TripleO::Services::Tempest: OS::Heat::None - OS::TripleO::Services::Tmpwatch: ../../deployment/logrotate/tmpwatch-install.yaml - OS::TripleO::Services::TripleoPackages: OS::Heat::None - OS::TripleO::Services::Tuned: OS::Heat::None - OS::TripleO::Services::Unbound: OS::Heat::None - OS::TripleO::Services::UndercloudTLS: OS::Heat::None - OS::TripleO::Services::UndercloudUpgrade: OS::Heat::None 
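The registry above follows a single pattern: every service this Keystone-only scenario does not need is mapped to OS::Heat::None, while the handful it does need point at their deployment templates. As a hypothetical illustration (Horizon is not actually enabled in scenario000), a later environment file passed with -e could re-enable one service simply by overriding its mapping:

resource_registry:
  # hypothetical override in a subsequent -e file to turn Horizon back on
  OS::TripleO::Services::Horizon: ../../deployment/horizon/horizon-container-puppet.yaml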
-parameter_defaults: - Debug: true - NotificationDriver: 'noop' - ContainerCli: podman diff --git a/ci/environments/scenario001-multinode-containers.yaml b/ci/environments/scenario001-multinode-containers.yaml deleted file mode 100644 index fb7578e47f..0000000000 --- a/ci/environments/scenario001-multinode-containers.yaml +++ /dev/null @@ -1,168 +0,0 @@ -resource_registry: - OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml - OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml - OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml - OS::TripleO::Services::CephGrafana: ../../deployment/cephadm/ceph-grafana.yaml - OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml - OS::TripleO::Services::CeilometerAgentCentral: ../../deployment/ceilometer/ceilometer-agent-central-container-puppet.yaml - OS::TripleO::Services::CeilometerAgentNotification: ../../deployment/ceilometer/ceilometer-agent-notification-container-puppet.yaml - OS::TripleO::Services::CeilometerAgentIpmi: ../../deployment/ceilometer/ceilometer-agent-ipmi-container-puppet.yaml - OS::TripleO::Services::ComputeCeilometerAgent: ../../deployment/ceilometer/ceilometer-agent-compute-container-puppet.yaml - OS::TripleO::Services::Collectd: ../../deployment/metrics/collectd-container-puppet.yaml - OS::TripleO::Services::MetricsQdr: ../../deployment/metrics/qdr-container-puppet.yaml - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml - OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml - OS::TripleO::Services::HAproxy: ../../deployment/haproxy/haproxy-pacemaker-puppet.yaml - OS::TripleO::Services::Pacemaker: ../../deployment/pacemaker/pacemaker-baremetal-puppet.yaml - OS::TripleO::Services::PacemakerRemote: ../../deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml - OS::TripleO::Services::Clustercheck: ../../deployment/pacemaker/clustercheck-container-puppet.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml - OS::TripleO::Services::MySQL: ../../deployment/database/mysql-pacemaker-puppet.yaml - OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-pacemaker-puppet.yaml - OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-pacemaker-puppet.yaml - OS::TripleO::Services::HeatApi: ../../deployment/heat/heat-api-container-puppet.yaml - OS::TripleO::Services::HeatApiCfn: ../../deployment/heat/heat-api-cfn-container-puppet.yaml - OS::TripleO::Services::HeatEngine: ../../deployment/heat/heat-engine-container-puppet.yaml - -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2' - ControllerServices: - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::Clustercheck - - OS::TripleO::Services::ContainerImagePrepare - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::Keystone - - OS::TripleO::Services::LoginDefs - - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceApiInternal - - OS::TripleO::Services::HeatApi - - OS::TripleO::Services::HeatApiCfn - - OS::TripleO::Services::HeatEngine - - OS::TripleO::Services::MySQL - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::NeutronApi - - OS::TripleO::Services::NeutronCorePlugin - - OS::TripleO::Services::OVNDBs - - 
OS::TripleO::Services::OVNController - - OS::TripleO::Services::OVNMetadataAgent - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::OsloMessagingNotify - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::Memcached - - OS::TripleO::Services::Pacemaker - - OS::TripleO::Services::NovaConductor - - OS::TripleO::Services::NovaApi - - OS::TripleO::Services::PlacementApi - - OS::TripleO::Services::NovaMetadata - - OS::TripleO::Services::NovaScheduler - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::Securetty - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::NovaCompute - - OS::TripleO::Services::NovaLibvirt - - OS::TripleO::Services::NovaMigrationTarget - - OS::TripleO::Services::Redis - - OS::TripleO::Services::AodhApi - - OS::TripleO::Services::AodhEvaluator - - OS::TripleO::Services::AodhNotifier - - OS::TripleO::Services::AodhListener - - OS::TripleO::Services::CeilometerAgentCentral - - OS::TripleO::Services::CeilometerAgentIpmi - - OS::TripleO::Services::CeilometerAgentNotification - - OS::TripleO::Services::ComputeCeilometerAgent - - OS::TripleO::Services::GnocchiApi - - OS::TripleO::Services::GnocchiMetricd - - OS::TripleO::Services::GnocchiStatsd - - OS::TripleO::Services::CephMgr - - OS::TripleO::Services::CephMon - - OS::TripleO::Services::CephOSD - - OS::TripleO::Services::CephClient - - OS::TripleO::Services::CinderApi - - OS::TripleO::Services::CinderBackup - - OS::TripleO::Services::CinderScheduler - - OS::TripleO::Services::CinderVolume - - OS::TripleO::Services::Collectd - - OS::TripleO::Services::MetricsQdr - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::Iscsid - - OS::TripleO::Services::Multipathd - - ControllerExtraConfig: - nova::compute::libvirt::services::libvirt_virt_type: qemu - nova::compute::libvirt::virt_type: qemu - # NOTE(sileht): To decrease the time test_telemetry_integration takes We - # configure Ceilometer to poll more, We configure the - # 'ceilometer-high-rate' Gnocchi archive policy to keep 1 point every 60s. - # The test will take 2 minutes instead of 10 minutes. Note that tempest - # telemetry.alarm_granularity must in sync with the archive policy, 60s - # too. - ceilometer::agent::polling::polling_interval: 15 - # NOTE(mmagr): uncomment when QDR mesh will actually work (followup on review.opendev.org/702754) - #tripleo::profile::base::metrics::qdr::router_mode: interior - NotificationDriver: 'messagingv2' - ManagePolling: true - ManagePipeline: true - CeilometerEnableGnocchi: true - PipelinePublishers: - - gnocchi://?archive_policy=ceilometer-high-rate - EventPipelinePublishers: - - gnocchi://?archive_policy=ceilometer-high-rate - CeilometerQdrPublishEvents: true - ManageEventPipeline: true - Debug: true - DockerPuppetDebug: True - CephPools: - - name: altrbd - rule_name: replicated_rule - #NOTE: These ID's and keys should be regenerated for - # a production deployment. What is here is suitable for - # developer and CI testing only. 
- CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19' - CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' - CephEnableDashboard: true - NovaEnableRbdBackend: true - CinderEnableRbdBackend: true - CinderRbdExtraPools: altrbd - CinderBackupBackend: ceph - GlanceBackend: rbd - GnocchiBackend: rbd - CinderEnableIscsiBackend: false - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. Usage of * - * this system may be monitored and recorded by system personnel. * - * Anyone using this system expressly consents to such monitoring * - * and is advised that if such monitoring reveals possible * - * evidence of criminal activity, system personnel may provide * - * the evidence from such monitoring to law enforcement officials.* - ****************************************************************** - CollectdConnectionType: amqp1 - CollectdExtraPlugins: - - rrdtool - CollectdEnableSensubility: true - CollectdEnableLibpodstats: true - LoggingServers: - - host: 127.0.0.1 - port: 24224 - TtyValues: - - console - - tty1 - - tty2 - - tty3 - - tty4 - - tty5 - - tty6 - # Remove ContainerCli once this scenario is tested on CentOS8 - ContainerCli: podman - CephConfigPath: "/etc/ceph" - CephClientConfigVars: "{{ playbook_dir }}/cephadm/ceph_client.yml" - CephSpecFqdn: true - CephOsdSpec: - data_devices: - paths: - - /dev/ceph_vg/ceph_lv_data diff --git a/ci/environments/scenario001-standalone.yaml b/ci/environments/scenario001-standalone.yaml deleted file mode 100644 index 8c49e1105b..0000000000 --- a/ci/environments/scenario001-standalone.yaml +++ /dev/null @@ -1,222 +0,0 @@ -resource_registry: - OS::TripleO::Services::AodhApi: ../../deployment/aodh/aodh-api-container-puppet.yaml - OS::TripleO::Services::AodhEvaluator: ../../deployment/aodh/aodh-evaluator-container-puppet.yaml - OS::TripleO::Services::AodhListener: ../../deployment/aodh/aodh-listener-container-puppet.yaml - OS::TripleO::Services::AodhNotifier: ../../deployment/aodh/aodh-notifier-container-puppet.yaml - OS::TripleO::Services::CeilometerAgentCentral: ../../deployment/ceilometer/ceilometer-agent-central-container-puppet.yaml - OS::TripleO::Services::CeilometerAgentNotification: ../../deployment/ceilometer/ceilometer-agent-notification-container-puppet.yaml - OS::TripleO::Services::ComputeCeilometerAgent: ../../deployment/ceilometer/ceilometer-agent-compute-container-puppet.yaml - OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml - OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml - OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml - OS::TripleO::Services::CephGrafana: ../../deployment/cephadm/ceph-grafana.yaml - OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml - OS::TripleO::Services::CephRgw: ../../deployment/cephadm/ceph-rgw.yaml - OS::TripleO::Services::Collectd: ../../deployment/metrics/collectd-container-puppet.yaml - OS::TripleO::Services::GnocchiApi: ../../deployment/gnocchi/gnocchi-api-container-puppet.yaml - OS::TripleO::Services::GnocchiMetricd: ../../deployment/gnocchi/gnocchi-metricd-container-puppet.yaml - OS::TripleO::Services::GnocchiStatsd: ../../deployment/gnocchi/gnocchi-statsd-container-puppet.yaml - OS::TripleO::Services::HeatApi: ../../deployment/heat/heat-api-container-puppet.yaml - OS::TripleO::Services::HeatApiCfn: ../../deployment/heat/heat-api-cfn-container-puppet.yaml - OS::TripleO::Services::HeatEngine: 
../../deployment/heat/heat-engine-container-puppet.yaml - OS::TripleO::Services::Horizon: ../../deployment/horizon/horizon-container-puppet.yaml - OS::TripleO::Services::MetricsQdr: ../../deployment/metrics/qdr-container-puppet.yaml - OS::TripleO::Services::Multipathd: ../../deployment/multipathd/multipathd-container-ansible.yaml - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml - OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml - OS::TripleO::Services::Rsyslog: ../../deployment/logging/rsyslog-container-puppet.yaml - OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-pacemaker-puppet.yaml - OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-pacemaker-puppet.yaml - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - -parameter_defaults: - NodeDataLookup: - AB4114B1-9C9D-409A-BEFB-D88C151BF2C3: {"foo": "bar"} - 8CF1A7EA-7B4B-4433-AC83-17675514B1B8: {"foo2": "bar2"} - StandaloneExtraConfig: - # NOTE(sileht): To decrease the time test_telemetry_integration takes We - # configure Ceilometer to poll more, We configure the 'high' Gnocchi - # archive policy to keep 1 point every 60s. The test will take 2 minutes - # instead of 10 minutes. Note that tempest telemetry.alarm_granularity must - # in sync with the archive policy, 60s too. - ceilometer::agent::polling::polling_interval: 15 - tripleo::profile::base::metrics::qdr::interior_mesh_nodes: '' - collectd::plugin::ceph::daemons: - - 'ceph-mon.controller-00' - - 'ceph-mon.controller-01' - - 'ceph-mon.controller-02' - - 'ceph-osd.01' - - 'ceph-osd.07' - NotificationDriver: 'messagingv2' - ManagePolling: true - HeatConfigureDelegatedRoles: true - CeilometerEnableGnocchi: true - PipelinePublishers: - - gnocchi://?archive_policy=ceilometer-high-rate - EventPipelinePublishers: - - gnocchi://?archive_policy=ceilometer-high-rate - CeilometerQdrPublishEvents: true - CeilometerQdrPublishMetrics: true - ManageEventPipeline: true - ManagePipeline: true - Debug: true - DeployedCeph: true - CephEnableDashboard: true - CephDashboardPort: 8445 - GrafanaDashboardPort: 3200 - CinderRbdExtraPools: altrbd,pool2,pool3 - CephPools: - - name: altrbd - rule_name: replicated_rule - application: rbd - #NOTE: These ID's and keys should be regenerated for - # a production deployment. What is here is suitable for - # developer and CI testing only. 
- CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' - CephExtraKeys: - - name: "client.glance" - caps: - mgr: "allow *" - mon: "profile rbd" - osd: "profile rbd pool=images" - key: "AQBRgQ9eAAAAABAAv84zEilJYZPNuJ0Iwn9Ndg==" - mode: "0600" - CephExternalMultiConfig: - # create client conf and key file for two non-existent external ceph clusters - - cluster: 'ceph2' - fsid: 'af25554b-42f6-4d2b-9b9b-d08a1132d3e8' - external_cluster_mon_ips: '172.18.0.5,172.18.0.6,172.18.0.7' - keys: - - name: "client.openstack" - caps: - mgr: "allow *" - mon: "profile rbd" - osd: "osd: profile rbd pool=volumes, profile rbd pool=backups, profile rbd pool=vms, profile rbd pool=images" - key: "AQCwmeRcAAAAABAA6SQU/bGqFjlfLro5KxrB1Q==" - mode: "0600" - dashboard_enabled: false - - cluster: 'ceph3' - fsid: 'e2cba068-5f14-4b0f-b047-acf375c0004a' - external_cluster_mon_ips: '172.18.0.8,172.18.0.9,172.18.0.10' - keys: - - name: "client.openstack" - caps: - mgr: "allow *" - mon: "profile rbd" - osd: "osd: profile rbd pool=volumes, profile rbd pool=backups, profile rbd pool=vms, profile rbd pool=images" - key: "AQCwmeRcAAAAABAA6SQU/bGqFjlfLro5KxrB2Q==" - mode: "0600" - dashboard_enabled: false - NovaEnableRbdBackend: true - CinderEnableRbdBackend: true - CinderRbdBackendName: tripleo_ceph,tripleo_ceph2,tripleo_ceph3 - CinderRbdMultiConfig: - tripleo_ceph2: - CephClusterName: ceph2 - CephClusterFSID: af25554b-42f6-4d2b-9b9b-d08a1132d3e8 - CinderRbdAvailabilityZone: ceph2-AZ - tripleo_ceph3: - CephClusterName: ceph3 - CephClusterFSID: e2cba068-5f14-4b0f-b047-acf375c0004a - CinderRbdAvailabilityZone: ceph3-AZ - CinderBackupBackend: ceph - GlanceEnabledImportMethods: 'glance-direct,web-download,copy-image' - # For copy-image method, we are overriding the policy here to allow - # everyone and every type of image (private or public) to copy. - # This way we will be able to test copy image via non-admin as well - # as on private images. The policy is default to admin only in glance - GlanceApiPolicies: - glance-copy_image: - key: "copy_image" - value: "" - GlanceBackend: cinder - GlanceStoreDescription: 'Cinder glance store' - GlanceMultistoreConfig: - rbd_store: - GlanceBackend: rbd - GlanceStoreDescription: 'RBD glance store' - CephClientUserName: 'glance' - CephClusterName: ceph - GnocchiBackend: rbd - CinderEnableIscsiBackend: false - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. Usage of * - * this system may be monitored and recorded by system personnel. 
* - * Anyone using this system expressly consents to such monitoring * - * and is advised that if such monitoring reveals possible * - * evidence of criminal activity, system personnel may provide * - * the evidence from such monitoring to law enforcement officials.* - ****************************************************************** - EnableSTF: true - MetricsQdrAddresses: - - prefix: collectd - distribution: multicast - MetricsQdrSSLProfiles: - - name: sslProfile - CollectdConnectionType: amqp1 - CollectdAmqpInterval: 5 - CollectdDefaultPollingInterval: 5 - CollectdEnableSensubility: true - CollectdSensubilityTransport: amqp1 - CollectdSensubilityLogLevel: DEBUG - CollectdEnableLibpodstats: true - CollectdAmqpInstances: - notify: - notify: true - format: JSON - presettle: false - telemetry: - format: JSON - presettle: false - CollectdAmqpSendQueueLimit: 40 - LoggingServers: - - host: 127.0.0.1 - port: 24224 - TtyValues: - - console - - tty1 - - tty2 - - tty3 - - tty4 - - tty5 - - tty6 - ContainerCli: podman - CephAdmVerbose: true - CephConfigOverrides: - # put logs in /var/log/ceph/ - log_to_file: true - mon_cluster_log_to_file: true - # disable logging to journald so we don't log twice - log_to_stderr: false - mon_cluster_log_to_stderr: false - log_to_journald: false - mon_cluster_log_to_journald: false - # test values - anotherkey: anothervalue - global: - globalkey: globalvalue - osd: - osdkey: osdvalue - mon: - mon_warn_on_pool_no_redundancy: false - mon_warn_on_insecure_global_id_reclaim_allowed: false - NfsUrl: 127.0.0.1 - CephMsgrSecureMode: true - CephConfigPath: "/etc/ceph" - DisableCephadm: true - LVMFilterEnabled: true - LVMFilterAllowlist: - - /dev/loop2 - - /dev/loop3 - - /dev/loop4 - MultipathdEnable: true - NovaLibvirtVolumeUseMultipath: true - NovaShowHostStatus: all - NovaApiHostStatusPolicy: > - (role:reader and system_scope:all) or (role:reader and project_id:%(project_id)s) - RsyslogElasticsearchSetting: - Server: '127.0.0.1:9200' diff --git a/ci/environments/scenario002-standalone.yaml b/ci/environments/scenario002-standalone.yaml deleted file mode 100644 index 0063381918..0000000000 --- a/ci/environments/scenario002-standalone.yaml +++ /dev/null @@ -1,68 +0,0 @@ -resource_registry: - OS::TripleO::Services::AodhApi: ../../deployment/aodh/aodh-api-container-puppet.yaml - OS::TripleO::Services::AodhEvaluator: ../../deployment/aodh/aodh-evaluator-container-puppet.yaml - OS::TripleO::Services::AodhListener: ../../deployment/aodh/aodh-listener-container-puppet.yaml - OS::TripleO::Services::AodhNotifier: ../../deployment/aodh/aodh-notifier-container-puppet.yaml - OS::TripleO::Services::CeilometerAgentCentral: ../../deployment/ceilometer/ceilometer-agent-central-container-puppet.yaml - OS::TripleO::Services::CeilometerAgentNotification: ../../deployment/ceilometer/ceilometer-agent-notification-container-puppet.yaml - OS::TripleO::Services::ComputeCeilometerAgent: ../../deployment/ceilometer/ceilometer-agent-compute-container-puppet.yaml - OS::TripleO::Services::GnocchiApi: ../../deployment/gnocchi/gnocchi-api-container-puppet.yaml - OS::TripleO::Services::GnocchiMetricd: ../../deployment/gnocchi/gnocchi-metricd-container-puppet.yaml - OS::TripleO::Services::GnocchiStatsd: ../../deployment/gnocchi/gnocchi-statsd-container-puppet.yaml - OS::TripleO::Services::HeatApi: ../../deployment/heat/heat-api-container-puppet.yaml - OS::TripleO::Services::HeatApiCfn: ../../deployment/heat/heat-api-cfn-container-puppet.yaml - OS::TripleO::Services::HeatEngine: 
../../deployment/heat/heat-engine-container-puppet.yaml - OS::TripleO::Services::BarbicanApi: ../../deployment/barbican/barbican-api-container-puppet.yaml - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml - OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml - OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-pacemaker-puppet.yaml - OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-pacemaker-puppet.yaml - OS::TripleO::Services::BarbicanBackendSimpleCrypto: ../../deployment/barbican/barbican-backend-simple-crypto-puppet.yaml - # NOTE(mwhahaha): cheat and use the horizon "service" for our test service - # since disable horizon. This allows us to not have to keep some test service - # in the defaults just for this case. - OS::TripleO::Services::Horizon: ../../deployment/tests/test-container-volume.yaml - -parameter_defaults: - StandaloneExtraConfig: - # NOTE(sileht): To decrease the time test_telemetry_integration takes We - # configure Ceilometer to poll more, We configure the 'high' Gnocchi - # archive policy to keep 1 point every 60s. The test will take 2 minutes - # instead of 10 minutes. Note that tempest telemetry.alarm_granularity must - # in sync with the archive policy, 60s too. - ceilometer::agent::polling::polling_interval: 15 - NotificationDriver: 'messagingv2' - ManagePolling: true - ManagePipeline: true - CeilometerEnableGnocchi: true - PipelinePublishers: - - gnocchi://?archive_policy=ceilometer-high-rate - EventPipelinePublishers: - - gnocchi://?archive_policy=ceilometer-high-rate - Debug: true - DockerPuppetDebug: true - SwiftCeilometerPipelineEnabled: false - BarbicanSimpleCryptoGlobalDefault: true - ContainerCli: podman - # Test mounting a container volume into the heat api container - ContainerTestVolumeName: test_volume - CinderVolumeOptVolumes: - - test_volume:/testvol - GlanceApiOptVolumes: - - test_volume:/testvol - HeatApiOptVolumes: - - test_volume:/testvol - HeatEngineApiOptVolumes: - - test_volume:/testvol - NeutronApiOptVolumes: - - test_volume:/testvol - NovaComputeOptVolumes: - - test_volume:/testvol - StandaloneExtraGroupVars: - cinder_fake_group_var_one: var_one_override - StandaloneParameters: - LVMFilterEnabled: true - LVMFilterAllowlist: - - /dev/allowed - NovaShowHostStatus: unknown-only diff --git a/ci/environments/scenario003-standalone.yaml b/ci/environments/scenario003-standalone.yaml deleted file mode 100644 index 2ecfd26f43..0000000000 --- a/ci/environments/scenario003-standalone.yaml +++ /dev/null @@ -1,48 +0,0 @@ -resource_registry: - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::Horizon: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml - OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml - OS::TripleO::Services::DesignateApi: ../../deployment/designate/designate-api-container-puppet.yaml - OS::TripleO::Services::DesignateCentral: 
../../deployment/designate/designate-central-container-puppet.yaml - OS::TripleO::Services::DesignateProducer: ../../deployment/designate/designate-producer-container-puppet.yaml - OS::TripleO::Services::DesignateWorker: ../../deployment/designate/designate-worker-container-puppet.yaml - OS::TripleO::Services::DesignateMDNS: ../../deployment/designate/designate-mdns-container-puppet.yaml - OS::TripleO::Services::DesignateBind: ../../deployment/designate/designate-bind-container.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml - OS::TripleO::Services::Unbound: ../../deployment/unbound/unbound-container-ansible.yaml - -parameter_defaults: - Debug: true - # we don't deploy Swift so we switch to file backend. - GlanceBackend: 'file' - KeystoneTokenProvider: 'fernet' - SwiftCeilometerPipelineEnabled: false - NotificationDriver: 'noop' - NeutronPluginExtensions: qos,port_security,dns_domain_ports - NotifyPort: 5672 - ContainerCli: podman - DesignateBackendListenIPs: ["192.168.24.20"] - ExtraFirewallRules: - '301 allow arbitrary tcp rule': - dport: 12345 - proto: tcp - source: 127.0.0.1 - action: insert - '302 allow arbitrary udp rule': - dport: 12345 - proto: udp - source: 127.0.0.1 - action: append - StandaloneParameters: - ExtraFirewallRules: - '303 allow arbitrary tcp rule for controller': - dport: 12347 - proto: tcp - source: 127.0.0.1 - action: insert diff --git a/ci/environments/scenario004-standalone.yaml b/ci/environments/scenario004-standalone.yaml deleted file mode 100644 index 8dd0701af7..0000000000 --- a/ci/environments/scenario004-standalone.yaml +++ /dev/null @@ -1,87 +0,0 @@ -resource_registry: - OS::TripleO::Services::Aide: ../../deployment/aide/aide-baremetal-ansible.yaml - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::Redis: OS::Heat::None - OS::TripleO::Services::Horizon: ../../deployment/horizon/horizon-container-puppet.yaml - OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml - OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml - OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml - OS::TripleO::Services::CephGrafana: ../../deployment/cephadm/ceph-grafana.yaml - OS::TripleO::Services::CephMds: ../../deployment/cephadm/ceph-mds.yaml - OS::TripleO::Services::CephNfs: ../../deployment/cephadm/ceph-nfs.yaml - OS::TripleO::Services::CephRgw: ../../deployment/cephadm/ceph-rgw.yaml - OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - OS::TripleO::Services::ManilaApi: ../../deployment/manila/manila-api-container-puppet.yaml - OS::TripleO::Services::ManilaScheduler: ../../deployment/manila/manila-scheduler-container-puppet.yaml - OS::TripleO::Services::ManilaShare: ../../deployment/manila/manila-share-pacemaker-puppet.yaml - OS::TripleO::Services::ManilaBackendCephFs: ../../deployment/manila/manila-backend-cephfs.yaml - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml - OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml - OS::TripleO::Services::HAproxy: ../../deployment/haproxy/haproxy-pacemaker-puppet.yaml - 
OS::TripleO::Services::Pacemaker: ../../deployment/pacemaker/pacemaker-baremetal-puppet.yaml - OS::TripleO::Services::PacemakerRemote: ../../deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml - OS::TripleO::Services::Clustercheck: ../../deployment/pacemaker/clustercheck-container-puppet.yaml - OS::TripleO::Services::MySQL: ../../deployment/database/mysql-pacemaker-puppet.yaml - OS::TripleO::Services::LoginDefs: ../../deployment/login-defs/login-defs-baremetal-ansible.yaml - -parameter_defaults: - GlanceSparseUploadEnabled: true - ManagePolling: true - Debug: true - CephEnableDashboard: false - CephDashboardPort: 8445 - CephAdmVerbose: true - GrafanaDashboardPort: 3200 - DeployedCeph: true - CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' - CephRgwKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ==' - NovaEnableRbdBackend: true - GlanceBackend: rbd - CeilometerEnableGnocchi: true - GnocchiBackend: rbd - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. Usage of * - * this system may be monitored and recorded by system personnel. * - * Anyone using this system expressly consents to such monitoring * - * and is advised that if such monitoring reveals possible * - * evidence of criminal activity, system personnel may provide * - * the evidence from such monitoring to law enforcement officials.* - ****************************************************************** - CollectdExtraPlugins: - - rrdtool - LoggingServers: - - host: 127.0.0.1 - port: 24224 - TtyValues: - - console - - tty1 - - tty2 - - tty3 - - tty4 - - tty5 - - tty6 - CephConfigOverrides: - # put logs in /var/log/ceph/ - log_to_file: true - mon_cluster_log_to_file: true - # disable logging to journald so we don't log twice - log_to_stderr: false - mon_cluster_log_to_stderr: false - log_to_journald: false - mon_cluster_log_to_journald: false - # test values - globalkey: globalvalue - anotherkey: anothervalue - mon: - mon_warn_on_pool_no_redundancy: false - mon_warn_on_insecure_global_id_reclaim_allowed: false - ManilaCephFSCephFSProtocolHelperType: 'NFS' - DisableCephadm: true - CephConfigPath: "/etc/ceph" - CephObservabilityDataOnly: true diff --git a/ci/environments/scenario007-multinode-containers.yaml b/ci/environments/scenario007-multinode-containers.yaml deleted file mode 100644 index b65f5f7e49..0000000000 --- a/ci/environments/scenario007-multinode-containers.yaml +++ /dev/null @@ -1,114 +0,0 @@ -resource_registry: - OS::TripleO::Services::NeutronMl2PluginBase: ../../deployment/neutron/neutron-plugin-ml2.yaml - OS::TripleO::Services::NeutronApi: ../../deployment/neutron/neutron-api-container-puppet.yaml - OS::TripleO::Services::NeutronCorePlugin: ../../deployment/neutron/neutron-plugin-ml2-container-puppet.yaml - OS::TripleO::Services::NeutronMetadataAgent: ../../deployment/neutron/neutron-metadata-container-puppet.yaml - OS::TripleO::Services::NeutronOvsAgent: ../../deployment/neutron/neutron-ovs-agent-container-puppet.yaml - OS::TripleO::Services::NeutronDhcpAgent: ../../deployment/neutron/neutron-dhcp-container-puppet.yaml - OS::TripleO::Services::NeutronL3Agent: ../../deployment/neutron/neutron-l3-container-puppet.yaml - OS::TripleO::Services::ComputeNeutronOvsAgent: ../../deployment/neutron/neutron-ovs-agent-container-puppet.yaml - - OS::TripleO::Services::NovaMigrationTarget: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - 
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - # Disable OVN services - OS::TripleO::Services::OVNController: OS::Heat::None - OS::TripleO::Services::OVNDBs: OS::Heat::None - OS::TripleO::Services::OVNMetadataAgent: OS::Heat::None - # environments/docker-ha.yaml enables the cinder service. So disable cinder services - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderBackup: OS::Heat::None - OS::TripleO::Services::BlockStorageCinderVolume: OS::Heat::None -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2' - ControllerServices: - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::Clustercheck - - OS::TripleO::Services::ContainerImagePrepare - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::Keystone - - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceApiInternal - - OS::TripleO::Services::HeatApi - - OS::TripleO::Services::HeatApiCfn - - OS::TripleO::Services::HeatEngine - - OS::TripleO::Services::MySQL - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::NeutronApi - - OS::TripleO::Services::NeutronCorePlugin - - OS::TripleO::Services::NeutronDhcpAgent - - OS::TripleO::Services::NeutronL3Agent - - OS::TripleO::Services::NeutronMetadataAgent - - OS::TripleO::Services::NeutronOvsAgent - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::OsloMessagingNotify - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::Memcached - - OS::TripleO::Services::Pacemaker - - OS::TripleO::Services::NovaConductor - - OS::TripleO::Services::NovaApi - - OS::TripleO::Services::PlacementApi - - OS::TripleO::Services::NovaMetadata - - OS::TripleO::Services::NovaScheduler - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::NovaCompute - - OS::TripleO::Services::NovaLibvirt - - OS::TripleO::Services::NovaMigrationTarget - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::Iscsid - - OS::TripleO::Services::Multipathd - ControllerExtraConfig: - nova::compute::libvirt::services::libvirt_virt_type: qemu - nova::compute::libvirt::virt_type: qemu - # Required for Centos 7.3 and Qemu 2.6.0 - nova::compute::libvirt::cpu_mode: 'none' - # For ML2/OVS - NeutronMechanismDrivers: 'openvswitch' - NeutronPluginExtensions: "qos,port_security" - NeutronTypeDrivers: 'vxlan,vlan,flat,gre' - NeutronNetworkType: 'vxlan' - NeutronServicePlugins: 'router,qos,segments,trunk' - NeutronVniRanges: ['1:4094', ] - NeutronEnableDVR: false - DhcpAgentNotification: true - KernelIpNonLocalBind: 0 - Debug: true - DockerPuppetDebug: True - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. Usage of * - * this system may be monitored and recorded by system personnel. 
* - * Anyone using this system expressly consents to such monitoring * - * and is advised that if such monitoring reveals possible * - * evidence of criminal activity, system personnel may provide * - * the evidence from such monitoring to law enforcement officials.* - ****************************************************************** - MessageOfTheDay: | - This is a test message of MessageOfTheDay - ControllerParameters: - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. Usage of * - * this system may be monitored and recorded by system personnel. * - * Anyone using this system expressly consents to such monitoring * - * and is advised that if such monitoring reveals possible * - * evidence of criminal activity, system personnel may provide * - * the evidence from such monitoring to law enforcement officials.* - ****************************************************************** - This node is a Controller node. - MessageOfTheDay: | - This is a test message of MessageOfTheDay - This node is a Controller node. - # we don't deploy Swift so we switch to file backend. - GlanceBackend: 'file' - IronicCleaningDiskErase: 'metadata' - NotificationDriver: 'noop' diff --git a/ci/environments/scenario007-standalone.yaml b/ci/environments/scenario007-standalone.yaml deleted file mode 100644 index 13ace9c438..0000000000 --- a/ci/environments/scenario007-standalone.yaml +++ /dev/null @@ -1,51 +0,0 @@ -resource_registry: - OS::TripleO::Services::NeutronMl2PluginBase: ../../deployment/neutron/neutron-plugin-ml2.yaml - OS::TripleO::Services::NeutronApi: ../../deployment/neutron/neutron-api-container-puppet.yaml - OS::TripleO::Services::NeutronCorePlugin: ../../deployment/neutron/neutron-plugin-ml2-container-puppet.yaml - OS::TripleO::Services::NeutronMetadataAgent: ../../deployment/neutron/neutron-metadata-container-puppet.yaml - OS::TripleO::Services::NeutronOvsAgent: ../../deployment/neutron/neutron-ovs-agent-container-puppet.yaml - OS::TripleO::Services::NeutronDhcpAgent: ../../deployment/neutron/neutron-dhcp-container-puppet.yaml - OS::TripleO::Services::NeutronL3Agent: ../../deployment/neutron/neutron-l3-container-puppet.yaml - - OS::TripleO::Services::BlockStorageCinderVolume: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderBackup: OS::Heat::None - OS::TripleO::Services::Horizon: OS::Heat::None - OS::TripleO::Services::NovaMigrationTarget: OS::Heat::None - # Disable OVN services - OS::TripleO::Services::OVNController: OS::Heat::None - OS::TripleO::Services::OVNDBs: OS::Heat::None - OS::TripleO::Services::OVNMetadataAgent: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None -parameter_defaults: - DhcpAgentNotification: true - NeutronMechanismDrivers: ['openvswitch'] - NeutronTypeDrivers: 'vxlan,vlan,flat,gre' - NeutronNetworkType: 'vxlan' - NeutronPluginExtensions: "qos,port_security" - NeutronServicePlugins: 'router,qos,segments,trunk' - NeutronVniRanges: ['1:4094', ] - NeutronEnableDVR: false - NeutronL3HA: true - KernelIpNonLocalBind: 0 - Debug: true - DockerPuppetDebug: True - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. 
Usage of * - * this system may be monitored and recorded by system personnel. * - * Anyone using this system expressly consents to such monitoring * - * and is advised that if such monitoring reveals possible * - * evidence of criminal activity, system personnel may provide * - * the evidence from such monitoring to law enforcement officials.* - ****************************************************************** - MessageOfTheDay: | - This is a test message of MessageOfTheDay - # we don't deploy Swift so we switch to file backend. - GlanceBackend: 'file' - NotificationDriver: 'noop' - ContainerCli: podman diff --git a/ci/environments/scenario010-multinode-containers.yaml b/ci/environments/scenario010-multinode-containers.yaml deleted file mode 100644 index 016877c08e..0000000000 --- a/ci/environments/scenario010-multinode-containers.yaml +++ /dev/null @@ -1,104 +0,0 @@ -resource_registry: - OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml - OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml - OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml - OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderBackup: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::Clustercheck: ../../deployment/pacemaker/clustercheck-container-puppet.yaml - OS::TripleO::Services::HAproxy: ../../deployment/haproxy/haproxy-pacemaker-puppet.yaml - OS::TripleO::Services::MySQL: ../../deployment/database/mysql-pacemaker-puppet.yaml - OS::TripleO::Services::OctaviaApi: ../../deployment/octavia/octavia-api-container-puppet.yaml - OS::TripleO::Services::OctaviaDeploymentConfig: ../../deployment/octavia/octavia-deployment-config.yaml - OS::TripleO::Services::OctaviaHealthManager: ../../deployment/octavia/octavia-health-manager-container-puppet.yaml - OS::TripleO::Services::OctaviaHousekeeping: ../../deployment/octavia/octavia-housekeeping-container-puppet.yaml - OS::TripleO::Services::OctaviaWorker: ../../deployment/octavia/octavia-worker-container-puppet.yaml - OS::TripleO::Services::Pacemaker: ../../deployment/pacemaker/pacemaker-baremetal-puppet.yaml - OS::TripleO::Services::PacemakerRemote: ../../deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-container-puppet.yaml - OS::TripleO::Services::SwiftDispersion: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - -parameter_defaults: - ControllerNetworkConfigTemplate: 'templates/ci/multinode.j2' - ComputeNetworkConfigTemplate: 'templates/ci/multinode.j2' - ControllerServices: - - OS::TripleO::Services::CACerts - - OS::TripleO::Services::Clustercheck - - OS::TripleO::Services::ContainerImagePrepare - - OS::TripleO::Services::Podman - - OS::TripleO::Services::Kernel - - OS::TripleO::Services::Keystone - - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceApiInternal - - OS::TripleO::Services::MySQL - - OS::TripleO::Services::MySQLClient - - OS::TripleO::Services::NeutronApi - - OS::TripleO::Services::NeutronCorePlugin - - OS::TripleO::Services::OVNDBs - - OS::TripleO::Services::OVNController - - OS::TripleO::Services::OVNMetadataAgent - - OS::TripleO::Services::OctaviaApi - - 
OS::TripleO::Services::OctaviaDeploymentConfig - - OS::TripleO::Services::OctaviaHealthManager - - OS::TripleO::Services::OctaviaHousekeeping - - OS::TripleO::Services::OctaviaWorker - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::OsloMessagingNotify - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::Memcached - - OS::TripleO::Services::Pacemaker - - OS::TripleO::Services::NovaConductor - - OS::TripleO::Services::NovaApi - - OS::TripleO::Services::PlacementApi - - OS::TripleO::Services::NovaMetadata - - OS::TripleO::Services::NovaScheduler - - OS::TripleO::Services::Snmp - - OS::TripleO::Services::Timesync - - OS::TripleO::Services::Timezone - - OS::TripleO::Services::NovaCompute - - OS::TripleO::Services::NovaLibvirt - - OS::TripleO::Services::NovaMigrationTarget - - OS::TripleO::Services::TripleoPackages - - OS::TripleO::Services::TripleoFirewall - - OS::TripleO::Services::Sshd - - OS::TripleO::Services::CephMgr - - OS::TripleO::Services::CephMon - - OS::TripleO::Services::CephOSD - - OS::TripleO::Services::CephClient - - OS::TripleO::Services::Iscsid - - OS::TripleO::Services::Multipathd - ControllerExtraConfig: - nova::compute::libvirt::services::libvirt_virt_type: qemu - nova::compute::libvirt::virt_type: qemu - octavia::controller::connection_retry_interval: 10 - Debug: true - CephPools: - - name: altrbd - rule_name: replicated_rule - CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19' - CephClusterName: mycephcluster - CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' - NovaEnableRbdBackend: true - CeilometerEnableGnocchi: true - GlanceBackend: rbd - GnocchiRbdPoolName: metrics - SwiftCeilometerPipelineEnabled: false - NeutronEnableForceMetadata: true - OctaviaAmphoraImageFilename: /home/zuul/amphora.qcow2 - OctaviaManageNovaFlavor: true - # For now, we hardcode it but soon it'll be generated in tripleo-common - OctaviaCaKeyPassphrase: 'upstreamci' - OctaviaGenerateCerts: true - ContainerCli: podman - CephConfigPath: "/etc/ceph" - CephClientConfigVars: "{{ playbook_dir }}/cephadm/ceph_client.yml" - CephSpecFqdn: true - CephOsdSpec: - data_devices: - paths: - - /dev/ceph_vg/ceph_lv_data diff --git a/ci/environments/scenario010-standalone.yaml b/ci/environments/scenario010-standalone.yaml deleted file mode 100644 index f31ee0896d..0000000000 --- a/ci/environments/scenario010-standalone.yaml +++ /dev/null @@ -1,70 +0,0 @@ -resource_registry: - OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml - OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml - OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml - OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderBackup: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::Horizon: ../../deployment/horizon/horizon-container-puppet.yaml - OS::TripleO::Services::OctaviaApi: ../../deployment/octavia/octavia-api-container-puppet.yaml - OS::TripleO::Services::OctaviaDeploymentConfig: ../../deployment/octavia/octavia-deployment-config.yaml - OS::TripleO::Services::OctaviaHealthManager: ../../deployment/octavia/octavia-health-manager-container-puppet.yaml - OS::TripleO::Services::OctaviaHousekeeping: ../../deployment/octavia/octavia-housekeeping-container-puppet.yaml - OS::TripleO::Services::OctaviaWorker: 
../../deployment/octavia/octavia-worker-container-puppet.yaml - OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-container-puppet.yaml - OS::TripleO::Services::SwiftDispersion: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - -parameter_defaults: - StandaloneExtraConfig: - octavia::controller::connection_retry_interval: 10 - OctaviaAmphoraSshKeyFile: /home/zuul/.ssh/id_rsa.pub - OctaviaAmphoraImageFilename: /home/zuul/amphora.qcow2 - OctaviaCaKeyPassphrase: 'upstreamci' - OctaviaManageNovaFlavor: true - OctaviaGenerateCerts: true - NodeDataLookup: - AB4114B1-9C9D-409A-BEFB-D88C151BF2C3: {"foo": "bar"} - 8CF1A7EA-7B4B-4433-AC83-17675514B1B8: {"foo2": "bar2"} - Debug: true - HideSensitiveLogs: false - #NOTE: These ID's and keys should be regenerated for - # a production deployment. What is here is suitable for - # developer and CI testing only. - DeployedCeph: true - CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' - #NOTE: This scenario uses rbd backend to test the deployment feature to - # upload the amphora image in raw format instead of qcow2 format. - # (See https://bugs.launchpad.net/tripleo/+bug/1778303 for details) - NovaEnableRbdBackend: true - GlanceBackend: rbd - BannerText: | - ****************************************************************** - * This system is for the use of authorized users only. Usage of * - * this system may be monitored and recorded by system personnel. 
*
-    * Anyone using this system expressly consents to such monitoring *
-    * and is advised that if such monitoring reveals possible *
-    * evidence of criminal activity, system personnel may provide *
-    * the evidence from such monitoring to law enforcement officials.*
-    ******************************************************************
-  CollectdExtraPlugins:
-    - rrdtool
-  LoggingServers:
-    - host: 127.0.0.1
-      port: 24224
-  TtyValues:
-    - console
-    - tty1
-    - tty2
-    - tty3
-    - tty4
-    - tty5
-    - tty6
-  ContainerCli: podman
-  CephConfigPath: "/etc/ceph"
diff --git a/ci/environments/scenario012-standalone.yaml b/ci/environments/scenario012-standalone.yaml
deleted file mode 100644
index 6bb824ee11..0000000000
--- a/ci/environments/scenario012-standalone.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# touching file so CI runs scenario012
-resource_registry:
-  OS::TripleO::Services::NovaIronic: ../../deployment/nova/nova-ironic-container-puppet.yaml
-  OS::TripleO::Services::IronicApi: ../../deployment/ironic/ironic-api-container-puppet.yaml
-  OS::TripleO::Services::IronicConductor: ../../deployment/ironic/ironic-conductor-container-puppet.yaml
-  OS::TripleO::Services::IronicPxe: ../../deployment/ironic/ironic-pxe-container-puppet.yaml
-  OS::TripleO::NodeExtraConfigPost: ../common/ironic_standalone_post.yaml
-  OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-container-puppet.yaml
-  OS::TripleO::Services::NeutronDhcpAgent: ../../deployment/neutron/neutron-dhcp-container-puppet.yaml
-  OS::TripleO::Services::NovaCompute: OS::Heat::None
-  OS::TripleO::Services::NovaLibvirt: OS::Heat::None
-  OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml
-
-parameter_defaults:
-  Debug: true
-  DockerPuppetDebug: True
-  NotificationDriver: 'noop'
-  SwiftCeilometerPipelineEnabled: false
-  IronicCleaningDiskErase: 'metadata'
-  NovaSchedulerDiscoverHostsInCellsInterval: 15
-  NeutronMechanismDrivers: ovn
-  NeutronNetworkType: vlan
-  NeutronTypeDrivers: local,geneve,vlan,flat
-  DhcpAgentNotification: true
-  IronicApiMaxRetries: 180
-  IronicDefaultNetworkInterface: neutron
-  IronicAutomatedClean: false
-  NeutronFlatNetworks: datacentre
-  NeutronNetworkVLANRanges: 'datacentre:500:599,tenant:300:399'
diff --git a/ci/environments/scenario013-standalone.yaml b/ci/environments/scenario013-standalone.yaml
deleted file mode 100644
index 562b42ae23..0000000000
--- a/ci/environments/scenario013-standalone.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-# This scenario is the same as scenario010 but without Ceph.
-resource_registry: - OS::TripleO::Services::CinderApi: OS::Heat::None - OS::TripleO::Services::CinderBackup: OS::Heat::None - OS::TripleO::Services::CinderScheduler: OS::Heat::None - OS::TripleO::Services::CinderVolume: OS::Heat::None - OS::TripleO::Services::Horizon: OS::Heat::None - OS::TripleO::Services::OctaviaApi: ../../deployment/octavia/octavia-api-container-puppet.yaml - OS::TripleO::Services::OctaviaDeploymentConfig: ../../deployment/octavia/octavia-deployment-config.yaml - OS::TripleO::Services::OctaviaHealthManager: ../../deployment/octavia/octavia-health-manager-container-puppet.yaml - OS::TripleO::Services::OctaviaHousekeeping: ../../deployment/octavia/octavia-housekeeping-container-puppet.yaml - OS::TripleO::Services::OctaviaWorker: ../../deployment/octavia/octavia-worker-container-puppet.yaml - OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-container-puppet.yaml - OS::TripleO::Services::Redis: ../../deployment/database/redis-container-puppet.yaml - OS::TripleO::Services::SwiftDispersion: OS::Heat::None - OS::TripleO::Services::SwiftProxy: OS::Heat::None - OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None - OS::TripleO::Services::SwiftStorage: OS::Heat::None - -parameter_defaults: - StandaloneExtraConfig: - octavia::controller::connection_retry_interval: 10 - OctaviaAmphoraSshKeyFile: /home/zuul/.ssh/id_rsa.pub - OctaviaAmphoraImageFilename: /home/zuul/amphora.qcow2 - OctaviaCaKeyPassphrase: 'upstreamci' - OctaviaManageNovaFlavor: true - OctaviaGenerateCerts: true - Debug: true - GlanceBackend: 'file' - ContainerCli: podman diff --git a/ci/environments/standalone-ipa.yaml b/ci/environments/standalone-ipa.yaml deleted file mode 100644 index 31daf18851..0000000000 --- a/ci/environments/standalone-ipa.yaml +++ /dev/null @@ -1,22 +0,0 @@ -resource_registry: - OS::TripleO::Services::IpaClient: ../../deployment/ipa/ipaservices-baremetal-ansible.yaml - -parameter_defaults: - CloudDomain: "ooo.test" - CloudName: "overcloud.ooo.test" - CloudNameInternal: "overcloud.internalapi.ooo.test" - CloudNameStorage: "overcloud.storage.ooo.test" - CloudNameStorageManagement: "overcloud.storagemgmt.ooo.test" - CloudNameCtlplane: "overcloud.ctlplane.ooo.test" - IdMServer: "ipa.ooo.test" - IdMDomain: "ooo.test" - IdMAdminPassword: "fce95318204114530f31f885c9df588f" - # domain name used by the host - NeutronDnsDomain: "ooo.test" - PasswordAuthentication: 'yes' - HAProxyExternalNetwork: ctlplane - # Add setting for custom DNS server - # DnsServers: ["ip_for_dns_server"] - DnsSearchDomains: ["ooo.test"] - LocalContainerRegistry: "192.168.24.1" - AddVipsToEtcHosts: True diff --git a/ci/network_data.yaml b/ci/network_data.yaml deleted file mode 100644 index 576bb514aa..0000000000 --- a/ci/network_data.yaml +++ /dev/null @@ -1,50 +0,0 @@ -- name: External - name_lower: external - mtu: 1350 - vip: true - subnets: - external_subnet: - ip_subnet: 10.0.0.0/24 - gateway_ip: 10.0.0.1 - allocation_pools: - - start: 10.0.0.10 - end: 10.0.0.50 -- name: InternalApi - name_lower: internal_api - mtu: 1350 - vip: true - subnets: - internal_api_subnet: - ip_subnet: 172.17.0.0/24 - allocation_pools: - - start: 172.17.0.10 - end: 172.17.0.250 -- name: Storage - name_lower: storage - mtu: 1350 - vip: true - subnets: - storage_subnet: - ip_subnet: 172.18.0.0/24 - allocation_pools: - - start: 172.18.0.10 - end: 172.18.0.250 -- name: StorageMgmt - name_lower: storage_mgmt - mtu: 1350 - vip: true - subnets: - storage_mgmt_subnet: - ip_subnet: 172.19.0.0/24 - allocation_pools: 
- - start: 172.19.0.10 - end: 172.19.0.250 -- name: Tenant - name_lower: tenant - mtu: 1350 - subnets: - tenant_subnet: - ip_subnet: 172.16.0.0/24 - allocation_pools: - - start: 172.16.0.10 - end: 172.16.0.250 diff --git a/ci/network_data_v6.yaml b/ci/network_data_v6.yaml deleted file mode 100644 index 3862711c53..0000000000 --- a/ci/network_data_v6.yaml +++ /dev/null @@ -1,54 +0,0 @@ -- name: External - name_lower: external - mtu: 1350 - vip: true - ipv6: true - subnets: - external_subnet: - ipv6_subnet: 2001:db8:fd00:1000::/64 - gateway_ipv6: 2001:db8:fd00:1000::1 - ipv6_allocation_pools: - - start: 2001:db8:fd00:1000::10 - end: 2001:db8:fd00:1000:ffff:ffff:ffff:fffe -- name: InternalApi - name_lower: internal_api - mtu: 1350 - vip: true - ipv6: true - subnets: - internal_api_subnet: - ipv6_subnet: fd00:fd00:fd00:2000::/64 - ipv6_allocation_pools: - - start: fd00:fd00:fd00:2000::10 - end: fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe -- name: Storage - name_lower: storage - mtu: 1350 - vip: true - ipv6: true - subnets: - storage_subnet: - ipv6_subnet: fd00:fd00:fd00:3000::/64 - ipv6_allocation_pools: - - start: fd00:fd00:fd00:3000::10 - end: fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe -- name: StorageMgmt - name_lower: storage_mgmt - mtu: 1350 - vip: true - ipv6: true - subnets: - storage_mgmt_subnet: - ipv6_subnet: fd00:fd00:fd00:4000::/64 - ipv6_allocation_pools: - - start: fd00:fd00:fd00:4000::10 - end: fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe -- name: Tenant - name_lower: tenant - mtu: 1350 - subnets: - tenant_subnet: - ip_subnet: 172.16.0.0/24 - allocation_pools: - - start: 172.16.0.10 - end: 172.16.0.250 diff --git a/ci/vip_data.yaml b/ci/vip_data.yaml deleted file mode 100644 index 653130bd1d..0000000000 --- a/ci/vip_data.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- network: storage_mgmt -- network: internal_api -- network: storage -- ip_address: 10.0.0.5 # CI jobs rely on this fixed ip for the external VIP - network: external -- network: ctlplane diff --git a/ci/vip_data_v6.yaml b/ci/vip_data_v6.yaml deleted file mode 100644 index 74c69c629f..0000000000 --- a/ci/vip_data_v6.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- network: storage_mgmt -- network: internal_api -- network: storage -- ip_address: 2001:DB8:FD00:1000:0:0:0:5 # CI jobs rely on this fixed ip for the external VIP - network: external -- network: ctlplane diff --git a/common/common-container-config-scripts.yaml b/common/common-container-config-scripts.yaml deleted file mode 100644 index 95b50cb56a..0000000000 --- a/common/common-container-config-scripts.yaml +++ /dev/null @@ -1,25 +0,0 @@ -- name: Create fcontext entry for container-config-scripts - community.general.sefcontext: - target: "/var/lib/container-config-scripts(/.*)?" 
- setype: container_file_t - state: present - tags: - - container_config_scripts - -- name: Create /var/lib/container-config-scripts - become: true - file: - path: /var/lib/container-config-scripts - state: directory - setype: container_file_t - tags: - - container_config_scripts - -- name: Write container config scripts - become: true - no_log: True - tripleo_container_config_scripts: - config_data: "{{ lookup('file', tripleo_role_name + '/container_config_scripts.yaml', errors='ignore') | default({}, True) | from_yaml }}" - config_dir: /var/lib/container-config-scripts - tags: - - container_config_scripts diff --git a/common/common-container-setup-tasks.yaml b/common/common-container-setup-tasks.yaml deleted file mode 100644 index dd24a31d5f..0000000000 --- a/common/common-container-setup-tasks.yaml +++ /dev/null @@ -1,104 +0,0 @@ -- name: Create and ensure setype for /var/log/containers directory - become: true - file: - path: /var/log/containers - state: directory - selevel: s0 - mode: 0750 - tags: - - host_config - -- name: Create ContainerLogStdoutPath directory - become: true - file: - path: "{{ container_log_stdout_path }}" - state: directory - selevel: s0 - tags: - - host_config - -- name: Create /var/lib/tripleo-config directory - become: true - file: - path: /var/lib/tripleo-config - state: directory - setype: container_file_t - selevel: s0 - recurse: true - tags: - - host_config - - container_config - - container_config_tasks - - container_config_scripts - - container_startup_configs - -# The container config files -# /var/lib/container-startup-configs.json is removed as we now write -# per-step files instead -- name: Clean old /var/lib/container-startup-configs.json file - become: true - file: - path: /var/lib/container-startup-configs.json - state: absent - tags: - - container_startup_configs - -# For legacy, can be removed in Train cycle -- name: Clean old /var/lib/docker-container-startup-configs.json file - become: true - file: - path: /var/lib/docker-container-startup-configs.json - state: absent - tags: - - container_startup_configs - -- import_tasks: common_container_config_scripts.yaml - -# This file location is deprecated and the new location is now: -# /var/lib/tripleo-config/container-startup-config/step_X/.json -# Can be removed in V cycle -- name: Write /var/lib/tripleo-config/container-startup-config-readme.txt - become: true - no_log: True - copy: - content: "Container startup configs moved to /var/lib/tripleo-config/container-startup-config" - dest: /var/lib/tripleo-config/container-startup-config-readme.txt - force: true - mode: '0600' - tags: - - container_startup_configs - -- name: Generate startup configs files per step and per container - become: true - container_startup_config: - config_base_dir: /var/lib/tripleo-config/container-startup-config - config_data: "{{ lookup('template', (tripleo_role_name + '/docker_config.yaml'), errors='ignore') | default({}, True) | from_yaml }}" - tags: - - container_startup_configs - -- name: Create /var/lib/kolla/config_files directory - become: true - file: - path: /var/lib/kolla/config_files - state: directory - setype: container_file_t - selevel: s0 - recurse: true - tags: - - container_startup_configs - -- name: Create /var/lib/config-data directory - become: true - file: - path: /var/lib/config-data - mode: 0755 - state: directory - setype: container_file_t - selevel: s0 - -- name: Write container config json files - become: true - tripleo_container_configs: - config_data: "{{ lookup('file', tripleo_role_name + 
'/kolla_config.yaml', errors='ignore') | default({}, True) | from_yaml }}" - tags: - - container_startup_configs diff --git a/common/container-puppet.sh b/common/container-puppet.sh deleted file mode 100644 index b991b47a49..0000000000 --- a/common/container-puppet.sh +++ /dev/null @@ -1,180 +0,0 @@ -#!/bin/bash -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -set -e -[ "$DEBUG" = "false" ] || set -x -rm -rf /etc/puppet -mkdir -p /etc/puppet -cp -dR /tmp/puppet-etc/* /etc/puppet -rm -Rf /etc/puppet/ssl # not in use and causes permission errors -echo "{\"step\": $STEP}" > /etc/puppet/hieradata/docker_puppet.json -TAGS="" -if [ -n "$PUPPET_TAGS" ]; then - TAGS="--tags \"$PUPPET_TAGS\"" -fi - -if [ ! -z ${STEP_CONFIG+x} ]; then - # package and service should NOT be managed inside containers - echo -e "noop_resource('package')" | tee /etc/config.pp - echo -e "noop_resource('service')" | tee -a /etc/config.pp - - echo -e "${STEP_CONFIG}" | tee -a /etc/config.pp -fi - -CHECK_MODE="" -if [ -d "/tmp/puppet-check-mode" ]; then - mkdir -p /etc/puppet/check-mode - cp -a /tmp/puppet-check-mode/* /etc/puppet/check-mode - CHECK_MODE="--hiera_config /etc/puppet/check-mode/hiera.yaml" -fi - -# Create a reference timestamp to easily find all files touched by -# puppet. The sync ensures we get all the files we want due to -# different timestamp. -conf_data_path="/var/lib/config-data/${NAME}" -origin_of_time="${conf_data_path}.origin_of_time" -touch $origin_of_time -sync - -export NET_HOST="${NET_HOST:-false}" -set +e -if [ "$NET_HOST" == "false" ]; then - export FACTER_hostname=$HOSTNAME -fi -export FACTER_uuid=$(cat /sys/class/dmi/id/product_uuid | tr '[:upper:]' '[:lower:]') -echo 'Running puppet' -# FIXME(bogdando): stdout may be falling behind of the logged syslog messages -set -x -/usr/bin/puppet apply --summarize \ - --detailed-exitcodes \ - --color=false \ - --modulepath=/etc/puppet/modules:/usr/share/openstack-puppet/modules \ - $TAGS \ - $CHECK_MODE \ - /etc/config.pp \ - 2>&1 | logger -s -t puppet-user -rc=${PIPESTATUS[0]} -[ "$DEBUG" = "false" ] && set +x -set -e -if [ $rc -ne 2 -a $rc -ne 0 ]; then - exit $rc -fi - -verbosity="" -[ "$DEBUG" = "false" ] || verbosity="-v" - -# Disables archiving -if [ -z "$NO_ARCHIVE" ]; then - archivedirs=("/etc" "/root" "/opt" "/var/www" "/var/spool/cron" "/var/lib/nova/.ssh") - rsync_srcs="" - for d in "${archivedirs[@]}"; do - if [ -d "$d" ]; then - rsync_srcs+=" $d" - fi - done - # On stack update, if a password was changed in a config file, - # some services (e.g. mysql) must change their internal state - # (e.g. password in mysql DB) when tripleo_container_manage restarts them; - # and they need the old password to achieve that. - # For those services, we update the config hash to notify - # tripleo_container_manage that a restart is needed, but we do not update - # the password file in container-puppet if the file already existed - # before and let the service regenerate it instead. 
- password_files="/root/.my.cnf" - - exclude_files="" - for p in $password_files; do - if [ -f "$p" -a -f "${conf_data_path}$p" ]; then - exclude_files+=" --exclude=$p" - fi - done - - # Exclude read-only mounted directories/files which we do not want - # to copy or delete. - ro_files="/etc/puppet/ /etc/puppetlabs/ /opt/puppetlabs/ /etc/pki/ca-trust/extracted " - ro_files+="/etc/pki/ca-trust/source/anchors /etc/pki/tls/certs/ca-bundle.crt " - ro_files+="/etc/pki/tls/certs/ca-bundle.trust.crt /etc/pki/tls/cert.pem " - ro_files+="/etc/hosts /etc/localtime /etc/hostname " - # /etc/openldap is bind mounted with "ro" option in keystone containers. - ro_files+="/etc/openldap" - for ro in $ro_files; do - if [ -e "$ro" ]; then - exclude_files+=" --exclude=$ro" - fi - done - - echo "Evaluating config files to be removed for the $NAME configuration" - TMPFILE=$(mktemp /tmp/tmp.XXXXXXXXXX) - TMPFILE2=$(mktemp /tmp/tmp.XXXXXXXXXX) - trap 'rm -rf $TMPFILE $TMPFILE2' EXIT INT HUP - rsync -av -R --dry-run --delete-after $exclude_files $rsync_srcs ${conf_data_path} |\ - awk '/^deleting/ {print $2}' > $TMPFILE - - echo "Rsyncing config files from ${rsync_srcs} into ${conf_data_path}" - rsync -a $verbosity -R --delay-updates --delete-after $exclude_files $rsync_srcs ${conf_data_path} - - # Also make a copy of files modified during puppet run - echo "Gathering files modified after $(stat -c '%y' $origin_of_time)" - - # Purge obsoleted contents to maintain a fresh and filtered mirror - puppet_generated_path=/var/lib/config-data/puppet-generated/${NAME} - mkdir -p ${puppet_generated_path} - echo "Ensuring the removed config files are also purged in ${puppet_generated_path}:" - cat $TMPFILE | sort - cat $TMPFILE | xargs -n1 -r -I{} \ - bash -c "rm -rf ${puppet_generated_path}/{}" - exec 5>&1 - exec 1>$TMPFILE2 - find $rsync_srcs -newer $origin_of_time -not -path '/etc/puppet*' -print0 - exec 1>&5 - echo "Files modified during puppet run:" - cat $TMPFILE2 | xargs -0 printf "%s\n" | sort -h - echo "Rsyncing the modified files into ${puppet_generated_path}" - rsync -a $verbosity -R -0 --delay-updates --delete-after $exclude_files \ - --files-from=$TMPFILE2 / ${puppet_generated_path} - - # Cleanup any special files that might have been copied into place - # previously because fixes for LP#1860607 did not cleanup and required - # manual intervention if a container hit this. We can safely remove these - # files because they should be bind mounted into containers - for ro in $ro_files; do - if [ -e "${puppet_generated_path}/${ro}" ]; then - rm -rf "${puppet_generated_path}/${ro}" - fi - done - - # Write a checksum of the config-data dir, this is used as a - # salt to trigger container restart when the config changes - # note: while being excluded from the output, password files - # are still included in checksum computation - additional_checksum_files="" - excluded_original_passwords="" - for p in $password_files; do - if [ -f "$p" ]; then - additional_checksum_files+=" $p" - excluded_original_passwords+=" --exclude=/var/lib/config-data/*${p}" - fi - done - # We need to exclude the swift rings and backups as those change over time - # and containers do not need to restart if they change - EXCLUDE=--exclude='*/etc/swift/backups/*'\ --exclude='*/etc/swift/*.ring.gz'\ --exclude='*/etc/swift/*.builder'\ --exclude='*/etc/libvirt/passwd.db'\ ${excluded_original_passwords} - - # We need to repipe the tar command through 'tar xO' to force text - # output because otherwise the sed command cannot work. 
The sed is - # needed because puppet puts timestamps as comments in cron and - # parsedfile resources, hence triggering a change at every redeploy - tar -c --mtime='1970-01-01' $EXCLUDE -f - ${conf_data_path} $additional_checksum_files | tar xO | \ - sed '/^#.*HEADER.*/d; s/\#.HEADER.*//g' | md5sum | awk '{print $1}' > ${conf_data_path}.md5sum - tar -c --mtime='1970-01-01' $EXCLUDE -f - ${puppet_generated_path} $additional_checksum_files --mtime='1970-01-01' | tar xO \ - | sed '/^#.*HEADER.*/d; s/\#.HEADER.*//g' | md5sum | awk '{print $1}' > ${puppet_generated_path}.md5sum -fi diff --git a/common/deploy-steps-playbooks-common.yaml b/common/deploy-steps-playbooks-common.yaml deleted file mode 100644 index f53b00a466..0000000000 --- a/common/deploy-steps-playbooks-common.yaml +++ /dev/null @@ -1,101 +0,0 @@ ---- -# If a new play with potential facts gathering is added, make sure we set: -# any_errors_fatal: false -# ignore_unreachable: true -# So we don't break the MaxFailPercentage feature since these plays can't use -# Ansible strategies. - -- hosts: localhost - name: Check if required variables are defined - tasks: - - ansible.builtin.fail: - msg: deploy_source_host variable is not defined - when: deploy_source_host is not defined - - ansible.builtin.fail: - msg: deploy_target_host variable is not defined - when: deploy_target_host is not defined - # Keep in mind that you will still need to use defaults in order to be - # able to pass Ansible own syntax check for blocks like 'hosts'. - -- hosts: all - name: Clear cached facts - # We don't want to gather facts, just clear them - gather_facts: false - tasks: - - ansible.builtin.meta: clear_facts - tags: - - facts - -- hosts: "{{ deploy_source_host | default([]) }}:{{ deploy_target_host | default([]) }}" - name: Gather facts - strategy: tripleo_free - gather_facts: true - tasks: - - name: Set legacy facts - ansible.builtin.set_fact: - ansible_distribution: "{{ ansible_facts['distribution'] }}" - ansible_distribution_major_version: "{{ ansible_facts['distribution_major_version'] }}" - ansible_distribution_release: "{{ ansible_facts['distribution_release'] }}" - ansible_distribution_version: "{{ ansible_facts['distribution_version'] }}" - ansible_os_family: "{{ ansible_facts['os_family'] }}" - tags: - - facts - -- hosts: all - name: Load global variables - gather_facts: "{{ gather_facts | default(false) }}" - any_errors_fatal: false - ignore_unreachable: true - tasks: - - ansible.builtin.include_vars: global_vars.yaml # noqa: missing-import - no_log: true - tags: - - always - -- hosts: all - name: Include extra variables from files - gather_facts: "{{ gather_facts | default(false) }}" - any_errors_fatal: false - ignore_unreachable: true - tasks: - - name: Include Service VIP vars - ansible.builtin.include_vars: - file: "{{ playbook_dir }}/service_vip_vars.yaml" - name: service_vip_vars - ignore_errors: true - - name: Include OVN bridge MAC address variables - ansible.builtin.include_vars: - file: "{{ playbook_dir }}/ovn_bridge_mac_address_vars.yaml" - name: ovn_bridge_mac_address_vars - ignore_errors: true - tags: - - always - -- hosts: "{{ deploy_target_host | default([]) }}" - name: Render all_nodes data as group_vars for overcloud - gather_facts: "{{ gather_facts | default(false) }}" - # we don't skip errors or ignore unreachable on this one because it's - # delegated to localhost. 
localhost should never be unreachable (tm) - any_errors_fatal: True - tasks: - - name: Render all_nodes data as group_vars for overcloud - delegate_to: localhost - become: false - run_once: true - tripleo_all_nodes_data: - tags: - - facts - - always - -- hosts: "{{ deploy_target_host | default([]) }}" - name: Set all_nodes data as group_vars for overcloud - gather_facts: "{{ gather_facts | default(false) }}" - any_errors_fatal: false - ignore_unreachable: true - tasks: - - name: Set all_nodes data as group_vars for overcloud - ansible.builtin.include_vars: "{{ playbook_dir }}/group_vars/overcloud.json" - no_log: true - tags: - - facts - - always diff --git a/common/deploy-steps-tasks-step-0.yaml b/common/deploy-steps-tasks-step-0.yaml deleted file mode 100644 index 747fef834d..0000000000 --- a/common/deploy-steps-tasks-step-0.yaml +++ /dev/null @@ -1,46 +0,0 @@ -- name: Deploy step tasks for step 0 - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Deploy step tasks for step 0' to resume from this task - when: "tripleo_minor_update is not defined or tripleo_minor_update != 'true'" -- name: Ensure /var/log/journal exists - become: true - file: path=/var/log/journal state=directory mode=0750 owner=root group=root setype=var_log_t -- name: Check cloud-init status - shell: systemctl is-active cloud-init.service || systemctl is-enabled cloud-init.service - failed_when: false - become: true - register: cloud_init_enabled -- name: Check if cloud-init is disabled via kernel args - command: grep -q cloud-init=disabled /proc/cmdline - failed_when: false - check_mode: false - register: cloud_init_vendor_disabled -- name: Wait for cloud-init to finish, if enabled - community.general.cloud_init_data_facts: - filter: status - register: res - until: > - res.cloud_init_data_facts.status.v1.stage is defined and - not res.cloud_init_data_facts.status.v1.stage - retries: 50 - delay: 5 - when: - - cloud_init_enabled.rc is defined - - cloud_init_enabled.rc == 0 - - cloud_init_vendor_disabled.rc is not defined or cloud_init_vendor_disabled.rc != 0 -- name: Create /var/lib/container-puppet - become: true - no_log: True - file: path=/var/lib/container-puppet state=directory setype=container_file_t selevel=s0 recurse=true -- name: Write container-puppet.sh - become: true - no_log: True - copy: src=container_puppet_script.yaml dest=/var/lib/container-puppet/container-puppet.sh force=yes mode=0755 setype=container_file_t -- include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/deploy_steps_tasks_step0.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool diff --git a/common/deploy-steps-tasks-step-1.yaml b/common/deploy-steps-tasks-step-1.yaml deleted file mode 100644 index 5cd62f558c..0000000000 --- a/common/deploy-steps-tasks-step-1.yaml +++ /dev/null @@ -1,196 +0,0 @@ -- name: Delete existing /var/lib/tripleo-config/check-mode directory for check mode - become: true - file: - path: /var/lib/tripleo-config/check-mode - state: absent - tags: - - host_config - - container_config - - container_config_tasks - - container_config_scripts - - container_startup_configs - when: - - ansible_check_mode|bool - check_mode: false - -- name: Create /var/lib/tripleo-config/check-mode directory for check mode - become: true - file: - path: /var/lib/tripleo-config/check-mode - state: directory - setype: container_file_t - selevel: s0 - recurse: true - tags: - - 
host_config - - container_config - - container_config_tasks - - container_config_scripts - - container_startup_configs - when: - - ansible_check_mode|bool - check_mode: false - -# Puppet manifest for baremetal host configuration -- name: Write the puppet step_config manifest - become: true - no_log: True - copy: - content: "{{ lookup('file', tripleo_role_name + '/step_config.pp', errors='ignore') | default('', True) }}" - dest: /var/lib/tripleo-config/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}puppet_step_config.pp - force: true - mode: '0600' - tags: - - host_config - check_mode: false - diff: false - -- name: Diff puppet step_config manifest changes for check mode - command: - diff -uN /var/lib/tripleo-config/puppet_step_config.pp /var/lib/tripleo-config/check-mode/puppet_step_config.pp - register: diff_results - tags: - - host_config - check_mode: false - when: - - ansible_check_mode|bool - - ansible_diff_mode - failed_when: false - changed_when: diff_results.rc == 1 - -- name: Diff puppet step_config manifest changes for check mode - debug: - var: diff_results.stdout_lines - changed_when: diff_results.rc == 1 - when: - - ansible_check_mode|bool - - ansible_diff_mode - tags: - - host_config - -# Puppet Containers Config directory used to generate container configs -- name: Create /var/lib/container-puppet - become: true - file: - path: /var/lib/container-puppet - state: directory - setype: container_file_t - selevel: s0 - tags: - - container_config - - container_config_tasks - -- name: Delete existing /var/lib/container-puppet/check-mode for check mode - become: true - file: - path: /var/lib/container-puppet/check-mode - state: absent - tags: - - container_config - check_mode: false - when: - - ansible_check_mode|bool - -- name: Create /var/lib/container-puppet/check-mode for check mode - become: true - file: - path: /var/lib/container-puppet/check-mode - state: directory - setype: container_file_t - selevel: s0 - tags: - - container_config - check_mode: false - when: - - ansible_check_mode|bool - -- name: Write container-puppet.json file - become: true - no_log: True - copy: - content: "{{ lookup('template', tripleo_role_name + '/puppet_config.yaml', errors='ignore') | default([], True) | from_yaml | to_nice_json }}" - dest: /var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet.json - force: true - mode: '0600' - tags: - - container_config - check_mode: false - diff: false - -- name: Diff container-puppet.json changes for check mode - command: - diff -uN /var/lib/container-puppet/container-puppet.json /var/lib/container-puppet/check-mode/container-puppet.json - register: diff_results - tags: - - container_config - check_mode: false - when: - - ansible_check_mode|bool - - ansible_diff_mode - failed_when: false - changed_when: diff_results.rc == 1 - -- name: Diff container-puppet.json changes for check mode - debug: - var: diff_results.stdout_lines - changed_when: diff_results.rc == 1 - when: - - ansible_check_mode|bool - - ansible_diff_mode - tags: - - container_config - -- name: Ensure config hashes are up-to-date for container startup configs - become: true - container_puppet_config: - update_config_hash_only: true - tags: - - container_startup_configs - -- name: Set host puppet debugging fact string - set_fact: - host_puppet_config_debug: "--debug --verbose" - when: - - enable_puppet | bool - - enable_debug | bool - tags: - - host_config - -- name: Check for /etc/puppet/check-mode directory for check mode - stat: - 
path: /etc/puppet/check-mode - register: check_mode_dir - when: ansible_check_mode|bool - tags: - - host_config - - container_config - -- name: Create /etc/puppet/check-mode/hieradata directory for check mode - become: true - file: - path: /etc/puppet/check-mode/hieradata - state: directory - setype: container_file_t - selevel: s0 - recurse: true - check_mode: false - when: - - ansible_check_mode|bool - - not check_mode_dir.stat.exists - tags: - - host_config - - container_config - -- name: Create puppet check-mode files if they don't exist for check mode - become: true - shell: | - cp -a /etc/puppet/hiera.yaml /etc/puppet/check-mode/hiera.yaml - cp -a /etc/puppet/hieradata/* /etc/puppet/check-mode/hieradata/ - sed -i 's/\/etc\/puppet\/hieradata/\/etc\/puppet\/check-mode\/hieradata/' /etc/puppet/check-mode/hiera.yaml - when: - - ansible_check_mode|bool - - not check_mode_dir.stat.exists - check_mode: false - tags: - - host_config - - container_config diff --git a/common/deploy-steps-tasks.yaml b/common/deploy-steps-tasks.yaml deleted file mode 100644 index be695c91a0..0000000000 --- a/common/deploy-steps-tasks.yaml +++ /dev/null @@ -1,140 +0,0 @@ -##################################################### -# Per step puppet configuration of the baremetal host -##################################################### - -- name: Write the config_step hieradata - become: true - no_log: True - copy: - content: "{{ dict(step=step|int) | to_json }}" - dest: /etc/puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}hieradata/config_step.json - force: true - mode: '0600' - check_mode: false - tags: - - host_config - -- name: Run puppet host configuration for step {{ step }} - become: true - async: 3600 - poll: 0 - when: enable_puppet|bool - shell: >- - set -o pipefail; - puppet apply {{ host_puppet_config_debug | default('') }} - --modulepath=/etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules - --detailed-exitcodes - --summarize - --color=false - {{ ansible_check_mode | bool | ternary('--noop', '') }} - {{ ansible_check_mode | bool | ternary('--hiera_config /etc/puppet/check-mode/hiera.yaml', '') }} - /var/lib/tripleo-config/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}puppet_step_config.pp - 2>&1 | logger -s -t puppet-user - register: puppet_host_async_result - no_log: true - tags: - - host_config - check_mode: false - -- name: Wait for puppet host configuration to finish - async_status: - jid: "{{ puppet_host_async_result.ansible_job_id }}" - register: puppet_host_outputs - until: puppet_host_outputs.finished - retries: 360 - delay: 10 - failed_when: - - (not puppet_host_outputs.finished) or (puppet_host_outputs.rc is defined and puppet_host_outputs.rc not in [0, 2]) - when: - - not (ansible_check_mode | bool) - - enable_puppet|bool - tags: - - host_config - -- name: "Debug output for task: Run puppet host configuration for step {{ step }}" - debug: - var: puppet_host_outputs.stdout_lines | default([]) | union(puppet_host_outputs.stderr_lines | default([])) - when: - - not ansible_check_mode|bool - - enable_puppet | bool - - puppet_host_outputs.rc is defined - changed_when: puppet_host_outputs.rc == 2 - failed_when: puppet_host_outputs.rc not in [0, 2] - tags: - - host_config - -####################################### -# Pre-cache facts for puppet containers -####################################### -# We don't want the pre-cache tasks to be skipped in dry-run so we force -# check_mode to "no". 
-# https://bugzilla.redhat.com/show_bug.cgi?id=1738529 -- name: Pre-cache facts for puppet containers - include_role: - name: tripleo_puppet_cache - tags: - - container_config - - container_config_tasks - -####################################### -# Generate config via puppet containers -####################################### - -- name: Include container-puppet tasks (generate config) during step 1 - include_tasks: generate-config-tasks.yaml - when: step|int == 1 - tags: - - container_config - -##################################### -# Per step starting of the containers -##################################### - -- name: Per step starting of the containers using tripleo-ansible - become: true - environment: - TRIPLEO_MINOR_UPDATE: '{{ tripleo_minor_update | default(false) }}' - block: - - name: "Manage containers for step {{ step }} with tripleo-ansible" - include_role: - name: tripleo_container_manage - vars: - tripleo_container_manage_concurrency: 5 - tripleo_container_manage_cli: "{{ container_cli }}" - tripleo_container_manage_config: "/var/lib/tripleo-config/container-startup-config/step_{{ step }}" - tripleo_container_manage_config_id: "tripleo_step{{ step }}" - tripleo_container_manage_debug: "{{ enable_debug | bool }}" - tripleo_container_manage_healthcheck_disabled: "{{ container_healthcheck_disabled | bool }}" - tripleo_container_manage_log_path: "{{ container_log_stdout_path }}" - tripleo_container_manage_config_patterns: '*.json' - tripleo_container_manage_check_puppet_config: true - tags: - - container_startup_configs - -######################################################## -# Bootstrap tasks - run any tasks that have been defined -######################################################## - -- name: "Clean container_puppet_tasks for {{ansible_facts['hostname'] | lower}} step {{step}}" - become: true - file: - path: /var/lib/container-puppet/container-puppet-tasks{{step}}.json - state: absent - tags: - - container_config_tasks - -- name: Calculate container_puppet_tasks for {{ansible_facts['hostname'] | lower}} step {{step}} - set_fact: - "{{'host_container_puppet_tasks_' ~ step}}": "{{lookup('vars', 'host_container_puppet_tasks_' ~ step, default=[]) | union([item])}}" - loop: "{{container_puppet_tasks.get('step_' ~ step, [])}}" - when: (groups[item.service_name] | default ([]) | map('extract', hostvars, 'inventory_hostname') | sort | first | lower) == ansible_facts['hostname'] | lower - vars: - container_puppet_tasks: "{{ lookup('file', tripleo_role_name + '/container_puppet_tasks.yaml', errors='ignore') | default({}, True) | from_yaml }}" - tags: - - container_config_tasks - -- name: Include container-puppet tasks for step {{step}} - include_tasks: host-container-puppet-tasks.yaml - when: ('host_container_puppet_tasks_' ~ step) is defined - tags: - - container_config_tasks diff --git a/common/deploy-steps.j2 b/common/deploy-steps.j2 deleted file mode 100644 index c3abfeb2ce..0000000000 --- a/common/deploy-steps.j2 +++ /dev/null @@ -1,1330 +0,0 @@ -# certain initialization steps (run in a container) will occur -# on the role marked as primary controller or the first role listed -{%- set primary_role_name = roles[0].name -%} -{%- for role in roles if ('primary' in role.tags and 'controller' in role.tags) -%} - {%- if loop.first -%} - {%- set primary_role_name = role.name -%} - {%- endif -%} -{%- endfor -%} -# primary role is: {{primary_role_name}} -{% set deploy_steps_max = 6 -%} -{% set update_steps_max = 6 -%} -{% set external_update_steps_max = 2 -%} -{% set 
pre_upgrade_rolling_steps_max = 1 -%} -{% set upgrade_steps_max = 6 -%} -{% set external_upgrade_steps_max = 3 -%} -{% set post_upgrade_steps_max = 4 -%} -{% set post_update_steps_max = 4 -%} -{% set scale_steps_max = 1 -%} - -heat_template_version: wallaby - -description: > - Post-deploy configuration steps via puppet for all roles, - as defined in ../roles_data.yaml - -parameters: - servers: - type: json - description: Mapping of Role name e.g Controller to a list of servers - role_data: - type: json - description: Mapping of Role name e.g Controller to the per-role data - DeployIdentifier: - default: '' - type: string - description: > - Setting this to a unique value will re-run any deployment tasks which - perform configuration on a Heat stack-update. - deployment_source_hosts: - default: 'Undercloud' - type: string - description: Host or hostgroup that runs the deployment - deployment_target_hosts: - default: '' - type: string - description: > - Host or hostgroup that consists of the target systems for the deployment. - Defaults to all hosts in the current Heat stack if not set. - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - ConfigDebug: - default: false - description: Whether to run config management (e.g. Puppet) in debug mode. - type: boolean - HideSensitiveLogs: - default: true - type: boolean - description: > - Set it to false if you don't want to activate the no_log feature within - ansible modules. - EnablePuppet: - default: true - description: Whether to run the puppet (baremetal) deployment tasks. - type: boolean - EnablePaunch: - default: false - description: > - (DEPRECATED) Whether to run paunch during container deployment tasks. - type: boolean - DockerPuppetDebug: - type: boolean - default: false - description: Set to True to enable debug logging with Puppet Containers - DockerPuppetProcessCount: - type: number - default: 6 - description: Number of concurrent processes to use when running container-puppet to generate config files. - ContainerCli: - type: string - default: 'podman' - description: CLI tool used to manage containers. - constraints: - - allowed_values: ['podman'] - DockerPuppetMountHostPuppet: - type: boolean - default: true - description: Whether containerized puppet executions use modules from the baremetal host. Defaults to true. Can be set to false to consume puppet modules from containers directly. - ContainerLogStdoutPath: - type: string - description: Absolute path for container stdout output (Podman only) - default: /var/log/containers/stdouts - ContainerHealthcheckDisabled: - type: boolean - description: Whether or not we disable the container healthcheck. 
- default: false - SELinuxMode: - default: 'enforcing' - description: Configures SELinux mode - type: string - constraints: - - allowed_values: [ 'enforcing', 'permissive', 'disabled' ] -{% for role in roles %} - {{role.name}}Count: - description: Number of {{role.name}} nodes to deploy - type: number - default: {{role.CountDefault|default(0)}} -{% endfor %} - ServiceNetMapLower: - description: Mapping of service name to network name - type: json - default: {} - ValidateControllersIcmp: - default: true - description: Validation to ensure that all controllers can be reached with ICMP - type: boolean - ValidateGatewaysIcmp: - default: true - description: Validation to ensure that all gateways can be reached with ICMP - type: boolean - ValidateFqdn: - default: false - description: Optional validation to ensure FQDN as set by Nova matches the name set in /etc/hosts. - type: boolean - PingTestGatewayIPsMap: - default: {} - description: A map of role name to list of gateway IP addresses for each network, used to ping test each gateway. - type: json - PingTestIpsMap: - default: '' - description: A map of role name to a space separated list of IP addresses used to ping test each available network interface. - type: json - NetworkSafeDefaults: - default: true - description: Allow to enable/disable safe networking defaults if os-net-config would fail to run with the provided config. - type: boolean - DeployArtifactURLs: - default: [] - description: A list of HTTP URLs containing deployment artifacts to be pulled. - Currently supports tarballs and RPM packages. - type: comma_delimited_list - DeployArtifactFILEs: - default: [] - description: A list of files containing deployment artifacts to be pushed. - Currently supports tarballs and RPM packages. - type: comma_delimited_list - HostsEntry: - default: [] - type: comma_delimited_list - description: A list of entries to be added to /etc/hosts on each node. - AnsibleHostVarsMap: - type: json - default: {} - ContainerCli: - type: string - default: 'podman' - description: CLI tool used to manage containers. - constraints: - - allowed_values: ['podman'] - EnabledServices: - default: [] - type: comma_delimited_list - ControlVirtualIP: - type: string - EnabledNetworks: - type: comma_delimited_list - NetVipMap: - type: json -{%- for network in networks if network.enabled|default(true) %} - {{network.name}}NetName: - default: {{network.name_lower}} - description: The name of the {{network.name_lower}} network. - type: string -{%- endfor %} - CloudNames: - type: json - EnableInternalTLS: - type: boolean - default: false - CloudDomain: - default: 'localdomain' - type: string - description: > - The DNS domain used for the hosts. This must match the - overcloud_domain_name configured on the undercloud. - NovaAdditionalCell: - default: false - description: Whether this is an cell additional to the default cell. - type: boolean - AllNodesExtraMapData: - default: {} - type: json - description: Map of extra data (hieradata) to set on each node. - UndercloudHostsEntries: - default: [] - description: > - List of undercloud hosts entries to be appended to /etc/hosts. The - value is populated with the HEAT_HOSTS entries on the undercloud by - tripleoclient when running deploy. 
- type: comma_delimited_list - ExtraHostsEntries: - default: [] - description: List of extra hosts entries to be appended to /etc/hosts - type: comma_delimited_list - VipHostsEntries: - default: [] - description: List of VIP (virtual IP) hosts entries to be appended to /etc/hosts - type: comma_delimited_list - KeystoneResourcesConfigs: - description: The keystone resources config. - type: json - default: {} - RootStackName: - description: The name of the stack/plan. - type: string - NetCidrMap: - description: Mapping of CIDRs to network name - type: json - default: {} - -parameter_groups: -- label: deprecated - description: | - The following parameters are deprecated and will be removed. They should not - be relied on for new deployments. If you have concerns regarding deprecated - parameters, please contact the TripleO development team on IRC or the - OpenStack mailing list. - parameters: - - EnablePaunch - -conditions: -{% for role in roles %} - {{role.name}}NonZero: - not: - equals: - - {get_param: {{role.name}}Count} - - 0 -{% endfor %} - -resources: - - PreDeployStepTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - list_concat_unique: - {%- for role in roles %} - - if: - - {{role.name}}NonZero - - get_param: [role_data, {{role.name}}, pre_deploy_step_tasks] - - [] - {%- endfor %} - - ExternalDeployTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - list_concat_unique: - {%- for role in roles %} - - if: - - {{role.name}}NonZero - - get_param: [role_data, {{role.name}}, external_deploy_tasks] - - [] - {%- endfor %} - - ExternalPostDeployTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - list_concat_unique: - {%- for role in roles %} - - if: - - {{role.name}}NonZero - - get_param: [role_data, {{role.name}}, external_post_deploy_tasks] - - [] - {%- endfor %} - - ScaleTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - list_concat_unique: - {%- for role in roles %} - - get_param: [role_data, {{role.name}}, scale_tasks] - {%- endfor %} - - ExternalUpdateTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - list_concat_unique: - {%- for role in roles %} - - if: - - {{role.name}}NonZero - - get_param: [role_data, {{role.name}}, external_update_tasks] - - [] - {%- endfor %} - - ExternalUpgradeTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - list_concat_unique: - {%- for role in roles %} - - if: - - {{role.name}}NonZero - - get_param: [role_data, {{role.name}}, external_upgrade_tasks] - - [] - {%- endfor %} - - BootstrapServerId: - type: OS::Heat::Value - properties: - value: - yaql: - # Use a constant string of "bootstrap_server_id" when there are no - # servers in the primary role, such as in the case when all - # Controllers are blacklisted. No server id's will match the string - # which is what we want when all are blacklisted. - expression: switch($.data = {} => "no_bootstrap_server", $.data != {} => $.data.items().orderBy($[0]).first()[1]) - data: {get_param: [servers, {{primary_role_name}}]} - - # Note, this should be the last step to execute configuration changes. - # Ensure that all ExtraConfigPost steps are executed - # after all the previous deployment steps. 
- ExtraConfigPost: - type: OS::TripleO::NodeExtraConfigPost - properties: - servers: - map_merge: - {%- for role in roles %} - - if: - - {{role.name}}NonZero - - {get_param: [servers, {{role.name}}]} - - {} - {%- endfor %} - EndpointMap: {get_param: EndpointMap} - - # BEGIN CONFIG STEPS, only on roles -{%- for role in roles %} - # The {{role.name}}PostConfig steps are in charge of - # quiescing all services, i.e. in the Controller case, - # we should run a full service reload. - {{role.name}}PostConfig: - condition: {{role.name}}NonZero - type: OS::TripleO::Tasks::{{role.name}}PostConfig - depends_on: ExtraConfigPost - properties: - servers: {get_param: servers} - input_values: - deploy_identifier: {get_param: DeployIdentifier} -{% endfor %} - -outputs: - RoleConfig: - description: Mapping of config data for all roles - value: - global_vars: - deploy_steps_max: {{deploy_steps_max}} - service_net_map: {get_param: ServiceNetMapLower} - validate_controllers_icmp: {get_param: ValidateControllersIcmp} - validate_gateways_icmp: {get_param: ValidateGatewaysIcmp} - validate_fqdn: {get_param: ValidateFqdn} - ping_test_gateway_ips: {get_param: PingTestGatewayIPsMap} - ping_test_ips: {get_param: PingTestIpsMap} - network_safe_defaults: {get_param: NetworkSafeDefaults} - deploy_artifact_urls: {get_param: DeployArtifactURLs} - deploy_artifact_files: {get_param: DeployArtifactFILEs} - hosts_entry: {get_param: HostsEntry} - primary_role_name: {{ primary_role_name }} - deploy_identifier: {get_param: DeployIdentifier} - container_cli: {get_param: ContainerCli} - enabled_services: {get_param: EnabledServices} - control_virtual_ip: {get_param: ControlVirtualIP} - enabled_networks: {get_param: EnabledNetworks} - net_vip_map: {get_param: NetVipMap} - nova_additional_cell: {get_param: NovaAdditionalCell} - hide_sensitive_logs: {get_param: HideSensitiveLogs} -{%- for network in networks if network.enabled|default(true) %} - {{network.name_lower}}_net_name: {get_param: {{network.name}}NetName} -{%- endfor %} - networks: -{%- for network in networks if network.enabled|default(true) %} - {{network.name}}: - name: {get_param: {{network.name}}NetName} - name_lower: {{ network.name_lower }} -{%- endfor %} - network_virtual_ips: - ctlplane: - ip_address: {get_param: [NetVipMap, ctlplane]} - index: 1 -{%- for network in networks if network.vip|default(false) and network.enabled|default(true) %} - {{network.name_lower}}: - ip_address: {get_param: [NetVipMap, {get_param: {{network.name}}NetName}]} - index: {{loop.index + 1}} -{%- endfor %} - cloud_names: {get_param: CloudNames} - enable_internal_tls: {get_param: EnableInternalTLS} - cloud_domain: {get_param: CloudDomain} - all_nodes_extra_map_data: {get_param: AllNodesExtraMapData} - undercloud_hosts_entries: {get_param: UndercloudHostsEntries} - extra_hosts_entries: {get_param: ExtraHostsEntries} - vip_hosts_entries: {get_param: VipHostsEntries} - keystone_resources: {get_param: KeystoneResourcesConfigs} - net_cidr_map: {get_param: NetCidrMap} - common_deploy_steps_playbooks: {get_file: deploy-steps-playbooks-common.yaml} - common_deploy_steps_tasks: {get_file: deploy-steps-tasks.yaml} - common_container_config_scripts: {get_file: common-container-config-scripts.yaml} - hiera_steps_tasks: {get_file: hiera-steps-tasks.yaml} - deploy_steps_tasks_step_0: {get_file: deploy-steps-tasks-step-0.yaml} - common_container_setup_tasks: {get_file: common-container-setup-tasks.yaml} - common_deploy_steps_tasks_step_1: {get_file: deploy-steps-tasks-step-1.yaml} - 
container_puppet_script: {get_file: ./container-puppet.sh} - generate-config-tasks: {get_file: generate-config-tasks.yaml} - host-container-puppet-tasks: {get_file: host-container-puppet-tasks.yaml} - deploy_steps_0_playbook: -{% block deploy_steps_str_replace_params %} - str_replace: - params: - BOOTSTRAP_SERVER_ID: {get_attr: [BootstrapServerId, value]} - DEPLOY_SOURCE_HOST: {get_param: deployment_source_hosts} - DEPLOY_TARGET_HOST: - if: - - equals: - - {get_param: deployment_target_hosts} - - "" - - {get_param: RootStackName} - - {get_param: deployment_target_hosts} - DEPLOY_IDENTIFIER: {get_param: DeployIdentifier} - ENABLE_DEBUG: {get_param: ConfigDebug} - ENABLE_PUPPET: {get_param: EnablePuppet} - CONTAINER_CLI: {get_param: ContainerCli} - CONTAINER_LOG_STDOUT_PATH: {get_param: ContainerLogStdoutPath} - CONTAINER_HEALTHCHECK_DISABLED: {get_param: ContainerHealthcheckDisabled} - DOCKER_PUPPET_DEBUG: {get_param: DockerPuppetDebug} - DOCKER_PUPPET_PROCESS_COUNT: {get_param: DockerPuppetProcessCount} - DOCKER_PUPPET_MOUNT_HOST_PUPPET: {get_param: DockerPuppetMountHostPuppet} - SELINUX_MODE: {get_param: SELinuxMode} -{% endblock %} - template: | - - hosts: DEPLOY_SOURCE_HOST - strategy: tripleo_free - name: External deployment step 0 - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - step: 0 - enable_debug: ENABLE_DEBUG - tasks: - - name: External deployment step 0 - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'External deployment step 0' to resume from this task -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "external_deploy_steps_tasks_step0.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool -{% endraw %} - tags: - - external - - external_deploy_steps - - step0 - - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" - deploy_steps_selinux_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: DEPLOY_TARGET_HOST - strategy: tripleo_linear - name: Manage SELinux - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: - - name: Set selinux state - become: true - selinux: - policy: targeted - state: SELINUX_MODE - - - hosts: all - strategy: tripleo_linear - name: Generate /etc/hosts - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: -{% raw %} - - name: Configure Hosts Entries - include_role: - name: tripleo_hosts_entries - vars: - tripleo_hosts_entries_undercloud_hosts_entries: "{{ undercloud_hosts_entries }}" - tripleo_hosts_entries_extra_hosts_entries: "{{ extra_hosts_entries }}" - tripleo_hosts_entries_vip_hosts_entries: "{{ vip_hosts_entries }}" -{% endraw %} - deploy_steps_common_roles_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: DEPLOY_TARGET_HOST - strategy: tripleo_linear - name: Common roles for TripleO servers - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - # pre_tasks run before any roles in a play, so we use it for the - # named debug task for --start-at-task. 
- pre_tasks: - - name: Common roles for TripleO servers - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Common roles for TripleO servers' to resume from this task - tasks: - - include_role: - name: tripleo_bootstrap - - include_role: - name: tripleo_ssh_known_hosts - tags: - - common_roles - deploy_steps_0_tasks_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Deploy step tasks for step 0 - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - step: 0 - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - import_tasks: deploy_steps_tasks_step_0.yaml - tags: - - overcloud - - deploy_steps - - step0 - deploy_steps_pre_network_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Server pre network steps - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - tasks: -{% raw %} - - name: Server pre-network deployments - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Server pre-network deployments' to resume from this task - - import_tasks: hiera_steps_tasks.yaml - - include_tasks: deployments.yaml - vars: - force: false - with_items: "{{ hostvars[inventory_hostname]['pre_network_' ~ tripleo_role_name]|default([]) }}" -{% endraw %} - tags: - - overcloud - - pre_deploy_steps - deploy_steps_network_deployments_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Server network deployments - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: -{% raw %} - - name: Network Configuration - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Network Configuration' to resume from this task - - name: Run Network Config - import_role: - name: tripleo_network_config - vars: - tripleo_network_config_update: "{{ network_config_update }}" - tripleo_network_config_async_timeout: "{{ async_timeout | default(300) }}" - tripleo_network_config_async_poll: "{{ async_poll | default(3) }}" - tripleo_network_config_hide_sensitive_logs: false - tripleo_network_config_legacy_script: false - tripleo_network_config_safe_defaults: "{{ network_safe_defaults }}|bool" - tags: - - overcloud - - pre_deploy_steps - - network_deploy_steps -{% endraw %} - deploy_steps_network_validation_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Server network validation - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: -{% raw %} - - name: Basic Network Validation - include_role: - name: tripleo_nodes_validation - vars: - 
tripleo_nodes_validation_validate_controllers_icmp: "{{ validate_controllers_icmp }}" - tripleo_nodes_validation_validate_gateway_icmp: "{{ validate_gateways_icmp }}" - tripleo_nodes_validation_validate_fqdn: "{{ validate_fqdn }}" - tripleo_nodes_validation_ping_test_ips: "{{ ping_test_ips.get(tripleo_role_name, '').split() | list | unique }}" - tripleo_nodes_validation_ping_test_gateway_ips: "{{ ping_test_gateway_ips.get(tripleo_role_name, []) }}" - tags: - - overcloud - - pre_deploy_steps - - network_deploy_steps -{% endraw %} - deploy_steps_pre_deployment_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Server pre deployment steps - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: - - name: Server pre deployments - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Server pre deployments' to resume from this task -{% raw %} - - import_tasks: hiera_steps_tasks.yaml - - include_tasks: deployments.yaml - vars: - force: false - with_items: "{{ hostvars[inventory_hostname]['pre_deployments_' ~ tripleo_role_name]|default([]) }}" -{% endraw %} - tags: - - overcloud - - pre_deploy_steps - deploy_steps_host_prep_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Host prep steps - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: -{% raw %} - - name: Host prep steps - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Host prep steps' to resume from this task - - name: Deploy Artifacts - tripleo_push_artifacts: - artifact_urls: "{{ deploy_artifact_urls | default([]) }}" - artifact_paths: "{{ deploy_artifact_files | default([]) }}" - when: - - ((deploy_artifact_urls | default([]) | length) > 0) or - ((deploy_artifact_files | default([]) | length) > 0) - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/host_prep_tasks.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool - tags: - - overcloud - - host_prep_steps -{% endraw %} - deploy_steps_container_setup_tasks_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Overcloud container setup tasks - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - 
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - name: Overcloud container setup tasks - block: - - name: Overcloud container setup tasks - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Overcloud container setup tasks' to resume from this task - - name: Write config data - include_tasks: common_container_setup_tasks.yaml - when: - - (deploy_identifier is defined and deploy_identifier != "" and deploy_identifier is not none) - tags: - - overcloud - - container_setup_tasks - deploy_steps_pre_deployment_tasks_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Pre Deployment Step Tasks - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: -{% raw %} - - name: Pre Deployment Step Tasks - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Pre Deployment Step Tasks' to resume from this task - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/pre_deploy_step_tasks.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool - tags: - - overcloud - - pre_deploy_steps_tasks -{% endraw %} - deploy_steps_external_deployment_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | -{%- for step in range(1,deploy_steps_max) %} - - hosts: DEPLOY_SOURCE_HOST - strategy: tripleo_free - name: External deployment step {{step}} - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - step: '{{step}}' - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - name: External deployment step {{step}} - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'External deployment step {{step}}' to resume from this task -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "external_deploy_steps_tasks_step{{step}}.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool -{% endraw %} - tags: - - external - - external_deploy_steps - - step{{step}} - - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Deploy step tasks for {{step}} - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true 
- vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - step: '{{step}}' - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - name: Deploy step tasks for {{step}} - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Deploy step tasks for {{step}}' to resume from this task - - name: Write the config_step hieradata for the deploy step {{step}} tasks -{% raw %} - copy: - content: "{{ dict(step=step | int) | to_json }}" - dest: /etc/puppet/hieradata/config_step.json - force: true - mode: '0600' -{% endraw %} -{% if step == 1 %} - - name: Overcloud common bootstrap tasks for step 1 - block: - - name: "Check if /var/lib/tripleo-config/container-startup-config/step_{{step}} already exists" - stat: - path: "/var/lib/tripleo-config/container-startup-config/step_{{step}}" - register: container_startup_configs_json_stat - - name: Write config data at the start of step 1 - include_tasks: common_deploy_steps_tasks_step_1.yaml - when: - - ((deploy_identifier is defined and deploy_identifier != "" and deploy_identifier is not none) or - not container_startup_configs_json_stat.stat.exists) -{% endif %} -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/deploy_steps_tasks_step{{ step }}.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool -{% endraw %} - - name: Overcloud common deploy step tasks {{step}} - block: - - name: "Check if /var/lib/tripleo-config/container-startup-config/step_{{step}} already exists" - stat: - path: "/var/lib/tripleo-config/container-startup-config/step_{{step}}" - register: container_startup_configs_json_stat - - include_tasks: common_deploy_steps_tasks.yaml - when: (deploy_identifier is defined and deploy_identifier != "" and deploy_identifier is not none) or - (container_startup_configs_json_stat is defined and not container_startup_configs_json_stat.stat.exists) - tags: - - overcloud - - deploy_steps - - step{{step}} - -{%- endfor %} - deploy_steps_post_deployment_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - - hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Server Post Deployments - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: - - name: Server Post Deployments - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'Server Post Deployments' to resume from this task -{% raw %} - - include_tasks: deployments.yaml - vars: - force: false - with_items: "{{ hostvars[inventory_hostname]['post_deployments_' ~ tripleo_role_name]|default([]) }}" - tags: - - overcloud - - post_deploy_steps -{% endraw %} - deploy_steps_post_deploy_tasks_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - hosts: DEPLOY_SOURCE_HOST - strategy: tripleo_linear - name: External deployment Post Deploy tasks - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - 
deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - name: External deployment Post Deploy tasks - delegate_to: localhost - run_once: true - debug: - msg: Use --start-at-task 'External deployment Post Deploy tasks' to resume from this task - - import_tasks: external_post_deploy_steps_tasks.yaml - tags: - - external - - external_deploy_steps - - external_post_deploy_steps - deploy_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - import_playbook: deploy_steps_0_playbook.yaml - - import_playbook: deploy_steps_selinux_playbook.yaml - - import_playbook: deploy_steps_common_roles_playbook.yaml - - import_playbook: deploy_steps_0_tasks_playbook.yaml - - import_playbook: deploy_steps_pre_network_playbook.yaml - - import_playbook: deploy_steps_network_deployments_playbook.yaml - - import_playbook: deploy_steps_network_validation_playbook.yaml - - import_playbook: deploy_steps_pre_deployment_playbook.yaml - - import_playbook: deploy_steps_host_prep_playbook.yaml - - import_playbook: deploy_steps_container_setup_tasks_playbook.yaml - - import_playbook: deploy_steps_pre_deployment_tasks_playbook.yaml - - import_playbook: deploy_steps_external_deployment_playbook.yaml - - import_playbook: deploy_steps_post_deployment_playbook.yaml - - import_playbook: deploy_steps_post_deploy_tasks_playbook.yaml - external_deploy_steps_tasks: {get_attr: [ExternalDeployTasks, value]} - external_post_deploy_steps_tasks: {get_attr: [ExternalPostDeployTasks, value]} - pre_deploy_step_tasks: {get_attr: [PreDeployStepTasks, value]} - update_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" -{%- for role in roles %} - - hosts: {{role.name}} - name: Run update - become: true - serial: "{% raw %}{{ update_serial | default({% endraw %}{{ role.update_serial | default(1) }}{% raw %})}}{% endraw %}" - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tripleo_minor_update: true - tripleo_redhat_enforce: {{role.rhsm_enforce|default("true")}} - tasks: - - import_tasks: hiera_steps_tasks.yaml - - import_tasks: common_container_config_scripts.yaml -{%- for step in range(0,update_steps_max) %} - - import_tasks: "{{ role.name }}/update_tasks_step{{ step }}.yaml" - vars: - step: "{{ step }}" -{%- endfor %} -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/host_prep_tasks.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: 
_test_file_exists | bool -{% endraw %} - - import_tasks: deploy_steps_tasks_step_0.yaml - vars: - step: 0 - - name: Make sure container configuration is refreshed - import_tasks: common_container_setup_tasks.yaml - - name: Write config data at the start of step 1 - import_tasks: common_deploy_steps_tasks_step_1.yaml -{%- for step in range(1,deploy_steps_max) %} - - import_tasks: common_deploy_steps_tasks.yaml - vars: - step: "{{ step }}" -{%- endfor %} -{%- for step in range(0,post_update_steps_max) %} - - import_tasks: "{{ role.name }}/post_update_tasks_step{{ step }}.yaml" - vars: - step: "{{ step }}" -{%- endfor %} -{%- endfor %} - external_update_steps_tasks: {get_attr: [ExternalUpdateTasks, value]} - external_update_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" -{%- for step in range(external_update_steps_max) %} - - hosts: DEPLOY_SOURCE_HOST - name: External update step {{step}} - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - step: '{{step}}' - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - import_tasks: external_update_steps_tasks.yaml - tags: - - step{{step}} - - external - - external_update_steps -{%- endfor %} -{%- for step in range(1,deploy_steps_max) %} - # putting both update and deploy tasks in the same - # playbook allows influencing the deploy tasks by - # variables "exported" from update tasks - - hosts: DEPLOY_SOURCE_HOST - name: External deploy step {{step}} - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - step: '{{step}}' - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - import_tasks: external_deploy_steps_tasks.yaml - tags: - - external - - external_deploy_steps - - step{{step}} -{%- endfor %} - pre_upgrade_rolling_steps_tasks: | -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/pre_upgrade_rolling_tasks.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool - tags: - - always -{% endraw %} - pre_upgrade_rolling_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | -{%- for role in roles %} - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST:{{role.name}}" - deploy_target_host: "DEPLOY_TARGET_HOST" - - hosts: {{role.name}} - name: Run pre-upgrade rolling tasks - serial: {{ 
role.deploy_serial | default(1) }} - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - tasks: - - include_tasks: pre_upgrade_rolling_steps_tasks.yaml - with_sequence: start=0 end={{pre_upgrade_rolling_steps_max-1}} - loop_control: - loop_var: step -{%- endfor %} - upgrade_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" -{%- for step in range(0,upgrade_steps_max) %} - - hosts: DEPLOY_TARGET_HOST - strategy: tripleo_free - name: Upgrade tasks for step {{step}} - become: true - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - step: '{{step}}' - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - tasks: -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/upgrade_tasks_step{{ step }}.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool - tags: - - always -{% endraw %} - tags: - - upgrade_steps - - upgrade_step{{step}} - -{%- endfor %} - post_upgrade_steps_tasks: | -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/post_upgrade_tasks.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool - tags: - - always -{% endraw %} - post_upgrade_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" - - hosts: DEPLOY_TARGET_HOST - strategy: tripleo_free - any_errors_fatal: true - tasks: - - include_tasks: post_upgrade_steps_tasks.yaml - with_sequence: start=0 end={{post_upgrade_steps_max-1}} - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - loop_control: - loop_var: step - external_upgrade_steps_tasks: {get_attr: [ExternalUpgradeTasks, value]} - external_upgrade_steps_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" -{%- for step in range(external_upgrade_steps_max) %} - - hosts: DEPLOY_SOURCE_HOST - strategy: tripleo_free - name: External upgrade step {{step}} - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - # Explicit ansible_python_interpreter to allow connecting - # to different OS releases (EL7/8) while using delegate_to. 
- ansible_python_interpreter: /usr/libexec/platform-python - step: '{{step}}' - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - import_tasks: external_upgrade_steps_tasks.yaml - tags: - - step{{step}} - - external - - external_upgrade_steps -{%- endfor %} -{%- for step in range(1,deploy_steps_max) %} - # putting both upgrade and deploy tasks in the same - # playbook allows influencing the deploy tasks by - # variables "exported" from upgrade tasks - - hosts: DEPLOY_SOURCE_HOST - name: External deploy step {{step}} - gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}" - any_errors_fatal: true - become: false - vars: - # Explicit ansible_python_interpreter to allow connecting - # to different OS releases (EL7/8) while using delegate_to. - ansible_python_interpreter: /usr/libexec/platform-python - step: '{{step}}' - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - - import_tasks: external_deploy_steps_tasks.yaml - tags: - - step{{step}} - - external - - external_deploy_steps -{%- endfor %} - scale_steps_tasks: {get_attr: [ScaleTasks, value]} - scale_playbook: -{{ self.deploy_steps_str_replace_params() }} - template: | - # Collect the facts from the overcloud nodes but ignore unreachable - # nodes in the case of a dead node which needs to be part of the - # scale-down operation. - - import_playbook: common_deploy_steps_playbooks.yaml - vars: - deploy_source_host: "DEPLOY_SOURCE_HOST" - deploy_target_host: "DEPLOY_TARGET_HOST" - - - hosts: DEPLOY_TARGET_HOST - name: Scaling - # NOTE(cloudnull): This is set to true explicitly so that we have up-to-date facts - # on all DEPLOY_TARGET_HOST when performing a scaling operation. - # Without up-to-date facts, we're creating a potential failure - # scenario. - gather_facts: true - ignore_unreachable: true - become: false - vars: - bootstrap_server_id: BOOTSTRAP_SERVER_ID - deploy_identifier: DEPLOY_IDENTIFIER - enable_debug: ENABLE_DEBUG - enable_puppet: ENABLE_PUPPET - container_cli: CONTAINER_CLI - container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH - docker_puppet_debug: DOCKER_PUPPET_DEBUG - docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT - docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET - tasks: - # Only run the scale tasks on the nodes that are alive. - # We expect the tasks to work on the nodes that are alive. - # If a task is allowed to fail, it needs to be configured at - # the task definition level but not here. - - include_tasks: scale_steps_tasks.yaml - with_sequence: start=1 end={{scale_steps_max}} - loop_control: - loop_var: step - tags: always - # we use ansible_facts['hostname'] to determine if the host is alive - # or not. 
- when: ansible_facts['hostname'] is defined - # We don't want to run the scale tasks on dead nodes, to allow - # the operator to scale down the cloud no matter the state of - # the servers. - # However, we notify the operator if the node wasn't reachable. - # Using fail and not debug module to make it more visible - # in the logs. - - fail: - msg: "Node is unreachable. No scale tasks will be run." - ignore_errors: True - tags: always - # we use ansible_facts['hostname'] to determine if the host is alive - # or not. - when: ansible_facts['hostname'] is not defined - tags: - - scale - post_update_steps_tasks: | -{% raw %} - - include_tasks: "{{ _task_file_path }}" - vars: - _task_file_path: "{{ tripleo_role_name }}/post_update_tasks.yaml" - _full_file_path: "{{ playbook_dir }}/{{ _task_file_path }}" - _test_file_exists: "{{ _full_file_path is exists }}" - when: _test_file_exists | bool - tags: - - always -{% endraw %} diff --git a/common/generate-config-tasks.yaml b/common/generate-config-tasks.yaml deleted file mode 100644 index 0e80d3c7d2..0000000000 --- a/common/generate-config-tasks.yaml +++ /dev/null @@ -1,54 +0,0 @@ -- name: Block for container-puppet tasks (generate config) during step {{ step }} with tripleo-ansible - become: true - tags: - - container_config - block: - - name: Create base directory puppet configs - file: - path: "/var/lib/tripleo-config/container-puppet-config" - mode: 0700 - recurse: true - setype: container_file_t - - - name: "Generate container puppet configs for step {{ step }}" - container_puppet_config: - check_mode: '{{ ansible_check_mode | bool | ternary(1, 0) }}' - config_vol_prefix: "/var/lib/config-data{{ ansible_check_mode | bool | ternary('/check-mode', '') }}" - debug: "{{ docker_puppet_debug | bool }}" - net_host: true - no_archive: false - puppet_config: "/var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet.json" - short_hostname: "{{ ansible_facts['hostname'] | lower }}" - step: "{{ step }}" - - - name: "Manage Puppet containers (generate config) for step {{ step }}" - tripleo_container_manage: - config_id: "tripleo_puppet_step{{ step }}" - config_dir: "/var/lib/tripleo-config/container-puppet-config/step_{{ step }}" - config_patterns: "container-puppet-*.json" - concurrency: "{{ docker_puppet_process_count }}" - -- name: Diff puppet-generated changes for check mode - become: true - shell: | - diff -ruN --no-dereference -q /var/lib/config-data/puppet-generated /var/lib/config-data/check-mode/puppet-generated - diff -ruN --no-dereference /var/lib/config-data/puppet-generated /var/lib/config-data/check-mode/puppet-generated - register: diff_results - tags: - - container_config - check_mode: false - when: - - ansible_check_mode|bool - - ansible_diff_mode - failed_when: false - changed_when: diff_results.rc == 1 - -- name: Diff puppet-generated changes for check mode - debug: - var: diff_results.stdout_lines - changed_when: diff_results.rc == 1 - when: - - ansible_check_mode|bool - - ansible_diff_mode - tags: - - container_config diff --git a/common/hiera-steps-tasks.yaml b/common/hiera-steps-tasks.yaml deleted file mode 100644 index 1cc20b9f25..0000000000 --- a/common/hiera-steps-tasks.yaml +++ /dev/null @@ -1,31 +0,0 @@ -- name: Hiera config - include_role: - name: tripleo_hieradata -- name: Hiera symlink - become: true - file: - src: /etc/puppet/hiera.yaml - dest: /etc/hiera.yaml - state: link - force: true -- name: Hieradata from vars - include_role: - name: tripleo_hieradata - tasks_from: 
hieradata_vars.yaml - vars: - hieradata_templates_list: - - bootstrap_node - - all_nodes - - vip_data - - net_ip_map - - cloud_domain - - fqdn - - service_names - - service_configs - - extraconfig - - role_extraconfig - - ovn_chassis_mac_map -- name: Ensure ansible_managed hieradata file exists - include_role: - name: tripleo_hieradata - tasks_from: ansible_hieradata.yml diff --git a/common/host-container-puppet-tasks.yaml b/common/host-container-puppet-tasks.yaml deleted file mode 100644 index bde0a16a70..0000000000 --- a/common/host-container-puppet-tasks.yaml +++ /dev/null @@ -1,33 +0,0 @@ -- name: Write container-puppet-tasks json file for {{ansible_facts['hostname'] | lower}} step {{step}} - no_log: True - copy: - content: "{{lookup ('vars', 'host_container_puppet_tasks_' ~ step, default=[]) | to_nice_json}}" - dest: "/var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet-tasks{{step}}.json" - force: true - mode: '0600' - check_mode: false - tags: - - container_config_tasks - -- name: Block for container-puppet tasks (bootstrap tasks) for step {{ step }} with tripleo-ansible - tags: - - container_config_tasks - block: - - name: "Generate container puppet configs for step {{ step }}" - container_puppet_config: - check_mode: '{{ ansible_check_mode | bool | ternary(1, 0) }}' - config_vol_prefix: "/var/lib/config-data{{ ansible_check_mode | bool | ternary('/check-mode', '') }}" - debug: "{{ docker_puppet_debug | bool }}" - net_host: true - no_archive: true - puppet_config: "/var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet-tasks{{ step }}.json" - short_hostname: "{{ ansible_facts['hostname'] | lower }}" - step: "{{ step }}" - - - name: "Manage Puppet containers (bootstrap tasks) for step {{ step }}" - tripleo_container_manage: - config_id: "tripleo_puppet_step{{ step }}" - config_dir: "/var/lib/tripleo-config/container-puppet-config/step_{{ step }}" - config_patterns: 'container-puppet-*.json' - debug: "{{ docker_puppet_debug | bool }}" - concurrency: "{{ docker_puppet_process_count }}" diff --git a/common/post.j2.yaml b/common/post.j2.yaml deleted file mode 100644 index 8a70dfa97b..0000000000 --- a/common/post.j2.yaml +++ /dev/null @@ -1 +0,0 @@ -{% include 'deploy-steps.j2' %} diff --git a/common/services/role.role.j2.yaml b/common/services/role.role.j2.yaml deleted file mode 100644 index fee2ac8215..0000000000 --- a/common/services/role.role.j2.yaml +++ /dev/null @@ -1,443 +0,0 @@ -heat_template_version: wallaby - -description: > - Utility stack to convert an array of services into a set of combined - role configs. - -parameters: - Services: - default: [] - description: | - List nested stack service templates. - type: comma_delimited_list - ServiceData: - default: {} - description: Dictionary packing service data - type: json - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. Use - parameter_merge_strategies to merge it with the defaults. - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. 
- type: json - RoleName: - default: '' - description: Role name on which the service is applied - type: string - RoleParameters: - description: Parameters specific to the role - default: {} - type: json - -resources: - - ServiceChain: - type: OS::Heat::ResourceChain - properties: - resources: {get_param: Services} - concurrent: true - resource_properties: - ServiceData: {get_param: ServiceData} - ServiceNetMap: {get_param: ServiceNetMap} - EndpointMap: {get_param: EndpointMap} - RoleName: {get_param: RoleName} - RoleParameters: {get_param: RoleParameters} - - ServiceServerMetadataHook: - type: OS::TripleO::{{role.name}}ServiceServerMetadataHook - properties: - RoleData: {get_attr: [ServiceChain, role_data]} - - PuppetStepConfig: - type: OS::Heat::Value - properties: - type: string - value: - yaql: - expression: - # select 'step_config' only from services that do not have a docker_config - coalesce($.data.service_names, []).zip(coalesce($.data.step_config, []), coalesce($.data.docker_config, [])).where($[2] = null).where($[1] != null).select($[1]).join("\n") - data: - service_names: {get_attr: [ServiceChain, role_data, service_name]} - step_config: {get_attr: [ServiceChain, role_data, step_config]} - docker_config: {get_attr: [ServiceChain, role_data, docker_config]} - - DockerConfig: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - expression: - # select 'docker_config' only from services that have it - coalesce($.data.service_names, []).zip(coalesce($.data.docker_config, [])).where($[1] != null).select($[1]).reduce($1.mergeWith($2), {}) - data: - service_names: {get_attr: [ServiceChain, role_data, service_names]} - docker_config: {get_attr: [ServiceChain, role_data, docker_config]} - - DockerConfigScripts: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - expression: - # select 'container_config_scripts' only from services that have it - coalesce($.data.service_names, []).zip(coalesce($.data.container_config_scripts, [])).where($[1] != null).select($[1]).reduce($1.mergeWith($2), {}) - data: - service_names: {get_attr: [ServiceChain, role_data, service_names]} - container_config_scripts: {get_attr: [ServiceChain, role_data, container_config_scripts]} - - MonitoringSubscriptionsConfig: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('monitoring_subscription')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - ServiceNames: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - filter: - - [null] - - {get_attr: [ServiceChain, role_data, service_name]} - - GlobalConfigSettings: - type: OS::Heat::Value - properties: - type: json - value: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('global_config_settings')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - KeystoneResourcesConfigs: - type: OS::Heat::Value - properties: - type: json - value: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('keystone_resources')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - CoreServices: - type: OS::Heat::Value - properties: - type: json - value: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('core_services')).where($ != null)).flatten() - data: 
{role_data: {get_attr: [ServiceChain, role_data]}} - - ServiceConfigSettings: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - expression: coalesce($.data.role_data, []).where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {}) - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - DeployStepsTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('deploy_steps_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - ExternalDeployTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('external_deploy_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - ExternalPostDeployTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('external_post_deploy_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - ScaleTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('scale_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - ExternalUpdateTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('external_update_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - ExternalUpgradeTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('external_upgrade_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - PreUpgradeRollingTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - expression: coalesce($.data, []).where($ != null).select($.get('pre_upgrade_rolling_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - UpgradeTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - PostUpgradeTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != 
null).select($.get('post_upgrade_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - UpdateTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('update_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - PostUpdateTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks, e.g yum update for all services - expression: coalesce($.data, []).where($ != null).select($.get('post_update_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - UpgradeBatchTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - expression: coalesce($.data, []).where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - PuppetConfig: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - expression: coalesce($.data, []).where($ != null).select($.get('puppet_config')).where($ != null).distinct() - data: {get_attr: [ServiceChain, role_data]} - - KollaConfig: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - expression: coalesce($.data.role_data, []).where($ != null).select($.get('kolla_config')).where($ != null).reduce($1.mergeWith($2), {}) - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - # DEPRECATED in favor of ContainerPuppetTasks. - DockerPuppetTasks: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - # This extracts docker_puppet_tasks for each step, - # and merges {service_name: the_service} for each item, so we - # can later filter based on the bootstrap hostname for the service - expression: dict(coalesce($.data, []).where($ != null).select([$.get('docker_puppet_tasks'), $.get('service_name')]).where($[0] != null).select([$[0], dict($[0].keys().zip(dict(service_name=>$[1]).repeat(len($[0].keys()))))]).select($[0].mergeWith($[1])).selectMany($.items()).groupBy($[0], $[1])) - data: {get_attr: [ServiceChain, role_data]} - - ContainerPuppetTasks: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - # This extracts container_puppet_tasks for each step, - # and merges {service_name: the_service} for each item, so we - # can later filter based on the bootstrap hostname for the service - expression: dict(coalesce($.data, []).where($ != null).select([$.get('container_puppet_tasks'), $.get('service_name')]).where($[0] != null).select([$[0], dict($[0].keys().zip(dict(service_name=>$[1]).repeat(len($[0].keys()))))]).select($[0].mergeWith($[1])).selectMany($.items()).groupBy($[0], $[1])) - data: {get_attr: [ServiceChain, role_data]} - - HostFirewallTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks - expression: coalesce($.data, []).where($ != null).select($.get('host_firewall_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - HostPrepTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks - expression: coalesce($.data, []).where($ != 
null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - PreDeployStepTasks: - type: OS::Heat::Value - properties: - type: comma_delimited_list - value: - yaql: - # Note we use distinct() here to filter any identical tasks - expression: coalesce($.data, []).where($ != null).select($.get('pre_deploy_step_tasks')).where($ != null).flatten().distinct() - data: {get_attr: [ServiceChain, role_data]} - - AnsibleGroupVars: - type: OS::Heat::Value - properties: - type: json - value: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('ansible_group_vars')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - FirewallRules: - type: OS::Heat::Value - properties: - type: json - value: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_rules')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - FirewallFrontendRules: - type: OS::Heat::Value - properties: - type: json - value: - frontend: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_frontend_rules')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - ssl_frontend: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_ssl_frontend_rules')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - edge_frontend: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_edge_frontend_rules')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - edge_sslfrontend: - map_merge: - yaql: - expression: list(coalesce($.data.role_data, []).where($ != null).select($.get('firewall_edge_ssl_frontend_rules')).where($ != null)).flatten() - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - -outputs: - role_data: - description: Combined Role data for this set of services. 
- value: - service_names: {get_attr: [ServiceNames, value]} - monitoring_subscriptions: {get_attr: [MonitoringSubscriptionsConfig, value]} - config_settings: - yaql: - expression: $.data.where($ != null).reduce($1.mergeWith($2), {}) - data: {get_attr: [ServiceChain, role_data, config_settings]} - global_config_settings: {get_attr: [GlobalConfigSettings, value]} - service_config_settings: {get_attr: [ServiceConfigSettings, value]} - step_config: {get_attr: [PuppetStepConfig, value]} - deploy_steps_tasks: {get_attr: [DeployStepsTasks, value]} - external_deploy_tasks: {get_attr: [ExternalDeployTasks, value]} - external_post_deploy_tasks: {get_attr: [ExternalPostDeployTasks, value]} - external_update_tasks: {get_attr: [ExternalUpdateTasks, value]} - external_upgrade_tasks: {get_attr: [ExternalUpgradeTasks, value]} - scale_tasks: {get_attr: [ScaleTasks, value]} - pre_upgrade_rolling_tasks: {get_attr: [PreUpgradeRollingTasks, value]} - upgrade_tasks: {get_attr: [UpgradeTasks, value]} - post_upgrade_tasks: {get_attr: [PostUpgradeTasks, value]} - update_tasks: {get_attr: [UpdateTasks, value]} - post_update_tasks: {get_attr: [PostUpdateTasks, value]} - upgrade_batch_tasks: {get_attr: [UpgradeBatchTasks, value]} - service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]} - ansible_group_vars: - map_merge: - - ansible_async_dir: "/tmp/.ansible_async" - - {get_attr: [AnsibleGroupVars, value]} - keystone_resources: {get_attr: [KeystoneResourcesConfigs, value]} - - # Firewall rules - firewall_rules: {get_attr: [FirewallRules, value]} - firewall_frontend_rules: {get_attr: [FirewallFrontendRules, value, frontend]} - firewall_ssl_frontend_rules: {get_attr: [FirewallFrontendRules, value, ssl_frontend]} - firewall_edge_frontend_rules: {get_attr: [FirewallFrontendRules, value, edge_frontend]} - firewall_edge_ssl_frontend_rules: {get_attr: [FirewallFrontendRules, value, edge_ssl_frontend]} - - # Keys to support docker/services - puppet_config: {get_attr: [PuppetConfig, value]} - kolla_config: {get_attr: [KollaConfig, value]} - docker_config: {get_attr: [DockerConfig, value]} - container_config_scripts: {get_attr: [DockerConfigScripts, value]} - container_puppet_tasks: - map_merge: - - {get_attr: [ContainerPuppetTasks, value]} - - {get_attr: [DockerPuppetTasks, value]} - host_prep_tasks: - list_concat: - - {get_attr: [HostFirewallTasks, value]} - - {get_attr: [HostPrepTasks, value]} - pre_deploy_step_tasks: {get_attr: [PreDeployStepTasks, value]} diff --git a/config-download-software.yaml b/config-download-software.yaml deleted file mode 100644 index 3b60f7322e..0000000000 --- a/config-download-software.yaml +++ /dev/null @@ -1,74 +0,0 @@ -heat_template_version: wallaby - -parameters: - - name: - type: string - default: "" - - actions: - type: comma_delimited_list - default: ['CREATE', 'UPDATE'] - - server: - type: string - - config: - type: string - - input_values: - type: json - description: input values for the software deployments - default: {} - - input_key: - type: string - default: 'get_input' - - signal_transport: - type: string - default: 'CFN_SIGNAL' - - input_values_validate: - type: string - default: 'LAX' - -resources: - - TripleODeployment: - type: OS::Heat::Value - properties: - value: - name: {get_param: name} - server: {get_param: server} - config: {get_param: config} - input_values: {get_param: input_values} - deployment: {get_resource: TripleOSoftwareDeployment} - - TripleOSoftwareDeployment: - type: OS::Heat::SoftwareDeployment - properties: - name: deployment_resource - 
config: {get_param: config} - server: 'fake_server_id' - input_values: {get_param: input_values} - signal_transport: NO_SIGNAL - actions: {get_param: actions} - -outputs: - - deploy_status_code: - value: 0 - deploy_stderr: - value: '' - deploy_stdout: - value: '' - show: - value: '' - - update_managed_packages: - description: boolean value indicating whether to upgrade managed packages - value: false - hostname: - description: hostname - value: '' diff --git a/config-download-structured.yaml b/config-download-structured.yaml deleted file mode 100644 index 0f1d812a5f..0000000000 --- a/config-download-structured.yaml +++ /dev/null @@ -1,74 +0,0 @@ -heat_template_version: wallaby - -parameters: - - name: - type: string - default: "" - - actions: - type: comma_delimited_list - default: ['CREATE', 'UPDATE'] - - server: - type: string - - config: - type: string - - input_values: - type: json - description: input values for the software deployments - default: {} - - input_key: - type: string - default: 'get_input' - - signal_transport: - type: string - default: 'CFN_SIGNAL' - - input_values_validate: - type: string - default: 'LAX' - -resources: - - TripleODeployment: - type: OS::Heat::Value - properties: - value: - name: {get_param: name} - server: {get_param: server} - config: {get_param: config} - input_values: {get_param: input_values} - deployment: {get_resource: TripleOSoftwareDeployment} - - TripleOSoftwareDeployment: - type: OS::Heat::StructuredDeployment - properties: - name: deployment_resource - config: {get_param: config} - server: 'fake_server_id' - input_values: {get_param: input_values} - signal_transport: NO_SIGNAL - actions: {get_param: actions} - -outputs: - - deploy_status_code: - value: 0 - deploy_stderr: - value: '' - deploy_stdout: - value: '' - show: - value: '' - - update_managed_packages: - description: boolean value indicating whether to upgrade managed packages - value: false - hostname: - description: hostname - value: '' diff --git a/container_config_scripts/__init__.py b/container_config_scripts/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/container_config_scripts/monitoring/collectd_check_health.py b/container_config_scripts/monitoring/collectd_check_health.py deleted file mode 100755 index 8331c6af0a..0000000000 --- a/container_config_scripts/monitoring/collectd_check_health.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright 2022 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
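The yaql expressions in the role.role.j2.yaml hunk above follow two aggregation patterns: map-like role_data entries (config_settings, kolla_config, the firewall rule maps) are combined with reduce($1.mergeWith($2), {}), while the various task lists (deploy_steps_tasks, upgrade_tasks, update_tasks and so on) are combined with flatten().distinct() so that identical tasks contributed by several services run only once. A rough Python equivalent of those two patterns, shown only to illustrate the semantics; the helper names below are not part of the templates:

    import json

    def merge_settings(role_data, key):
        # reduce($1.mergeWith($2), {}): recursively merge one dict per service
        merged = {}
        for svc in role_data or []:
            if svc and svc.get(key):
                deep_merge(merged, svc[key])
        return merged

    def deep_merge(dst, src):
        for k, v in src.items():
            if isinstance(v, dict) and isinstance(dst.get(k), dict):
                deep_merge(dst[k], v)
            else:
                dst[k] = v

    def collect_tasks(role_data, key):
        # flatten().distinct(): concatenate per-service task lists, dropping duplicates
        seen, tasks = set(), []
        for svc in role_data or []:
            for task in (svc or {}).get(key) or []:
                marker = json.dumps(task, sort_keys=True)
                if marker not in seen:
                    seen.add(marker)
                    tasks.append(task)
        return tasks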
- -import json -import os -import shutil -import subprocess -import sys - - -SOCKET = "unix:/run/podman/podman.sock" -FORMAT = ("{service: .Name, container: .Id, status: .State.Running, " - "healthy: .State.Health.Status}") -SKIP_LIST = ['_bootstrap', 'container-puppet-', '_db_sync', - '_ensure_', '_fix_', '_init_', '_map_', '_wait_', - 'mysql_data_ownership', 'configure_cms_options'] - - -def execute(cmd, workdir: str = None, - prev_proc: subprocess.Popen = None) -> subprocess.Popen: - # Note(mmagr): When this script is executed by collectd-sensubility started - # via collectd the script has non-root permission but inherits - # environment from collectd with root permission. We need - # to avoid sensubility access /root when using podman-remote. - # See https://bugzilla.redhat.com/show_bug.cgi?id=2091076 for - # more info. - proc_env = os.environ.copy() - proc_env["HOME"] = "/tmp" - if type(cmd[0]) is list: # multiple piped commands - last = prev_proc - for c in cmd: - last = execute(c, workdir, last) - return last - else: # single command - inpipe = prev_proc.stdout if prev_proc is not None else None - proc = subprocess.Popen(cmd, cwd=workdir, env=proc_env, stdin=inpipe, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - if prev_proc is not None: - prev_proc.stdout.close() - prev_proc.stderr.close() - return proc - - -def fetch_container_health(containers): - out = [] - for cont in set(containers.split('\n')) - set(SKIP_LIST): - if not cont: - continue - proc = execute([ - [shutil.which('podman-remote'), - '--url', SOCKET, 'inspect', cont], - [shutil.which('jq'), '.[] | %s' % FORMAT] - ]) - o, e = proc.communicate() - if proc.returncode != 0: - msg = "Failed to fetch status of %s: %s" % (cont, e.decode()) - return proc.returncode, msg - - item = json.loads(o.decode()) - if len(item['healthy']) > 0: - item['status'] = item['healthy'] - else: - item['status'] = 'running' if item['status'] else 'stopped' - - item['healthy'] = int(item['healthy'] == 'healthy') - out.append(item) - return 0, out - - -if __name__ == "__main__": - proc = execute([shutil.which('podman-remote'), '--url', SOCKET, - 'ps', '--all', '--format', '{{.Names}}']) - o, e = proc.communicate() - if proc.returncode != 0: - print("Failed to list containers:\n%s\n%s" % (o.decode(), e.decode())) - sys.exit(1) - - rc, status = fetch_container_health(o.decode()) - if rc != 0: - print("Failed to inspect containers:\n%s" % status) - sys.exit(rc) - print(json.dumps(status)) diff --git a/container_config_scripts/mysql_upgrade_db.sh b/container_config_scripts/mysql_upgrade_db.sh deleted file mode 100755 index 610bf1307a..0000000000 --- a/container_config_scripts/mysql_upgrade_db.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -set -e - -# Wait until we know the mysql server is up and responding -timeout ${DB_MAX_TIMEOUT:-60} /bin/bash -c 'until mysqladmin -uroot ping 2>/dev/null; do sleep 1; done' - -# After an upgrade, make sure that the running mysql had a chance to -# update its data table on disk. -mysql_upgrade - -# Upgrade to 10.3: the default table row format changed from COMPACT -# to DYNAMIC, so upgrade the existing tables. 
-compact_tables=$(mysql -se 'SELECT CONCAT("`",TABLE_SCHEMA,"`.`",TABLE_NAME,"`") FROM information_schema.tables WHERE ENGINE = "InnoDB" and ROW_FORMAT = "Compact";'); -for i in $compact_tables; do echo converting row format of table $i; mysql -e "ALTER TABLE $i ROW_FORMAT=DYNAMIC;"; done; diff --git a/container_config_scripts/nova_api_ensure_default_cells.py b/container_config_scripts/nova_api_ensure_default_cells.py deleted file mode 100644 index 8e41e3eb7b..0000000000 --- a/container_config_scripts/nova_api_ensure_default_cells.py +++ /dev/null @@ -1,161 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2022 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from configparser import ConfigParser -import logging -import os -import subprocess -import sys -from urllib import parse as urlparse - -config = ConfigParser(strict=False) - -debug = os.getenv('__OS_DEBUG', 'false') - -if debug.lower() == 'true': - loglevel = logging.DEBUG -else: - loglevel = logging.INFO - -logging.basicConfig(stream=sys.stdout, level=loglevel) -LOG = logging.getLogger('nova_api_ensure_default_cells') - -NOVA_CFG = '/etc/nova/nova.conf' -CELL0_ID = '00000000-0000-0000-0000-000000000000' -DEFAULT_CELL_NAME = 'default' - - -def template_netloc_credentials(netloc, index=None): - if '@' in netloc: - userpass, hostport = netloc.split('@', 1) - has_pass = ':' in userpass - if index is None: - cred_template = '{username}' - if has_pass: - cred_template += ':{password}' - else: - cred_template = '{{username{index}}}'.format(index=index) - if has_pass: - cred_template += ':{{password{index}}}'.format(index=index) - return '@'.join((cred_template, hostport)) - else: - return netloc - - -def template_url(url): - parsed = urlparse.urlparse(url) - if ',' in parsed.netloc: - orig_netlocs = parsed.netloc.split(',') - templ_netlocs = [] - index = 0 - for netloc in orig_netlocs: - index += 1 - templ_netlocs.append(template_netloc_credentials(netloc, index)) - new_netloc = ','.join(templ_netlocs) - else: - new_netloc = template_netloc_credentials(parsed.netloc) - return parsed._replace(netloc=new_netloc).geturl() - - -def parse_list_cells(list_cells_output): - list_cells_lines = list_cells_output.split('\n') - if len(list_cells_lines) < 5: - raise ValueError('Invalid nova-manage cell_v2 list_cells output') - - data_rows = list_cells_lines[3:-2] - by_name = {} - by_uuid = {} - - for row in data_rows: - parts = row.split('|') - entry = { - 'name': parts[1].strip(), - 'uuid': parts[2].strip(), - 'transport_url': parts[3].strip(), - 'database_connection': parts[4].strip(), - } - by_name[entry['name']] = entry - by_uuid[entry['uuid']] = entry - - return by_name, by_uuid - - -def create_or_update_default_cells(cell0_db, default_db, default_transport_url): - list_cells_cmd = ['/usr/bin/nova-manage', 'cell_v2', 'list_cells', '--verbose'] - list_cells_output = subprocess.check_output(list_cells_cmd, encoding='utf-8') - cells_by_name, cells_by_uuid = parse_list_cells(list_cells_output) - - if CELL0_ID in cells_by_uuid: - 
LOG.info('Setting cell0 database connection to \'{}\''.format(cell0_db)) - cmd = [ - '/usr/bin/nova-manage', 'cell_v2', 'update_cell', - '--cell_uuid', CELL0_ID, - '--database_connection', cell0_db, - '--transport-url', 'none:///' - ] - else: - LOG.info('Creating cell0 with database connection \'{}\''.format(cell0_db)) - cmd = [ - '/usr/bin/nova-manage', 'cell_v2', 'map_cell0', - '--database_connection', cell0_db - ] - subprocess.check_call(cmd) - - if DEFAULT_CELL_NAME in cells_by_name: - LOG.info('Setting default cell database connection to \'{}\' and transport url to \'{}\''.format( - default_db, default_transport_url)) - cmd = [ - '/usr/bin/nova-manage', 'cell_v2', 'update_cell', - '--cell_uuid', cells_by_name[DEFAULT_CELL_NAME]['uuid'], - '--database_connection', default_db, - '--transport-url', default_transport_url - ] - else: - LOG.info('Creating default cell with database connection \'{}\' and transport url \'{}\''.format( - default_db, default_transport_url)) - cmd = [ - '/usr/bin/nova-manage', 'cell_v2', 'create_cell', - '--name', DEFAULT_CELL_NAME, - '--database_connection', default_db, - '--transport-url', default_transport_url - ] - subprocess.check_call(cmd) - - -def replace_db_name(db_url, db_name): - return urlparse.urlparse(db_url)._replace(path=db_name).geturl() - - -if __name__ == '__main__': - if os.path.isfile(NOVA_CFG): - try: - config.read(NOVA_CFG) - except Exception: - LOG.exception('Error while reading nova.conf:') - sys.exit(1) - else: - LOG.error('Nova configuration file %s does not exist', NOVA_CFG) - sys.exit(1) - - default_database_connection = config.get('database', 'connection') - cell0_database_connection = replace_db_name(default_database_connection, 'nova_cell0') - default_transport_url = config.get('DEFAULT', 'transport_url') - - create_or_update_default_cells( - template_url(cell0_database_connection), - template_url(default_database_connection), - template_url(default_transport_url) - ) diff --git a/container_config_scripts/nova_libvirt_init_secret.sh b/container_config_scripts/nova_libvirt_init_secret.sh deleted file mode 100755 index 52f9e491ea..0000000000 --- a/container_config_scripts/nova_libvirt_init_secret.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash - -set -e - -CEPH_INFO=($*) - -if [ -z "$CEPH_INFO" ]; then - echo "error: At least one CLUSTER:CLIENT tuple must be specified" - exit 1 -fi - -echo "------------------------------------------------" -echo "Initializing virsh secrets for: ${CEPH_INFO[@]}" - -for INFO in ${CEPH_INFO[@]}; do - IFS=: read CLUSTER CLIENT <<< $INFO - if [ ! -f /etc/ceph/${CLUSTER}.conf ]; then - echo "Error: /etc/ceph/${CLUSTER}.conf was not found" - echo "Path to nova_libvirt_init_secret was ${CEPH_INFO}" - exit 1 - fi - FSID=$(awk '$1 == "fsid" {print $3}' /etc/ceph/${CLUSTER}.conf) - if [ -z "${FSID}" ]; then - echo "Error: /etc/ceph/${CLUSTER}.conf contained an empty fsid definition" - echo "Check your ceph configuration" - exit 1 - fi - - echo "--------" - echo "Initializing the virsh secret for '$CLUSTER' cluster ($FSID) '$CLIENT' client" - - # Ensure the secret XML file exists. Puppet should have created a secret.xml - # file for the first cluster's secret, so detect when to use that file. - if grep -q $FSID /etc/nova/secret.xml; then - SECRET_FILE="/etc/nova/secret.xml" - SECRET_NAME="client.${CLIENT} secret" - else - SECRET_FILE="/etc/nova/${CLUSTER}-secret.xml" - SECRET_NAME="${CLUSTER}.client.${CLIENT} secret" - fi - - if [ ! 
-f $SECRET_FILE ]; then - echo "Creating $SECRET_FILE" - cat < $SECRET_FILE - - - ${SECRET_NAME} - - ${FSID} - -EOF - else - echo "The $SECRET_FILE file already exists" - fi - - # Ensure the libvirt secret is defined - if /usr/bin/virsh secret-list | grep -q $FSID; then - echo "The virsh secret for $FSID has already been defined" - else - /usr/bin/virsh secret-define --file $SECRET_FILE - fi - - # Fetch the key from the keyring and ensure the secret is set - KEY=$(awk '$1 == "key" {print $3}' /etc/ceph/${CLUSTER}.client.${CLIENT}.keyring) - if /usr/bin/virsh secret-get-value $FSID 2>/dev/null | grep -q $KEY; then - echo "The virsh secret for $FSID has already been set" - else - /usr/bin/virsh secret-set-value --secret $FSID --base64 $KEY - fi -done diff --git a/container_config_scripts/nova_statedir_ownership.py b/container_config_scripts/nova_statedir_ownership.py deleted file mode 100644 index 2978a32f4d..0000000000 --- a/container_config_scripts/nova_statedir_ownership.py +++ /dev/null @@ -1,247 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2018 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import logging -import os -import pwd -import selinux -import stat -import sys - -debug = os.getenv('__OS_DEBUG', 'false') - -if debug.lower() == 'true': - loglevel = logging.DEBUG -else: - loglevel = logging.INFO - -logging.basicConfig(stream=sys.stdout, level=loglevel) -LOG = logging.getLogger('nova_statedir') - - -class PathManager(object): - """Helper class to manipulate ownership of a given path""" - def __init__(self, path): - self.path = path - self.uid = None - self.gid = None - self.is_dir = None - self.secontext = None - self._update() - - def _update(self): - try: - statinfo = os.stat(self.path) - self.is_dir = stat.S_ISDIR(statinfo.st_mode) - self.uid = statinfo.st_uid - self.gid = statinfo.st_gid - self.secontext = selinux.lgetfilecon(self.path)[1] - except Exception: - LOG.exception('Could not update metadata for %s', self.path) - raise - - def __str__(self): - return "uid: {} gid: {} path: {}{}".format( - self.uid, - self.gid, - self.path, - '/' if self.is_dir else '' - ) - - def has_owner(self, uid, gid): - return self.uid == uid and self.gid == gid - - def has_either(self, uid, gid): - return self.uid == uid or self.gid == gid - - def chown(self, uid, gid): - target_uid = -1 - target_gid = -1 - if self.uid != uid: - target_uid = uid - if self.gid != gid: - target_gid = gid - if (target_uid, target_gid) != (-1, -1): - LOG.info('Changing ownership of %s from %d:%d to %d:%d', - self.path, - self.uid, - self.gid, - self.uid if target_uid == -1 else target_uid, - self.gid if target_gid == -1 else target_gid) - try: - os.chown(self.path, target_uid, target_gid) - self._update() - except Exception: - LOG.exception('Could not change ownership of %s: ', - self.path) - raise - else: - LOG.info('Ownership of %s already %d:%d', - self.path, - uid, - gid) - - def chcon(self, context): - # If dir returns whether to recursively set context - try: - try: - selinux.lsetfilecon(self.path, 
context) - LOG.info('Setting selinux context of %s to %s', - self.path, context) - return True - except OSError as e: - if self.is_dir and e.errno == 95: - # Operation not supported, assume NFS mount and skip - LOG.info('Setting selinux context not supported for %s', - self.path) - return False - else: - raise - except Exception: - LOG.exception('Could not set selinux context of %s to %s:', - self.path, context) - raise - - -class NovaStatedirOwnershipManager(object): - """Class to manipulate the ownership of the nova statedir (/var/lib/nova). - - The nova uid/gid differ on the host and container images. An upgrade - that switches from host systemd services to docker requires a change in - ownership. Previously this was a naive recursive chown, however this - causes issues if nova instance are shared via an NFS mount: any open - filehandles in qemu/libvirt fail with an I/O error (LP1778465). - - Instead the upgrade/FFU ansible tasks now lay down a marker file when - stopping and disabling the host systemd services. We use this file to - determine the host nova uid/gid. We then walk the tree and update any - files that have the host uid/gid to the docker nova uid/gid. As files - owned by root/qemu etc... are ignored this avoids the issues with open - filehandles. The marker is removed once the tree has been walked. - - For subsequent runs, or for a new deployment, we simply ensure that the - docker nova user/group owns all directories. This is required as the - directories are created with root ownership in host_prep_tasks (the - docker nova uid/gid is not known in this context). - """ - def __init__(self, statedir, upgrade_marker='upgrade_marker', - nova_user='nova', secontext_marker='../_nova_secontext', - exclude_paths=None): - self.statedir = statedir - self.nova_user = nova_user - - self.upgrade_marker_path = os.path.join(statedir, upgrade_marker) - self.secontext_marker_path = os.path.normpath(os.path.join(statedir, secontext_marker)) - self.upgrade = os.path.exists(self.upgrade_marker_path) - - self.exclude_paths = [self.upgrade_marker_path] - if exclude_paths is not None: - for p in exclude_paths: - if not p.startswith(os.path.sep): - p = os.path.join(self.statedir, p) - self.exclude_paths.append(p) - - self.target_uid, self.target_gid = self._get_nova_ids() - self.previous_uid, self.previous_gid = self._get_previous_nova_ids() - self.id_change = (self.target_uid, self.target_gid) != \ - (self.previous_uid, self.previous_gid) - self.target_secontext = self._get_secontext() - - def _get_nova_ids(self): - nova_uid, nova_gid = pwd.getpwnam(self.nova_user)[2:4] - return nova_uid, nova_gid - - def _get_previous_nova_ids(self): - if self.upgrade: - statinfo = os.stat(self.upgrade_marker_path) - return statinfo.st_uid, statinfo.st_gid - else: - return self._get_nova_ids() - - def _get_secontext(self): - if os.path.exists(self.secontext_marker_path): - return selinux.lgetfilecon(self.secontext_marker_path)[1] - else: - return None - - def _walk(self, top, chcon=True): - for f in os.listdir(top): - pathname = os.path.join(top, f) - - if pathname in self.exclude_paths: - continue - - try: - pathinfo = PathManager(pathname) - LOG.info("Checking %s", pathinfo) - if pathinfo.is_dir: - # Always chown the directories - pathinfo.chown(self.target_uid, self.target_gid) - chcon_r = chcon - if chcon: - chcon_r = pathinfo.chcon(self.target_secontext) - self._walk(pathname, chcon_r) - elif self.id_change: - # Only chown files if it's an upgrade and the file is owned by - # the host nova uid/gid - 
pathinfo.chown( - self.target_uid if pathinfo.uid == self.previous_uid - else pathinfo.uid, - self.target_gid if pathinfo.gid == self.previous_gid - else pathinfo.gid - ) - if chcon: - pathinfo.chcon(self.target_secontext) - except Exception: - # Likely to have been caused by external systems - # interacting with this directory tree, - # especially on NFS e.g snapshot dirs. - # Just ignore it and continue on to the next entry - continue - - def run(self): - LOG.info('Applying nova statedir ownership') - LOG.info('Target ownership for %s: %d:%d', - self.statedir, - self.target_uid, - self.target_gid) - - pathinfo = PathManager(self.statedir) - LOG.info("Checking %s", pathinfo) - pathinfo.chown(self.target_uid, self.target_gid) - chcon = self.target_secontext is not None - - if chcon: - pathinfo.chcon(self.target_secontext) - - self._walk(self.statedir, chcon) - - if self.upgrade: - LOG.info('Removing upgrade_marker %s', - self.upgrade_marker_path) - os.unlink(self.upgrade_marker_path) - - LOG.info('Nova statedir ownership complete') - - -def get_exclude_paths(): - exclude_paths = os.environ.get('NOVA_STATEDIR_OWNERSHIP_SKIP') - if exclude_paths is not None: - exclude_paths = exclude_paths.split(os.pathsep) - return exclude_paths - - -if __name__ == '__main__': - NovaStatedirOwnershipManager('/var/lib/nova', exclude_paths=get_exclude_paths()).run() diff --git a/container_config_scripts/nova_wait_for_api_service.py b/container_config_scripts/nova_wait_for_api_service.py deleted file mode 100755 index 17d2bf29e3..0000000000 --- a/container_config_scripts/nova_wait_for_api_service.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2018 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
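The NovaStatedirOwnershipManager docstring above describes the key rule of the statedir ownership script: directories under /var/lib/nova are always re-owned to the container's nova uid/gid, while regular files are only touched when they still carry the old host uid/gid recorded on the upgrade marker, so files owned by root, qemu or other users (and any open NFS filehandles) are left alone. A condensed sketch of that rule, using illustrative names that are not part of the original script:

    import os

    def reown_tree(path, old_ids, new_ids):
        for entry in os.scandir(path):
            if entry.is_symlink():
                continue                                  # symlinks are left untouched here
            st = entry.stat(follow_symlinks=False)
            if entry.is_dir(follow_symlinks=False):
                os.chown(entry.path, *new_ids)            # directories: always nova-owned
                reown_tree(entry.path, old_ids, new_ids)
            elif (st.st_uid, st.st_gid) == old_ids:
                os.chown(entry.path, *new_ids)            # files: only if still host-nova owned
            # anything else (root, qemu, ...) keeps its ownership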
-import argparse -from configparser import ConfigParser -import logging -import logging.handlers -import os -import sys -import time - - -from keystoneauth1 import loading -from keystoneauth1 import session - -from novaclient import client -from novaclient.exceptions import ClientException - - -config = ConfigParser(strict=False) - -debug = os.getenv('__OS_DEBUG', 'false') - -if debug.lower() == 'true': - loglevel = logging.DEBUG -else: - loglevel = logging.INFO - -LOG = logging.getLogger('nova_wait_for_api_service') -LOG_FORMAT = ('%(asctime)s.%(msecs)03d %(levelname)s ' - '%(name)s %(message)s') -DATE_FORMAT = '%Y-%m-%d %H:%M:%S' -LOG.handlers.clear() -LOG.setLevel(loglevel) -LOG.propagate = True -formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT) -stream_handler = logging.StreamHandler(sys.stdout) -stream_handler.setFormatter(formatter) -LOG.addHandler(stream_handler) - -iterations = 60 -timeout = 10 -nova_cfg = '/etc/nova/nova.conf' - -if __name__ == '__main__': - parser = argparse.ArgumentParser(usage='%(prog)s [options]') - parser.add_argument('-k', '--insecure', - action="store_false", - dest='insecure', - default=True, - help='Allow insecure connection when using SSL') - - args = parser.parse_args() - LOG.debug('Running with parameter insecure = %s', - args.insecure) - - if os.path.isfile(nova_cfg): - try: - config.read(nova_cfg) - except Exception: - LOG.exception('Error while reading nova.conf:') - else: - LOG.error('Nova configuration file %s does not exist', nova_cfg) - sys.exit(1) - - loader = loading.get_plugin_loader('password') - auth = loader.load_from_options( - auth_url=config.get('neutron', - 'auth_url'), - username=config.get('neutron', - 'username'), - password=config.get('neutron', - 'password'), - project_name=config.get('neutron', - 'project_name'), - project_domain_name=config.get('neutron', - 'project_domain_name'), - user_domain_name=config.get('neutron', - 'user_domain_name')) - sess = session.Session(auth=auth, verify=args.insecure) - - # Wait until this host is listed in the service list - for i in range(iterations): - try: - nova = client.Client('2.11', session=sess, endpoint_type='internal') - nova.versions.list() - LOG.info('Nova-api service active') - sys.exit(0) - except ClientException: - LOG.info('Waiting for nova-api service') - except Exception: - LOG.exception( - 'Error while waiting for nova-api service') - time.sleep(timeout) -sys.exit(1) - -# vim: set et ts=4 sw=4 : diff --git a/container_config_scripts/nova_wait_for_compute_service.py b/container_config_scripts/nova_wait_for_compute_service.py deleted file mode 100644 index 61e0d618a2..0000000000 --- a/container_config_scripts/nova_wait_for_compute_service.py +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2018 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import argparse -from configparser import ConfigParser -import logging -import logging.handlers -import os -import socket -import sys -import time - - -from keystoneauth1 import loading -from keystoneauth1 import session - -from novaclient import client - - -config = ConfigParser(strict=False) - -debug = os.getenv('__OS_DEBUG', 'false') - -if debug.lower() == 'true': - loglevel = logging.DEBUG -else: - loglevel = logging.INFO - -LOG = logging.getLogger('nova_wait_for_compute_service') -LOG_FORMAT = ('%(asctime)s.%(msecs)03d %(levelname)s ' - '%(name)s %(message)s') -DATE_FORMAT = '%Y-%m-%d %H:%M:%S' -LOG.handlers.clear() -LOG.setLevel(loglevel) -LOG.propagate = True -formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT) -stream_handler = logging.StreamHandler(sys.stdout) -stream_handler.setFormatter(formatter) -LOG.addHandler(stream_handler) - -iterations = 60 -timeout = 10 -nova_cfg = '/etc/nova/nova.conf' - -if __name__ == '__main__': - parser = argparse.ArgumentParser(usage='%(prog)s [options]') - parser.add_argument('-k', '--insecure', - action="store_false", - dest='insecure', - default=True, - help='Allow insecure connection when using SSL') - - args = parser.parse_args() - LOG.debug('Running with parameter insecure = %s', - args.insecure) - - if os.path.isfile(nova_cfg): - try: - config.read(nova_cfg) - except Exception: - LOG.exception('Error while reading nova.conf:') - else: - LOG.error('Nova configuration file %s does not exist', nova_cfg) - sys.exit(1) - - my_host = config.get('DEFAULT', 'host') - if not my_host: - # If host isn't set nova defaults to this - my_host = socket.gethostname() - - loader = loading.get_plugin_loader('password') - auth = loader.load_from_options( - auth_url=config.get('neutron', - 'auth_url'), - username=config.get('neutron', - 'username'), - password=config.get('neutron', - 'password'), - project_name=config.get('neutron', - 'project_name'), - project_domain_name=config.get('neutron', - 'project_domain_name'), - user_domain_name=config.get('neutron', - 'user_domain_name')) - sess = session.Session(auth=auth, verify=args.insecure) - nova = client.Client('2.11', session=sess, endpoint_type='internal', - region_name=config.get('neutron', 'region_name')) - - # Wait until this host is listed in the service list - for i in range(iterations): - try: - service_list = nova.services.list(binary='nova-compute') - for entry in service_list: - host = getattr(entry, 'host', '') - zone = getattr(entry, 'zone', '') - if host == my_host and zone != 'internal': - LOG.info('Nova-compute service registered') - sys.exit(0) - LOG.info('Waiting for nova-compute service to register') - except Exception: - LOG.exception( - 'Error while waiting for nova-compute service to register') - time.sleep(timeout) -sys.exit(1) - -# vim: set et ts=4 sw=4 : diff --git a/container_config_scripts/pacemaker_mutex_restart_bundle.sh b/container_config_scripts/pacemaker_mutex_restart_bundle.sh deleted file mode 100755 index 063b7e3c8f..0000000000 --- a/container_config_scripts/pacemaker_mutex_restart_bundle.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash - -# pacemaker_mutex_restart_bundle.sh --lock mysql galera galera-bundle Master _ -# pacemaker_mutex_restart_bundle.sh --lock ovn_dbs ovndb_servers ovn-dbs-bundle Slave Master - -set -u - -usage() { - echo "Restart a clustered resource in a coordinated way across the cluster" - echo "Usage:" - echo " $0 --lock " - echo -} - -log() { - echo "$(date -u): $1" -} - -error() { - echo "$(date -u): $1" 1>&2 - exit 1 -} - 
-pacemaker_supports_promoted() { - # The Promoted token is only matched in recent pacemaker versions - grep -wq "Promoted" /usr/share/pacemaker/resources-*.rng -} - -ACTION=$1 -case $ACTION in - --help) usage; exit 0;; - --lock) ;; - *) error "Unknown action '$ACTION'";; -esac - -TRIPLEO_SERVICE=$2 -LOCK_NAME=${TRIPLEO_SERVICE}-restart-lock -LOCK_OWNER=$(crm_node -n 2>/dev/null) -rc=$? -if [ $rc -ne 0 ]; then - if [ $rc -eq 102 ]; then - log "Cluster is not running locally, no need to restart resource $TRIPLEO_SERVICE" - exit 0 - else - error "Unexpected error while connecting to the cluster (rc: $rc), bailing out" - fi -fi - -RESOURCE_NAME=$3 -BUNDLE_NAME=$4 -WAIT_TARGET_LOCAL=$5 -WAIT_TARGET_ANYWHERE=${6:-_} - -if pacemaker_supports_promoted; then - WAIT_TARGET_LOCAL=$(echo "$5" | sed -e 's/Master/Promoted/' -e 's/Slave/Unpromoted/') - WAIT_TARGET_ANYWHERE=$(echo "${6:-_}" | sed -e 's/Master/Promoted/' -e 's/Slave/Unpromoted/') - promoted_role="Promoted" -else - promoted_role="Master" -fi - -# The lock TTL should accommodate for the resource start/promote timeout -if [ "$RESOURCE_NAME" != "$BUNDLE_NAME" ]; then - if [ "$WAIT_TARGET_LOCAL" = "$promoted_role" ] || [ "$WAIT_TARGET_ANYWHERE" = "$promoted_role" ]; then - rsc_op="promote" - else - rsc_op="start" - fi - # - PCMK_TTL=$(cibadmin -Q | xmllint -xpath "string(//primitive[@id='${RESOURCE_NAME}']/operations/op[@name='${rsc_op}']/@timeout)" - | sed 's/s$//') - LOCK_TTL=$((PCMK_TTL + 30)) -else - # The podman RA's default start timeout - LOCK_TTL=90 -fi - -log "Acquire a ${LOCK_TTL}s restart lock for service $TRIPLEO_SERVICE before restarting it" -# Loop until we hold the lock. The lock has a TTL, so we're guaranteed to get it eventually -rc=1 -while [ $rc -ne 0 ]; do - /var/lib/container-config-scripts/pacemaker_resource_lock.sh --acquire $LOCK_NAME $LOCK_OWNER $LOCK_TTL - rc=$? - if [ $rc != 0 ]; then - if [ $rc -gt 1 ]; then - error "Could not acquire lock due to unrecoverable error (rc: $rc), bailing out" - else - log "Could not acquire lock, retrying" - sleep 10 - fi - fi -done - -log "Restart the service $TRIPLEO_SERVICE locally" -# Reuse the local restart script in t-h-t (driven by env var TRIPLEO_MINOR_UPDATE) -TRIPLEO_MINOR_UPDATE=true /var/lib/container-config-scripts/pacemaker_restart_bundle.sh $TRIPLEO_SERVICE $RESOURCE_NAME $BUNDLE_NAME $WAIT_TARGET_LOCAL $WAIT_TARGET_ANYWHERE - -# If we reached this point, always try to release the lock -log "Release the restart lock for service $TRIPLEO_SERVICE" -/var/lib/container-config-scripts/pacemaker_resource_lock.sh --release $LOCK_NAME $LOCK_OWNER -rc=$? -if [ $rc -ne 0 ] && [ $rc -ne 1 ]; then - error "Could not release held lock (rc: $rc)" -fi diff --git a/container_config_scripts/pacemaker_mutex_shutdown.sh b/container_config_scripts/pacemaker_mutex_shutdown.sh deleted file mode 100755 index 1f168a16c6..0000000000 --- a/container_config_scripts/pacemaker_mutex_shutdown.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/bin/bash - -# pacemaker_mutex_shutdown.sh --acquire -# pacemaker_mutex_shutdown.sh --release - -set -u - -usage() { - echo "Shutdown a cluster node in a coordinated way across the cluster" - echo "Usage:" - echo " $0 --acquire # prevent other node from shutting down until we hold the lock" - echo " $0 --release # release the lock, other node can compete for the shutdown lock" - echo -} - -log() { - echo "$(date -u): $1" -} - -error() { - echo "$(date -u): $1" 1>&2 - exit 1 -} - -# Loop until we hold the lock. 
The lock has a TTL, so we're guaranteed to get it eventually -shutdown_lock_acquire() { - local lockname=$1 - local requester=$2 - local ttl=$3 - local rc=1 - local current_owner - local owner_stopped - local owner_rc - - log "Acquiring the shutdown lock" - while [ $rc -ne 0 ]; do - /var/lib/container-config-scripts/pacemaker_resource_lock.sh --acquire-once $lockname $requester $ttl - rc=$? - if [ $rc -ne 0 ]; then - if [ $rc -eq 2 ]; then - error "Could not acquire the shutdown lock due to unrecoverable error (rc: $rc), bailing out" - else - # The lock is held by another node. - current_owner=$(/var/lib/container-config-scripts/pacemaker_resource_lock.sh --owner $lockname) - owner_rc=$? - if [ $owner_rc -eq 2 ]; then - error "Could not get the shutdown lock owner due to unrecoverable error (rc: $owner_rc), bailing out" - fi - if [ $owner_rc -eq 0 ]; then - # If the owner is marked as offline, that means it has shutdown and - # we can clean the lock preemptively and try to acquire it. - owner_stopped=$(crm_mon -1X | xmllint --xpath 'count(//nodes/node[@name="'${current_owner}'" and @online="false" and @unclean="false"])' -) - if [ "${owner_stopped}" = "1" ]; then - log "Shutdown lock held by stopped node '${current_owner}', lock can be released" - /var/lib/container-config-scripts/pacemaker_resource_lock.sh --release $lockname $current_owner - continue - fi - fi - log "Shutdown lock held by another node (rc: $rc), retrying" - sleep 10 - fi - fi - done - log "Shutdown lock acquired" - return 0 -} - - -# Release the lock if we still own it. Not owning it anymore is not fatal -shutdown_lock_release() { - local lockname=$1 - local requester=$2 - local rc - - log "Releasing the shutdown lock" - /var/lib/container-config-scripts/pacemaker_resource_lock.sh --release $lockname $requester - rc=$? - if [ $rc -ne 0 ]; then - if [ $rc -gt 1 ]; then - error "Could not release the shutdown lock due to unrecoverable error (rc: $rc), bailing out" - else - log "Shutdown lock no longer held, nothing to do" - fi - else - log "Shutdown lock released" - fi - return 0 -} - - -ACTION=$1 -if [ -z "$ACTION" ]; then - error "Action must be specified" -fi - -LOCK_NAME=tripleo-shutdown-lock -LOCK_OWNER=$(crm_node -n 2>/dev/null) -rc=$? -if [ $rc -ne 0 ]; then - if [ $rc -eq 102 ]; then - log "Cluster is not running locally, no need to acquire the shutdown lock" - exit 0 - else - error "Unexpected error while connecting to the cluster (rc: $rc), bailing out" - fi -fi - -# We start with a very high TTL, that long enough to accommodate a cluster stop. -# As soon as the node will get offline, the other competing node will be entitled -# to steal the lock, so they should never wait that long in practice. -LOCK_TTL=600 - - -case $ACTION in - --help) usage; exit 0;; - --acquire|-a) shutdown_lock_acquire ${LOCK_NAME} ${LOCK_OWNER} ${LOCK_TTL};; - --release|-r) shutdown_lock_release ${LOCK_NAME} ${LOCK_OWNER};; - *) error "Invalid action";; -esac -exit $? diff --git a/container_config_scripts/pacemaker_resource_lock.sh b/container_config_scripts/pacemaker_resource_lock.sh deleted file mode 100755 index fc4bf91250..0000000000 --- a/container_config_scripts/pacemaker_resource_lock.sh +++ /dev/null @@ -1,267 +0,0 @@ -#!/bin/bash - -MAX_RETRIES=10 -CIB_ENOTFOUND=105 - -usage() { - echo "Set a global property in the cluster with a validity timestamp." 
- echo "Usage:" - echo " $0 --acquire " - echo " $0 --release " - echo -} - -log() { - echo "$(date -u): $1" 1>&2 -} - -error() { - echo "$(date -u): $1" 1>&2 - exit 1 -} - -lock_create() { - local name=$1 - local data=$2 - # cibadmin won't overwrite a key if someone else succeeded to create it concurrently - cibadmin --sync-call --scope crm_config --create --xml-text "" &>/dev/null - return $? -} - -lock_update() { - local name=$1 - local expected_data=$2 - local new_data=$3 - # we only update the lock we expect to see, so we can't update someone else's lock - cibadmin --sync-call --scope crm_config --modify --xpath "//cluster_property_set/nvpair[@name='${name}' and @value='${expected_data}']/.." --xml-text "" &>/dev/null - return $? -} - -lock_delete() { - local name=$1 - local expected_data=$2 - # we only delete the lock we expect to see, so we can't delete someone else's lock - cibadmin --sync-call --scope crm_config --delete --xpath "//cluster_property_set/nvpair[@name='${name}' and @value='${expected_data}']/.." &>/dev/null - return $? -} - -lock_get() { - local lockname=$1 - local res - local rc - res=$(cibadmin --query --scope crm_config --xpath "//cluster_property_set/nvpair[@name='$lockname']" 2>/dev/null) - rc=$? - if [ $rc -eq 0 ]; then - echo "$res" | sed -n 's/.*value="\([^"]*\)".*/\1/p' - fi - return $rc -} - -lock_owner() { - local lock=$1 - echo "$lock" | cut -d':' -f1 -} - -lock_has_expired() { - local lock=$1 - local expiry=$(echo "$lock" | cut -d':' -f2) - local now=$(date +%s) - test $now -ge $expiry -} - - -# Perform a lock action and restart if the CIB has been modified before -# committing the lock action -try_action() { - local fun=$1 - local lock=$2 - local requester=$3 - local args=${4:-} - local tries=$MAX_RETRIES - local rc=1 - if [ "$fun" = "lock_acquire" ] || [ "$fun" = "lock_release" ]; then - log "Try running $fun" - else - return 2 - fi - while [ $rc -ne 0 ]; do - $fun $lock $requester $args - rc=$? - if [ $rc -eq 0 ]; then - log "Operation $1 succeeded" - return 0 - elif [ $rc -eq 3 ]; then - # rc == 3 -> CIB changed before push - if [ $tries -eq 0 ]; then - log "Failed to commit after $MAX_RETRIES retries. Bailing out." - return 2 - else - log "Failed to commit. Retrying operation." - tries=$(($tries - 1)) - fi - elif [ $rc -eq 2 ]; then - # rc == 2 -> unrecoverable cib error (e.g. pacemaker down) - log "Unexpected failure. Bailing out" - return $rc - else - # rc == 1 -> lock error (not owner, lock doesn't exists) - return $rc - fi - done -} - -# The lock mechanism uses cibadmin's atomic creation so cluster-wide -# state coherency is guaranteed by pacemaker -lock_acquire() { - local lockname=$1 - local requester=$2 - local ttl=$3 - local rc - local lock - local expiry - local owner - - log "Check whether the lock is already held in the CIB" - lock=$(lock_get $lockname) - rc=$? - if [ $rc -ne 0 ] && [ $rc -ne $CIB_ENOTFOUND ]; then - log "Could not retrieve info from the CIB" - return 2 - fi - - if [ -n "$lock" ]; then - lock_has_expired $lock - rc=$? - if [ $rc -eq 0 ]; then - log "Lock has expired, now available for being held" - else - # lock is still held. 
check whether we're the owner - owner=$(lock_owner $lock) - if [ "$owner" = "$requester" ];then - log "Requester already owns the lock, acquiring attempt will just reconfigure the TTL" - else - log "Lock is held by someone else ($owner)" - return 1 - fi - fi - else - log "Lock is not held yet" - fi - - # prepare the lock info - expiry=$(($(date +%s) + $ttl)) - - if [ -n "$lock" ]; then - log "Attempting to update the lock" - lock_update $lockname "$lock" "$requester:$expiry" - rc=$? - else - log "Attempting to acquire the lock" - lock_create $lockname "$requester:$expiry" - rc=$? - fi - - if [ $rc -eq 0 ]; then - log "Lock '$lockname' acquired by '$requester', valid until $(date -d @$expiry)" - return 0 - else - log "CIB changed, lock cannot be acquired" - return 3 - fi -} - - -# The lock mechanism uses the CIB's num_updates tag to implement -# a conditional store. Cluster-wide locking is guaranteed by pacemaker -lock_release() { - local lockname=$1 - local requester=$2 - local rc - local lock - local owner - - log "Check whether the lock is already held in the CIB" - lock=$(lock_get $lockname) - rc=$? - if [ $rc -ne 0 ] && [ $rc -ne $CIB_ENOTFOUND ]; then - log "Could not retrieve info from the CIB" - return 2 - fi - - if [ -z "$lock" ]; then - log "Lock doesn't exist. Nothing to release" - return 0 - else - log "Lock exists, check whether we're the owner" - owner=$(lock_owner $lock) - if [ "$owner" != "$requester" ];then - log "Lock is held by someone else ($owner), will not unlock" - return 1 - fi - fi - - lock_delete $lockname "$lock" - rc=$? - - if [ $rc -eq 0 ]; then - log "Lock '$lockname' released by '$requester'" - return 0 - else - log "CIB deletion error, lock cannot be released" - return 3 - fi -} - - -# Retrieve the owner of a lock from the CIB -# this is a read-only operation, so no need to log debug info -lock_get_owner() { - local lockname=$1 - local rc - local lock - local owner - - lock=$(lock_get $lockname) - rc=$? - if [ $rc -ne 0 ] && [ $rc -ne $CIB_ENOTFOUND ]; then - return 2 - fi - - if [ -z "$lock" ]; then - return 1 - else - lock_owner $lock - return 0 - fi -} - - -ACTION=$1 -LOCKNAME=$2 -REQUESTER=$3 -TTL=${4:-60} - -if [ -z "$ACTION" ]; then - error "Action must be specified" -fi - -if [ $ACTION != "--help" ]; then - if [ -z "$LOCKNAME" ]; then - error "You must specific a lock name" - fi - if [ $ACTION != "--owner" ] && [ $ACTION != "-o" ]; then - if [ -z "$REQUESTER" ]; then - error "You must specific a lock requester" - fi - fi -fi - -case $ACTION in - --help) usage; exit 0;; - --acquire|-a) try_action lock_acquire $LOCKNAME $REQUESTER $TTL;; - --release|-r) try_action lock_release $LOCKNAME $REQUESTER;; - --acquire-once|-A) lock_acquire $LOCKNAME $REQUESTER $TTL;; - --owner|-o) lock_get_owner $LOCKNAME;; - *) error "Invalid action";; -esac -exit $? 
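pacemaker_resource_lock.sh above stores each lock as a cluster property whose value is "owner:expiry"; atomicity comes from cibadmin refusing to create a property that already exists and from updating or deleting only when the current value still matches what was read. Ignoring the CIB plumbing, the acquire/release rules reduce to roughly the following in-memory model, written purely for illustration:

    import time

    locks = {}  # lockname -> "owner:expiry", the same encoding as the cluster property

    def acquire(name, requester, ttl):
        entry = locks.get(name)
        if entry:
            owner, expiry = entry.split(':')
            if time.time() < int(expiry) and owner != requester:
                return False                              # held by someone else, not expired
        # free, expired, or already ours: (re)write the lock with a fresh TTL
        locks[name] = '%s:%d' % (requester, int(time.time()) + ttl)
        return True

    def release(name, requester):
        entry = locks.get(name)
        if entry is None:
            return True                                   # nothing to release
        if entry.split(':')[0] != requester:
            return False                                  # never delete someone else's lock
        del locks[name]
        return True

Callers such as pacemaker_mutex_restart_bundle.sh and pacemaker_mutex_shutdown.sh simply retry the acquire in a loop; the TTL guarantees that a lock left behind by a node that died while holding it eventually becomes reclaimable.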
diff --git a/container_config_scripts/pacemaker_restart_bundle.sh b/container_config_scripts/pacemaker_restart_bundle.sh deleted file mode 100755 index 5c12541cba..0000000000 --- a/container_config_scripts/pacemaker_restart_bundle.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash - -set -u - -# ./pacemaker_restart_bundle.sh mysql galera galera-bundle Master _ -# ./pacemaker_restart_bundle.sh redis redis redis-bundle Slave Master -# ./pacemaker_restart_bundle.sh ovn_dbs ovndb_servers ovn-dbs-bundle Slave Master -RESTART_SCRIPTS_DIR=$(dirname $0) -TRIPLEO_SERVICE=$1 -RESOURCE_NAME=$2 -BUNDLE_NAME=$3 -WAIT_TARGET_LOCAL=$4 -WAIT_TARGET_ANYWHERE=${5:-_} -TRIPLEO_MINOR_UPDATE="${TRIPLEO_MINOR_UPDATE:-false}" -TRIPLEO_HA_WRAPPER_RESOURCE_EXISTS="${TRIPLEO_HA_WRAPPER_RESOURCE_EXISTS:-false}" - -bundle_can_be_restarted() { - local bundle=$1 - # As long as the resource bundle is managed by pacemaker and is - # not meant to stay stopped, no matter the state of any inner - # pcmk_remote or ocf resource, we should restart it to give it a - # chance to read the new config. - [ "$(crm_resource --meta -r $1 -g is-managed 2>/dev/null)" != "false" ] && \ - [ "$(crm_resource --meta -r $1 -g target-role 2>/dev/null)" != "Stopped" ] -} - -log() { - local msg=$1 - logger -t pcmkrestart "$1" -} - -HOSTNAME=$(/bin/hostname -s) -if [ x"${TRIPLEO_MINOR_UPDATE,,}" != x"true" ]; then - if [ x"${TRIPLEO_HA_WRAPPER_RESOURCE_EXISTS,,}" = x"false" ]; then - # Do not restart during initial deployment, as the resource - # has just been created. - SERVICE_NODEID=$(/bin/hiera -c /etc/puppet/hiera.yaml "${TRIPLEO_SERVICE}_short_bootstrap_node_name") - if [[ "${HOSTNAME,,}" == "${SERVICE_NODEID,,}" ]]; then - log "Initial deployment, skipping the restart of ${BUNDLE_NAME}" - fi - exit 0 - else - # During a stack update, this script is called in parallel on - # every node the resource runs on, after the service's configs - # have been updated on all nodes. So we need to run pcs only - # once (e.g. on the service's boostrap node). - if bundle_can_be_restarted ${BUNDLE_NAME}; then - SERVICE_NODEID=$(/bin/hiera -c /etc/puppet/hiera.yaml "${TRIPLEO_SERVICE}_short_bootstrap_node_name") - if [[ "${HOSTNAME,,}" == "${SERVICE_NODEID,,}" ]]; then - replicas_running=$(crm_resource -Q -r $BUNDLE_NAME --locate 2>&1 | wc -l) - if [ "$replicas_running" != "0" ]; then - log "Restarting ${BUNDLE_NAME} globally. Stopping:" - /sbin/pcs resource disable --wait=__PCMKTIMEOUT__ $BUNDLE_NAME - log "Restarting ${BUNDLE_NAME} globally. Starting:" - /sbin/pcs resource enable --wait=__PCMKTIMEOUT__ $BUNDLE_NAME - else - log "${BUNDLE_NAME} is not running anywhere," \ - "cleaning up to restart it globally if necessary" - /sbin/pcs resource cleanup $BUNDLE_NAME - fi - else - log "Skipping global restart of ${BUNDLE_NAME} on ${HOSTNAME} it will be restarted by node ${SERVICE_NODEID}" - fi - - else - log "No global restart needed for ${BUNDLE_NAME}." - fi - fi -else - # During a minor update workflow however, a host gets fully - # updated before updating the next one. So unlike stack - # update, at the time this script is called, the service's - # configs aren't updated on all nodes yet. So only restart the - # resource locally, where it's guaranteed that the config is - # up to date. 
- HOST=$(facter hostname) - - if bundle_can_be_restarted ${BUNDLE_NAME}; then - # if the resource is running locally, restart it - if crm_resource -r $BUNDLE_NAME --locate 2>&1 | grep -w -q "${HOST}"; then - log "Restarting ${BUNDLE_NAME} locally on '${HOST}'" - /sbin/pcs resource restart $BUNDLE_NAME "${HOST}" - - else - # At this point, if no resource is running locally, it's - # either because a) it has failed previously, or b) because - # it's an A/P resource running elsewhere. - # By cleaning up resource, we ensure that a) it will try to - # restart, or b) it won't do anything if the resource is - # already running elsewhere. - log "${BUNDLE_NAME} is currently not running on '${HOST}'," \ - "cleaning up its state to restart it if necessary" - /sbin/pcs resource cleanup $BUNDLE_NAME node="${HOST}" - fi - - # Wait until the resource is in the expected target state - $RESTART_SCRIPTS_DIR/pacemaker_wait_bundle.sh \ - $RESOURCE_NAME $BUNDLE_NAME \ - "$WAIT_TARGET_LOCAL" "$WAIT_TARGET_ANYWHERE" \ - "${HOST}" __PCMKTIMEOUT__ - else - log "No restart needed for ${BUNDLE_NAME}." - fi -fi diff --git a/container_config_scripts/pacemaker_wait_bundle.sh b/container_config_scripts/pacemaker_wait_bundle.sh deleted file mode 100755 index 93335aabdf..0000000000 --- a/container_config_scripts/pacemaker_wait_bundle.sh +++ /dev/null @@ -1,333 +0,0 @@ -#!/bin/bash - -# ---- -# Wait for an OCF resource or a bundle to be restarted -# ---- -# e.g.: -# M/S OCF: $0 galera galera-bundle Master -# clone OCF: $0 rabbitmq rabbitmq-bundle Started -# A/P M/S OCF: $0 redis redis-bundle Slave Master -# A/P bundle: $0 openstack-cinder-volume openstack-cinder-volume _ Started -# clone bundle: $0 haproxy-bundle haproxy-bundle Started - -# design note 1: -# - this script is called during a minor update; it is called -# once per node that hosts a service replica. -# - the purpose of this script is to ensure that restarting the -# service replica locally won't disrupt the service availability -# for the end user. To reach that goal, the script waits until the -# service is restarted locally or globally and reaches a given -# target state (i.e. Started, Slave or Master). -# design note 2: -# - we don't want to track restart error: our only job is to ensure -# service restart synchronization, not service health. -# - In particular, we don't want to error out in case the resource -# cannot be restarted locally, because that would make the minor -# update fail, even if potentially other replicas still provide -# the service. -# design note 3: -# - we can bail out early if we determine that the resource can't -# be restarted automatically by pacemaker (e.g. its "blocked", -# unmanaged or disabled). 
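The design notes above boil down to a polling loop that can end in one of three ways: the resource reaches the target role, a bail-out condition shows it never will (unmanaged, disabled, blocked or permanently failed), or the timeout expires; in every case the script exits 0 so the minor update is never blocked by the wait itself. A minimal sketch of that policy, where check_role and cannot_recover are hypothetical stand-ins for the crm_mon/crm_resource probes used below:

    import time

    def wait_for_role(check_role, cannot_recover, target, timeout, interval=4):
        while timeout > 0:
            if cannot_recover():              # unmanaged, disabled, blocked or failed
                return 'bailout'
            if check_role() == target:        # Started, Slave/Unpromoted or Master/Promoted
                return 'restarted'
            time.sleep(interval)
            timeout -= interval
        return 'timeout'                      # logged, but the caller still exits 0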
- -log() { - local msg=$1 - echo "$(date -u): $1" -} - -usage() { - echo 2>&1 "Usage: $0 NAME BUNDLE_NAME ROLE_LOCAL [ROLE_ANYWHERE] [HOST] [TIMEOUT]" - exit 1 -} - -pacemaker_supports_promoted() { - # The Promoted token is only matched in recent pacemaker versions - grep -wq "Promoted" /usr/share/pacemaker/resources-*.rng -} - -# -# Utility functions to detect stuck resources -# - -bundle_failures_locally() { - local engine=$BUNDLE_CONTAINER_ENGINE - local replicas=$BUNDLE_REPLICAS - local last=$(($replicas - 1)) - local replica_name - for i in $(seq 0 $last); do - replica_name=${BUNDLE_NAME}-${engine}-${i} - crm_failcount -q -G -r $replica_name -N $HOST - done -} - -bundle_failures_globally() { - local engine=$BUNDLE_CONTAINER_ENGINE - local replicas=$BUNDLE_REPLICAS - local last=$(($replicas - 1)) - for i in $(seq 0 $last); do - crm_failcount -q -G -r ${BUNDLE_NAME}-${engine}-${i} - done -} - -bundle_running_globally() { - local engine=$BUNDLE_CONTAINER_ENGINE - # return the number of running bundles replica, i.e. the number of - # docker/podman resource replicas currently running in the cluster - crm_mon --as-xml | xmllint --xpath "count(//resources/bundle[@id='${BUNDLE_NAME}']/replica/resource[@resource_agent='${OCF}:heartbeat:${engine}']/node)" - -} - -ocf_failures_globally() { - local replicas=$BUNDLE_REPLICAS - local last=$(($replicas - 1)) - local bundle_node - for i in $(seq 0 $last); do - bundle_node=${BUNDLE_NAME}-${i} - crm_failcount -q -G -r $NAME -N $bundle_node - done -} - -did_resource_failed_locally() { - local failures - local running - local remotehost - if [ "${NAME}" != "${BUNDLE_NAME}" ]; then - # if we're dealing with an ocf resource, it is running on a - # pacemaker_remote rather that on the real host, and the - # failcounts are thus associated to the pcmk remote. Replace - # the host's name with the pcmk remote's name. - remotehost=$(crm_mon --as-xml | xmllint --xpath "string(//resources/bundle[@id='${BUNDLE_NAME}']/replica/resource/node[@name='${HOST}']/../../resource[@resource_agent='${OCF}:pacemaker:remote']/@id)" -) - if [ -n "${remotehost}" ]; then - crm_failcount -q -G -r $NAME -N $remotehost | grep -q -w INFINITY - return $? - fi - # If no pcmk remote is currently running, the failcount from - # the ocf resource is useless, compute the failcount from the - # bundle case instead (computed below). - fi - - # for bundles, pacemaker can run any bundle replica locally - # (e.g. galera-bundle-docker-{0,1,2}), and a failure happens when - # there are no more replica to try. - # That is, when _at least_ one replica failed locally, and all the - # others either failed or are currently running elsewhere. 
- failures=$(bundle_failures_locally $HOST | grep -c -w INFINITY) - running=$(bundle_running_globally) - test $failures -gt 0 && \ - test $(( $failures + $running )) -ge $BUNDLE_REPLICAS -} - -did_resource_failed_globally() { - local remotecount - local failures - if [ "${NAME}" != "${BUNDLE_NAME}" ]; then - # we check the state of an ocf resource only if the - # pcmkremotes are started - remotecount=$(crm_mon --as-xml | xmllint --xpath "count(//resources/bundle[@id='${BUNDLE_NAME}']/replica/resource[@resource_agent='${OCF}:pacemaker:remote']/node)" -) - if [ "${remotecount}" = "0" ]; then - # no pcmkremote is running, so check the bundle state - # instead of checking the ocf resource - # bundle failed if all ${BUNDLE_REPLICAS} replicas failed - failures=$(bundle_failures_globally | grep -c -w INFINITY) - test $failures -eq $BUNDLE_REPLICAS - else - # ocf resource failed if it failed to start on - # all $BUNDLE_REPLICAS bundle nodes - failures=$(ocf_failures_globally | grep -c -w INFINITY) - test $failures -eq $BUNDLE_REPLICAS - fi - else - # bundle failed if all ${BUNDLE_REPLICAS} replicas failed - failures=$(bundle_failures_globally | grep -c -w INFINITY) - test $failures -eq $BUNDLE_REPLICAS - fi -} - - -# Input validation -# - -NAME=$1 -if [ -z "${NAME}" ]; then - echo 2>&1 "Error: argument NAME must not be empty" - exit 1 -fi - -BUNDLE_NAME=$2 -if [ -z "${BUNDLE_NAME}" ]; then - echo 2>&1 "Error: argument BUNDLE_NAME must not be empty" - exit 1 -fi - -ROLE_LOCAL=$3 -if [ "${ROLE_LOCAL}" = "_" ]; then - ROLE_LOCAL="" -fi - -ROLE_ANYWHERE=$4 -if [ "${ROLE_ANYWHERE}" = "_" ]; then - ROLE_ANYWHERE="" -fi - -if [ -z "${ROLE_LOCAL}" ]; then - if [ -z "${ROLE_ANYWHERE}" ]; then - echo 2>&1 "Error: either ROLE_LOCAL or ROLE_ANYWHERE must be non empty" - exit 1 - fi -else - if !(echo "${ROLE_LOCAL}" | grep -q -x -E "(Started|Slave|Master|Unpromoted|Promoted)"); then - echo 2>&1 "Error: argument ROLE_LOCAL must be either 'Started' 'Slave' 'Master' 'Unpromoted' or 'Promoted'" - exit 1 - fi -fi - -if [ -n "${ROLE_ANYWHERE}" ] && !(echo "${ROLE_ANYWHERE}" | grep -q -x -E "(Started|Slave|Master|Unpromoted|Promoted)"); then - echo 2>&1 "Error: argument ROLE_ANYWHERE must be either 'Started' 'Slave' 'Master' 'Unpromoted' or 'Promoted'" - exit 1 -fi - -# Ensure compatibility with pacemaker 2.1 -if pacemaker_supports_promoted; then - ROLE_LOCAL=$(echo "$ROLE_LOCAL" | sed -e 's/Master/Promoted/' -e 's/Slave/Unpromoted/') - ROLE_ANYWHERE=$(echo "$ROLE_ANYWHERE" | sed -e 's/Master/Promoted/' -e 's/Slave/Unpromoted/') - OCF="ocf" -else - OCF="ocf:" -fi - -HOST=${5:-$(facter hostname)} -TIMEOUT=${6:-__PCMKTIMEOUT__} - - -# Configure the search -# ---- -# Note: we can't use crm_resource in all searches because we can't -# easily extract the host the OCF resources run on (crm_resource -# returns the pcmk-remote nodes rather than the hosts) -# So instead, we implement various searches with XPath directly. 
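The failure tests defined above encode a simple count: with N bundle replicas, the resource has failed locally once at least one replica hit an INFINITY failcount on this host and every remaining replica has either failed here as well or is already running somewhere else, leaving pacemaker nothing to try locally; it has failed globally once all N replicas carry an INFINITY failcount. In plain Python terms:

    def failed_locally(local_failures, running_anywhere, replicas):
        return local_failures > 0 and local_failures + running_anywhere >= replicas

    def failed_globally(failed_count, replicas):
        return failed_count == replicas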
-
-if [ "${BUNDLE_NAME}" != "${NAME}" ]; then
-    # ocf resource
-    local_resource_xpath="//bundle/replica/resource[@resource_agent='${OCF}:pacemaker:remote']/node[@name='${HOST}']/../../resource[@id='${NAME}']"
-    any_resource_xpath="//bundle//resource[@id='${NAME}']"
-    replicas_xpath="//bundle/primitive[@id='${BUNDLE_NAME}']/../*[boolean(@image) and boolean(@replicas)]"
-else
-    # bundle resource
-    local_resource_xpath="//bundle[@id='${NAME}']/replica/resource/node[@name='${HOST}']/../../resource"
-    any_resource_xpath="//bundle[@id='${NAME}']//resource"
-    replicas_xpath="//bundle[@id='${BUNDLE_NAME}']/*[boolean(@image) and boolean(@replicas)]"
-fi
-
-bundle_def_xpath="//bundle[@id='${BUNDLE_NAME}']/*[boolean(@image) and boolean(@replicas)]"
-BUNDLE_CONTAINER_ENGINE=$(cibadmin -Q | xmllint --xpath "name(${bundle_def_xpath})" -)
-BUNDLE_REPLICAS=$(cibadmin -Q | xmllint --xpath "string(${bundle_def_xpath}/@replicas)" -)
-
-
-# The wait algorithm follows a two-stage approach
-# 1. Depending on how the script is called, we first check whether
-#    the resource is restarted locally. An A/P resource may be
-#    restarted elsewhere in the cluster.
-# 2. If needed, check whether the A/P resource has restarted
-#    elsewhere. For A/P M/S resources, in case the resource is
-#    restarted as Slave locally, ensure a Master is available.
-
-success=1
-bailout=1
-timeout=$TIMEOUT
-role=""
-
-# Stage 1: local check
-if [ -n "$ROLE_LOCAL" ]; then
-    log "Waiting until ${NAME} has restarted on ${HOST} and is in state ${ROLE_LOCAL}"
-    log "Will probe resource state with the following XPath pattern: ${local_resource_xpath}"
-
-    while [ $timeout -gt 0 ] && [ $bailout -ne 0 ] && [ $success -ne 0 ]; do
-        resource=$(crm_mon -r --as-xml | xmllint --xpath "${local_resource_xpath}" - 2>/dev/null)
-        role=$(echo "${resource}" | sed -ne 's/.*\Wrole="\([^"]*\)".*/\1/p')
-
-        if [ "$(crm_resource --meta -r ${NAME} -g is-managed 2>/dev/null)" = "false" ]; then
-            log "${NAME} is unmanaged, will never reach target role. Bailing out"
-            bailout=0
-            continue
-        elif [ "$(crm_resource --meta -r ${NAME} -g target-role 2>/dev/null)" = "Stopped" ]; then
-            log "${NAME} is disabled, will never reach target role. Bailing out"
-            bailout=0
-            continue
-        elif echo "${resource}" | grep -q -w "\Wblocked=\"true\""; then
-            log "${NAME} is blocked, will never reach target role. Bailing out"
-            bailout=0
-            continue
-        elif did_resource_failed_locally; then
-            log "${NAME} is in failed state, will never reach target role. Bailing out"
-            bailout=0
-            continue
-        elif [ "$role" = "$ROLE_LOCAL" ]; then
-            success=0
-            continue
-        elif [ -n "$ROLE_ANYWHERE" ] && [ "$role" = "$ROLE_ANYWHERE" ]; then
-            # A/P: we are restarted in the expected state
-            success=0
-            continue
-        else
-            log "Waiting for ${NAME} to transition to role ${ROLE_LOCAL} on ${HOST}"
-        fi
-
-        if [ $bailout -ne 0 ] && [ $success -ne 0 ]; then
-            sleep 4
-            timeout=$((timeout-4))
-        fi
-    done
-fi
-
-# Stage 2: global check
-if [ $timeout -gt 0 ] && [ -n "$ROLE_ANYWHERE" ] && [ "$role" != "$ROLE_ANYWHERE" ]; then
-    log "Wait until ${NAME} is restarted anywhere in the cluster in state ${ROLE_ANYWHERE}"
-    log "Will probe resource state with the following XPath pattern: ${any_resource_xpath}"
-
-    success=1
-    bailout=1
-    while [ $timeout -gt 0 ] && [ $bailout -ne 0 ] && [ $success -ne 0 ]; do
-        resources=$(crm_mon -r --as-xml | xmllint --xpath "${any_resource_xpath}" - 2>/dev/null)
-        if [ "$(crm_resource --meta -r ${NAME} -g is-managed 2>/dev/null)" = "false" ]; then
-            log "${NAME} is unmanaged, will never reach target role. Bailing out"
-            bailout=0
-            continue
-        elif [ "$(crm_resource --meta -r ${NAME} -g target-role 2>/dev/null)" = "Stopped" ]; then
-            log "${NAME} is disabled, will never reach target role. Bailing out"
-            bailout=0
-            continue
-        elif ! (echo "${resources}" | grep -q -w "\Wblocked=\"false\""); then
-            log "${NAME} blocked, will never reach target role. Bailing out"
-            bailout=0
-            continue
-        elif did_resource_failed_globally; then
-            log "${NAME} is in failed state, will never reach target role. Bailing out"
-            bailout=0
-            continue
-        elif echo "${resources}" | grep -q -w "\Wrole=\"${ROLE_ANYWHERE}\""; then
-            success=0
-            continue
-        else
-            log "Waiting for ${NAME} to transition to role ${ROLE_ANYWHERE} anywhere in the cluster"
-        fi
-
-        if [ $bailout -ne 0 ] && [ $success -ne 0 ]; then
-            sleep 4
-            timeout=$((timeout-4))
-        fi
-    done
-fi
-
-if [ $timeout -le 0 ]; then
-    log "Timeout reached after ${TIMEOUT}s while waiting for ${NAME} to be restarted"
-elif [ $bailout -le 0 ]; then
-    log "Restart monitoring for ${NAME} cancelled"
-fi
-
-if [ $success -eq 0 ]; then
-    log "${NAME} successfully restarted"
-else
-    log "${NAME} was not restarted properly"
-fi
-
-# Don't block minor update or stack update if the wait was unsuccessful
-exit 0
diff --git a/container_config_scripts/placement_wait_for_service.py b/container_config_scripts/placement_wait_for_service.py
deleted file mode 100755
index da6286eeb9..0000000000
--- a/container_config_scripts/placement_wait_for_service.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# shell script to check if placement API is up after X attempts.
-# Default max is 60 iterations with 10s (default) timeout in between.
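The liveness condition polled for below is simply that the placement root document advertises a CURRENT API version. As a rough illustration, the equivalent manual probe would be something like the following, where the endpoint URL is actually discovered from Keystone and the host and port here are hypothetical:

    curl -s http://192.0.2.10:8778/placement/
    {"versions": [{"status": "CURRENT", "min_version": "1.0", "max_version": "1.29", "id": "v1.0", ...}]}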
-
-from configparser import ConfigParser
-import logging
-import logging.handlers
-import os
-import re
-import sys
-import time
-
-from keystoneauth1.identity import v3
-from keystoneauth1 import session
-from keystoneclient.v3 import client
-import requests
-
-
-config = ConfigParser(strict=False)
-
-debug = os.getenv('__OS_DEBUG', 'false')
-
-if debug.lower() == 'true':
-    loglevel = logging.DEBUG
-else:
-    loglevel = logging.INFO
-
-LOG = logging.getLogger('placement_wait_for_service')
-LOG_FORMAT = ('%(asctime)s.%(msecs)03d %(levelname)s '
-              '%(name)s %(message)s')
-DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
-LOG.handlers.clear()
-LOG.setLevel(loglevel)
-LOG.propagate = True
-formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT)
-stream_handler = logging.StreamHandler(sys.stdout)
-stream_handler.setFormatter(formatter)
-LOG.addHandler(stream_handler)
-
-iterations = 60
-timeout = 10
-placement_cfg = '/etc/placement/placement.conf'
-
-if __name__ == '__main__':
-    if os.path.isfile(placement_cfg):
-        try:
-            config.read(placement_cfg)
-        except Exception:
-            LOG.exception('Error while reading placement.conf:')
-    else:
-        LOG.error('Placement configuration file %s does not exist',
-                  placement_cfg)
-        sys.exit(1)
-
-    # get keystone client with details from [keystone_authtoken] section
-    auth = v3.Password(
-        user_domain_name=config.get('keystone_authtoken', 'user_domain_name'),
-        username=config.get('keystone_authtoken', 'username'),
-        password=config.get('keystone_authtoken', 'password'),
-        project_name=config.get('keystone_authtoken', 'project_name'),
-        project_domain_name=config.get('keystone_authtoken',
-                                       'project_domain_name'),
-        auth_url=config.get('keystone_authtoken', 'auth_url') + '/v3')
-    sess = session.Session(auth=auth, verify=False)
-    keystone = client.Client(session=sess, interface='internal')
-
-    iterations_endpoint = iterations
-    placement_endpoint_url = None
-    while iterations_endpoint > 1:
-        iterations_endpoint -= 1
-        try:
-            # get placement service id
-            placement_service_id = keystone.services.list(
-                name='placement')[0].id
-
-            # get placement endpoint
-            # Note: puppet-placement does not support setting the interface
-            # until we have https://review.opendev.org/688862.
-            # Lets hard code 'internal' for now.
-            placement_endpoint_url = keystone.endpoints.list(
-                service=placement_service_id,
-                region=config.get('keystone_authtoken', 'region_name'),
-                interface='internal')[0].url
-            if not placement_endpoint_url:
-                LOG.error('Failed to get placement service endpoint!')
-            else:
-                break
-        except Exception:
-            LOG.exception('Retry - Failed to get placement service endpoint:')
-        time.sleep(timeout)
-
-    if not placement_endpoint_url:
-        LOG.error('Failed to get placement service endpoint!')
-        sys.exit(1)
-
-    # we should have CURRENT in the request response from placement:
-    # {"versions": [{"status": "CURRENT", "min_version": "1.0", "max_version":
-    # "1.29", "id": "v1.0", "links": [{"href": "", "rel": "self"}]}]}
-    response_reg = re.compile('.*CURRENT,*')
-
-    while iterations > 1:
-        iterations -= 1
-        try:
-            r = requests.get(placement_endpoint_url + '/', verify=False)
-            if r.status_code == 200 and response_reg.match(r.text):
-                LOG.info('Placement service up! - %s', r.text)
-                sys.exit(0)
-            else:
-                LOG.info('response - %r', r)
-                LOG.info('Placement service not up - %s, %s',
-                         r.status_code,
-                         r.text)
-        except Exception:
-            LOG.exception('Error query the placement endpoint:')
-        time.sleep(timeout)
-
-    sys.exit(1)
-
-# vim: set et ts=4 sw=4 :
diff --git a/container_config_scripts/pyshim.sh b/container_config_scripts/pyshim.sh
deleted file mode 100755
index e6077fc666..0000000000
--- a/container_config_scripts/pyshim.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-# Copyright 2018 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# Usage: pyshim.sh