Remove base role integration testing

These jobs have moved to the zuul-jobs repo.

Change-Id: I6a5759e0fbc8f8a6bb37045ca5254412882972ea
This commit is contained in:
James E. Blair 2019-07-08 10:46:44 -07:00
parent f0839761a6
commit 0c54a6f4e2
23 changed files with 15 additions and 838 deletions

View File

@ -3,12 +3,12 @@
# If you add new tests, also update the files section in job
# base-integration in zuul.d/jobs.yaml.
# Note: set-zuul-log-path-fact is tested by emit-job-header.yaml
- include: emit-job-header.yaml
- include: ensure-output-dirs.yaml
- include: use-cached-repos.yaml
- hosts: all
roles:
- ensure-output-dirs
- include: mirror-info.yaml
- include: configure-mirrors.yaml
- include: fetch-zuul-cloner.yaml
- include: validate-host.yaml
- include: fetch-output.yaml
- hosts: all
roles:
- fetch-output

View File

@ -1,16 +0,0 @@
# Integration test: apply configure-mirrors, then verify the resulting
# mirror configuration actually works by installing a package through it.
- name: Test the configure-mirrors role
  hosts: all
  roles:
    - role: configure-mirrors
      mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
      set_apt_mirrors_trusted: true
  post_tasks:
    # Gentoo names the package with its category/name atom.
    - name: Set emacs package fact for gentoo
      set_fact:
        emacs_package: app-editors/emacs
      when: ansible_distribution == 'Gentoo'
    - name: Install a package to sanity check the mirror configuration
      package:
        name: "{{ emacs_package | default('emacs') }}"
        state: "present"
      become: true

View File

@ -1,19 +0,0 @@
# Integration test: run emit-job-header and assert that the included
# set-zuul-log-path-fact role produced a zuul_log_path containing the
# change / patchset / pipeline / job components.
- name: Test the emit-job-header role
  hosts: all
  roles:
    - role: emit-job-header
      zuul_log_url: "http://logs.openstack.org"
  post_tasks:
    # All emit-job-header does is a debug statement so the worst that would
    # happen would be that the debug task would fail outright and we'd prevent
    # something breaking that debug statement from merging just by running it.
    # However, the emit-job-header role includes the set-zuul-log-path-fact
    # role. We can only test for zuul_log_path against changes, though.
    - name: Assert zuul_log_path by set-zuul-log-path-fact for a change
      assert:
        that:
          - zuul_log_path is defined
          - zuul.change in zuul_log_path
          - zuul.patchset in zuul_log_path
          - zuul.pipeline in zuul_log_path
          - zuul.job in zuul_log_path

View File

@ -1,32 +0,0 @@
# Integration test: run ensure-output-dirs, then re-declare each expected
# directory with the file module; "is not changed" proves the role had
# already created it.
- name: Test the ensure-output-dirs role
  hosts: all
  roles:
    - role: ensure-output-dirs
  post_tasks:
    - name: Check that log dir has been created
      file:
        path: "{{ zuul_output_dir }}/logs"
        state: directory
      register: log_directory
    - name: Check that artifact dir has been created
      file:
        path: "{{ zuul_output_dir }}/artifacts"
        state: directory
      register: artifact_directory
    - name: Check that doc dir has been created
      file:
        path: "{{ zuul_output_dir }}/docs"
        state: directory
      register: doc_directory
    - name: Validate that directories were set correctly
      assert:
        that:
          - log_directory is not changed
          - log_directory is succeeded
          - artifact_directory is not changed
          - artifact_directory is succeeded
          - doc_directory is not changed
          - doc_directory is succeeded

View File

@ -1,46 +0,0 @@
# Integration test: seed the logs/docs/artifacts output directories with a
# small file per host, run fetch-output, then verify each file arrived on
# the executor.
- name: Run the fetch-output role
  hosts: all
  pre_tasks:
    # ensure-output-dirs is run before this
    - name: Write test log file
      copy:
        dest: '{{ zuul_output_dir }}/{{ item }}/{{ inventory_hostname }}'
        content: '{{ item }}'
      loop:
        - logs
        - docs
        - artifacts
  roles:
    - role: fetch-output
  post_tasks:
    - name: Check that logs have been pulled
      delegate_to: localhost
      file:
        # log_path fact is set in fetch-output
        path: "{{ log_path }}/{{ inventory_hostname }}"
        state: file
      register: local_log_content
    - name: Check that artifacts have been pulled
      delegate_to: localhost
      file:
        path: "{{ zuul.executor.work_root }}/artifacts/{{ inventory_hostname }}"
        state: file
      register: local_artifact_content
    - name: Check that docs have been pulled
      delegate_to: localhost
      file:
        path: "{{ zuul.executor.work_root }}/docs/{{ inventory_hostname }}"
        state: file
      register: local_doc_content
    - name: Validate that files were pulled correctly
      assert:
        that:
          - local_log_content is not changed
          - local_log_content is succeeded
          - local_artifact_content is not changed
          - local_artifact_content is succeeded
          - local_doc_content is not changed
          - local_doc_content is succeeded

View File

@ -1,84 +0,0 @@
# Integration test: install the zuul-cloner shim and verify it can clone a
# project listed in required-projects but fails for one that is not.
- name: Test the fetch-zuul-cloner role
  hosts: all
  vars:
    destination: "/usr/zuul-env/bin/zuul-cloner"
    repo_src_dir: "/home/zuul/src/opendev.org"
  roles:
    - role: fetch-zuul-cloner
  post_tasks:
    - name: Check that the directory exists
      file:
        path: "{{ destination | dirname }}"
        state: directory
      register: directory
    - name: Check that the zuul-cloner shim exists
      stat:
        path: "{{ destination }}"
      register: cloner
    - name: Validate that the shim was installed successfully
      assert:
        that:
          - directory is not changed
          - directory is succeeded
          - cloner.stat.exists
          - cloner.stat.mode == "0755"
    - name: Zuul clone something in required-projects
      shell:
        executable: /bin/bash
        cmd: |
          CLONEMAP=`mktemp`
          function cleanup {
              rm -f $CLONEMAP
          }
          trap cleanup EXIT
          cat > $CLONEMAP << EOF
          clonemap:
            - name: openstack/project-config
              dest: {{ ansible_user_dir }}
          EOF
          /usr/zuul-env/bin/zuul-cloner -m $CLONEMAP \
              --cache-dir /opt/git https://opendev.org \
              openstack/project-config
      register: clone_with_required
    - name: Check if repository was cloned
      stat:
        path: "{{ ansible_user_dir }}/src/opendev.org/openstack/project-config"
      register: with_required_stat
    - name: Zuul clone something not in required-projects
      shell:
        executable: /bin/bash
        cmd: |
          CLONEMAP=`mktemp`
          function cleanup {
              rm -f $CLONEMAP
          }
          trap cleanup EXIT
          cat > $CLONEMAP << EOF
          clonemap:
            - name: openstack-infra/jenkins-job-builder
              dest: {{ ansible_user_dir }}
          EOF
          /usr/zuul-env/bin/zuul-cloner -m $CLONEMAP \
              --cache-dir /opt/git https://git.openstack.org \
              openstack-infra/jenkins-job-builder
      # This clone is expected to fail; the assert below checks for exactly that.
      ignore_errors: true
      register: clone_without_required
    - name: Check if repository was cloned
      stat:
        path: "{{ ansible_user_dir }}/src/git.openstack.org/openstack-infra/jenkins-job-builder"
      register: without_required_stat
    - name: Validate zuul-cloner shim results
      assert:
        that:
          - clone_with_required is succeeded
          - clone_with_required is changed
          - with_required_stat.stat.exists
          - clone_without_required is failed
          - not without_required_stat.stat.exists

View File

@ -1,54 +0,0 @@
# Integration test: set up the overlay bridge with multi-node-bridge, then
# verify openvswitch is installed and running, the bridge and port exist,
# and both ends of the bridge answer pings.
- name: Test the multi-node-bridge role
  hosts:
    - switch
    - peers
  roles:
    - multi-node-bridge
  post_tasks:
    - become: true
      block:
        - name: openvswitch should be installed
          package:
            name: "{{ ovs_package }}"
            # "present" is the documented value; "installed" is a
            # deprecated alias with identical behavior.
            state: present
          register: ovs_installed
        - name: openvswitch should be running
          service:
            name: "{{ ovs_service }}"
            state: started
            enabled: true
          register: ovs_running
        - name: bridge should exist
          openvswitch_bridge:
            bridge: "{{ bridge_name }}"
          register: ovs_bridge
        - name: port should exist
          command: ovs-vsctl show
          changed_when: false
          register: ovs_port
        - name: switch should be reachable
          command: ping -c 4 {{ bridge_address_prefix }}.{{ bridge_address_offset }}
          changed_when: false
          failed_when: false
          register: ovs_ping_switch
        - name: peer should be reachable
          command: ping -c 4 {{ bridge_address_prefix }}.{{ bridge_address_offset + 1 }}
          changed_when: false
          failed_when: false
          register: ovs_ping_peer
        - name: assert test results
          assert:
            that:
              - ovs_installed is not changed
              - ovs_running is not changed
              - ovs_bridge is not changed
              - ovs_port.rc == 0
              - "'Port \"br-infra_' in ovs_port.stdout"
              - ovs_ping_switch.rc == 0
              - ovs_ping_peer.rc == 0

View File

@ -1,80 +0,0 @@
# Integration test: re-apply multi-node-bridge with internal traffic
# authorized, flush the live rules, restart the platform firewall
# service(s), and verify the persisted rules came back.
- name: Test the persistent-firewall role
  hosts: all
  roles:
    # We're including multi-node-bridge a second time with the toggle for
    # enabling firewall rules for the bridge network subnet
    # By this time, multi-node-firewall has already run, we don't need to run
    # it again -- we're testing here that both are persisted properly.
    - { role: multi-node-bridge, bridge_authorize_internal_traffic: true }
  post_tasks:
    # NOTE (dmsimard): Using with_first_found and include_vars can yield
    # unexpected results, see multinode_firewall_persistence_vars/README.rst
    - name: Include OS-specific variables
      include_vars: "{{ item }}"
      with_first_found:
        - "multinode_firewall_persistence_vars/{{ ansible_distribution }}_{{ ansible_distribution_release }}.yaml"
        - "multinode_firewall_persistence_vars/{{ ansible_distribution }}.yaml"
        - "multinode_firewall_persistence_vars/{{ ansible_os_family }}.yaml"
        - "multinode_firewall_persistence_vars/default.yaml"
    - name: Flush iptables rules
      become: true
      command: "{{ item }}"
      with_items:
        - iptables --flush
        - ip6tables --flush
    # NOTE (dmsimard): We're using with_items here because RedHat and Gentoo
    # need to restart both iptables and ip6tables.
    - name: Restart iptables
      become: true
      service:
        name: "{{ item }}"
        state: restarted
      when: iptables_service is defined
      with_items: "{{ iptables_service }}"
    - name: switch and peer nodes should be in the ipv4 firewall
      become: true
      command: iptables-save
      changed_when: false
      failed_when: false
      register: iptables_rules
    - name: Validate ipv4 private firewall configuration
      assert:
        that:
          - "'-A INPUT -s {{ hostvars[item]['nodepool']['private_ipv4'] }}/32 -j ACCEPT' in iptables_rules.stdout"
      with_items: "{{ groups['all'] }}"
      when:
        - hostvars[item]['nodepool']['private_ipv4']
    - name: Validate ipv4 public firewall configuration
      assert:
        that:
          - "'-A INPUT -s {{ hostvars[item]['nodepool']['public_ipv4'] }}/32 -j ACCEPT' in iptables_rules.stdout"
      with_items: "{{ groups['all'] }}"
      when:
        - hostvars[item]['nodepool']['public_ipv4']
    - name: Validate ipv4 bridge firewall configuration
      assert:
        that:
          - "'-A INPUT -s {{ bridge_address_prefix }}.0/{{ bridge_address_subnet }} -d {{ bridge_address_prefix }}.0/{{ bridge_address_subnet }} -j ACCEPT' in iptables_rules.stdout"
      with_items: "{{ groups['all'] }}"
    # ipv6_addresses is set by the multi-node-firewall role
    - when: ipv6_addresses | length > 0
      block:
        - name: switch and peer nodes should be in the ipv6 firewall
          become: true
          command: ip6tables-save
          changed_when: false
          failed_when: false
          register: ip6tables_rules
        - name: Validate ipv6 firewall configuration
          assert:
            that:
              - "'-A INPUT -s {{ hostvars[item]['nodepool']['public_ipv6'] }}/128 -j ACCEPT' in ip6tables_rules.stdout"
          with_items: "{{ groups['all'] }}"

View File

@ -1,43 +0,0 @@
# Integration test: run multi-node-firewall and verify every node's
# addresses were accepted into the ipv4 (and, when present, ipv6) rules.
- name: Test the multi-node-firewall role
  hosts: all
  roles:
    - multi-node-firewall
  post_tasks:
    - name: switch and peer nodes should be in the ipv4 firewall
      become: true
      command: iptables-save
      changed_when: false
      failed_when: false
      register: iptables_rules
    - name: Validate ipv4 private firewall configuration
      assert:
        that:
          - "'-A INPUT -s {{ hostvars[item]['nodepool']['private_ipv4'] }}/32 -j ACCEPT' in iptables_rules.stdout"
      with_items: "{{ groups['all'] }}"
      when:
        - hostvars[item]['nodepool']['private_ipv4']
    - name: Validate ipv4 public firewall configuration
      assert:
        that:
          - "'-A INPUT -s {{ hostvars[item]['nodepool']['public_ipv4'] }}/32 -j ACCEPT' in iptables_rules.stdout"
      with_items: "{{ groups['all'] }}"
      when:
        - hostvars[item]['nodepool']['public_ipv4']
    # ipv6_addresses is set by the multi-node-firewall role
    - when: ipv6_addresses | length > 0
      block:
        - name: switch and peer nodes should be in the ipv6 firewall
          become: true
          command: ip6tables-save
          changed_when: false
          failed_when: false
          register: ip6tables_rules
        - name: Validate ipv6 firewall configuration
          assert:
            that:
              - "'-A INPUT -s {{ hostvars[item]['nodepool']['public_ipv6'] }}/128 -j ACCEPT' in ip6tables_rules.stdout"
          with_items: "{{ groups['all'] }}"

View File

@ -1,25 +0,0 @@
# Integration test: run multi-node-hosts-file and assert every node's
# "private_ipv4 hostname" pair is present in /etc/hosts.
- name: Test the multi-node-hosts-file role
  hosts: all
  roles:
    - multi-node-hosts-file
  post_tasks:
    - name: lookup hosts file
      command: cat /etc/hosts
      register: hosts_file
    - name: Set up the list of hosts and addresses
      set_fact:
        host_addresses: >
          {% set hosts = [] -%}
          {% for host, vars in hostvars.items() -%}
          {% set _ = hosts.append({'host': host, 'address': vars['nodepool']['private_ipv4']}) -%}
          {% endfor -%}
          {{- hosts -}}
    - name: assert that hosts are in the hosts file
      vars:
        line: "{{ item.address }} {{ item.host }}"
      assert:
        that:
          - "line in hosts_file.stdout"
      with_list: "{{ host_addresses }}"

View File

@ -1,31 +0,0 @@
# Integration test: run multi-node-known-hosts and assert every node's
# non-empty nodepool addresses appear in ~/.ssh/known_hosts.
- name: Test the multi-node-known-hosts role
  hosts: all
  roles:
    - multi-node-known-hosts
  post_tasks:
    - name: lookup known_hosts file
      command: cat ~/.ssh/known_hosts
      register: known_hosts
    - name: Set up host addresses
      set_fact:
        host_addresses: >
          {% set hosts = [] -%}
          {% for host, vars in hostvars.items() -%}
          {% if vars['nodepool']['private_ipv4'] != '' -%}
          {% set _ = hosts.append(vars['nodepool']['private_ipv4']) -%}
          {% endif -%}
          {% if vars['nodepool']['public_ipv4'] != '' -%}
          {% set _ = hosts.append(vars['nodepool']['public_ipv4']) -%}
          {% endif -%}
          {% if vars['nodepool']['public_ipv6'] != '' -%}
          {% set _ = hosts.append(vars['nodepool']['public_ipv6']) -%}
          {% endif -%}
          {% endfor -%}
          {{- hosts | sort | unique -}}
    - name: assert that hosts are in known_hosts
      assert:
        that:
          - "item in known_hosts.stdout"
      with_items: "{{ host_addresses }}"

View File

@ -1,13 +0,0 @@
# 'base' is implicit and runs before multi-node roles
- include: base.yaml
# Roles that are part of the 'multinode' job
# If you add new tests, also update the files section in jobs
# base-integration and multinode-integration in zuul.d/jobs.yaml.
# Note: order matters here -- the firewall persistence test expects the
# multi-node-firewall and multi-node-bridge playbooks to have run first.
- include: multi-node-known-hosts.yaml
- include: multi-node-hosts-file.yaml
- include: multi-node-firewall.yaml
- include: multi-node-bridge.yaml
- include: multi-node-firewall-persistence.yaml

View File

@ -1,2 +0,0 @@
# Firewall service(s) restarted by the persistent-firewall test's
# "Restart iptables" task for this platform.
# NOTE(review): filename not visible here; netfilter-persistent suggests
# this is the Debian-family vars file -- confirm against the repo.
iptables_service:
- netfilter-persistent

View File

@ -1,3 +0,0 @@
# Firewall service(s) restarted by the persistent-firewall test's
# "Restart iptables" task for this platform; both v4 and v6 services
# are listed, so both get restarted via with_items.
# NOTE(review): filename not visible here -- confirm which distribution
# this vars file belongs to against the repo.
iptables_service:
- iptables-restore
- ip6tables-restore

View File

@ -1,21 +0,0 @@
multinode_firewall_persistence_vars
===================================
This directory is meant to contain distribution-specific variables used in
integration tests for the ``multinode_firewall_persistence`` role.
The behavior of the ``with_first_found`` lookup used with the ``include_vars``
module will make it search for the ``vars`` directory in the "usual" order of
precedence which means if there is a ``vars`` directory inside the playbook
directory, it will search there first.
This can result in one of two issues:
1. If you try to prepend ``{{ role_path }}`` to workaround this issue with the
variable file paths, Zuul will deny the lookup if you are running an
untrusted playbook because the role was prepared in a trusted location and
Ansible is trying to search outside the work root as a result.
2. The variables included are the wrong ones -- the ones from
``playbooks/vars`` are loaded instead of ``path/to/<role>/vars``
This is why this directory is called ``multinode_firewall_persistence_vars``.

View File

@ -1,3 +0,0 @@
# Firewall service(s) restarted by the persistent-firewall test's
# "Restart iptables" task; both v4 and v6 services are listed, so both
# get restarted via with_items.
# NOTE(review): filename not visible here; the playbook comment mentions
# RedHat and Gentoo needing both services -- confirm against the repo.
iptables_service:
- iptables
- ip6tables

View File

@ -1,2 +0,0 @@
# Firewall service(s) restarted by the persistent-firewall test's
# "Restart iptables" task.
# NOTE(review): filename not visible here; SuSEfirewall2 suggests this is
# the SUSE vars file -- confirm against the repo.
iptables_service:
- SuSEfirewall2

View File

@ -1,2 +0,0 @@
# Firewall service(s) restarted by the persistent-firewall test's
# "Restart iptables" task.
# NOTE(review): filename not visible here; iptables-persistent suggests
# this is the Ubuntu vars file -- confirm against the repo.
iptables_service:
- iptables-persistent

View File

@ -1,24 +0,0 @@
# Integration test: run use-cached-repos and verify that both a
# required-project and the project under test were prepared on disk.
- name: Test the use-cached-repos role
  hosts: all
  roles:
    - role: use-cached-repos
  post_tasks:
    # openstack/project-config is in 'required-projects'.
    # Also check that the project being tested is being prepared.
    # We're checking them explicitly rather than with_items on zuul.projects
    # in case there is a regression which would take an item out.
    - name: Check that openstack/project-config was prepared
      stat:
        path: "{{ ansible_user_dir }}/src/opendev.org/openstack/project-config"
      register: project_config
    - name: Check this project was prepared
      stat:
        path: "{{ ansible_user_dir }}/src/{{ zuul.project.canonical_name }}"
      register: self_config
    - name: Validate that required projects have been prepared
      assert:
        that:
          - project_config.stat.exists
          - self_config.stat.exists

View File

@ -1,13 +0,0 @@
# Integration test: gather network facts first, then run validate-host
# with a traceroute target.
- name: Test the validate-host role
  hosts: all
  pre_tasks:
    # NOTE(pabelanger): Until we hit the validate-host role, we have a minimal
    # set of ansible variables collected by zuul-executor. This doesn't include
    # network variables (ansible_default_ipv4 / ansible_default_ipv6) so gather
    # these variables as they are important to the configure-unbound role.
    - name: Gather network facts
      setup:
        gather_subset: 'network'
  roles:
    - role: validate-host
      zuul_site_traceroute_host: files.openstack.org

View File

@ -4,292 +4,20 @@
# https://opendev.org/cgit/zuul/zuul-jobs
- job:
name: openstack-infra-base-integration
name: openstack-zuul-jobs-test-mirror-info
description: |
Runs roles that are included by default in the 'base' job in order to
prevent regressions. This job should not be used outside the context of
testing roles and playbooks found in project-config, zuul-jobs and
openstack-zuul-jobs.
abstract: true
protected: true
Test the mirror-info role.
This is meant to be included in a base job, so we inherit from
base-minimal to make sure it hasn't already run.
parent: base-minimal
required-projects:
- openstack/project-config
roles:
- zuul: zuul/zuul-jobs
run: tests/base.yaml
files:
- ^roles/configure-mirrors/.*
- ^roles/emit-job-header/.*
- ^roles/ensure-output-dirs/.*
- ^roles/fetch-zuul-cloner/.*
- ^roles/mirror-info/.*
- ^roles/set-zuul-log-path-fact/.*
- ^roles/use-cached-repos/.*
- ^roles/fetch-output/.*
- ^roles/validate-host/.*
- ^tests/.*
- job:
name: openstack-infra-base-integration-centos-7
parent: openstack-infra-base-integration
nodeset: centos-7
- job:
name: openstack-infra-base-integration-debian-stable
parent: openstack-infra-base-integration
nodeset: debian-stable
- job:
name: openstack-infra-base-integration-fedora-latest
parent: openstack-infra-base-integration
nodeset: fedora-latest
- job:
name: openstack-infra-base-integration-gentoo-17-0-systemd
parent: openstack-infra-base-integration
nodeset: gentoo-17-0-systemd
- job:
name: openstack-infra-base-integration-opensuse423
parent: openstack-infra-base-integration
nodeset: opensuse-423
- job:
name: openstack-infra-base-integration-opensuse15
parent: openstack-infra-base-integration
nodeset: opensuse-15
- job:
name: openstack-infra-base-integration-opensuse-tumbleweed
parent: openstack-infra-base-integration
nodeset: opensuse-tumbleweed
- job:
name: openstack-infra-base-integration-ubuntu-bionic
parent: openstack-infra-base-integration
nodeset: ubuntu-bionic
- job:
name: openstack-infra-base-integration-ubuntu-trusty
parent: openstack-infra-base-integration
nodeset: ubuntu-trusty
- job:
name: openstack-infra-base-integration-ubuntu-xenial
parent: openstack-infra-base-integration
nodeset: ubuntu-xenial
- job:
name: openstack-infra-multinode-integration
abstract: true
protected: true
description: |
Runs roles that are included by default in the 'multinode' job in order
to prevent regressions. This job should not be used outside the context
of testing roles and playbooks found in project-config, zuul-jobs and
openstack-zuul-jobs.
parent: base-minimal
vars:
ara_generate_html: true
required-projects:
- openstack/project-config
roles:
- zuul: zuul/zuul-jobs
run: tests/multinode.yaml
files:
- ^roles/configure-mirrors/.*
- ^roles/emit-job-header/.*
- ^roles/ensure-output-dirs/.*
- ^roles/fetch-zuul-cloner/.*
- ^roles/fetch-output/.*
- ^roles/mirror-info/.*
- ^roles/set-zuul-log-path-fact/.*
- ^roles/use-cached-repos/.*
- ^roles/multi-node-bridge/.*
- ^roles/multi-node-firewall/.*
- ^roles/persistent-firewall/.*
- ^roles/multi-node-hosts-file/.*
- ^roles/multi-node-known-hosts/.*
- ^tests/.*
- ^playbooks/multinode/.*
- job:
name: openstack-infra-multinode-integration-centos-7
parent: openstack-infra-multinode-integration
nodeset:
nodes:
- name: primary
label: centos-7
- name: secondary
label: centos-7
groups:
- name: switch
nodes:
- primary
- name: peers
nodes:
- secondary
- job:
name: openstack-infra-multinode-integration-debian-stable
parent: openstack-infra-multinode-integration
nodeset:
nodes:
- name: primary
label: debian-stretch
- name: secondary
label: debian-stretch
groups:
- name: switch
nodes:
- primary
- name: peers
nodes:
- secondary
- job:
name: openstack-infra-multinode-integration-fedora-latest
parent: openstack-infra-multinode-integration
nodeset:
nodes:
- name: primary
label: fedora-28
- name: secondary
label: fedora-28
groups:
- name: switch
nodes:
- primary
- name: peers
nodes:
- secondary
- job:
name: openstack-infra-multinode-integration-gentoo-17-0-systemd
parent: openstack-infra-multinode-integration
nodeset:
nodes:
- name: primary
label: gentoo-17-0-systemd
- name: secondary
label: gentoo-17-0-systemd
groups:
- name: switch
nodes:
- primary
- name: peers
nodes:
- secondary
- job:
name: openstack-infra-multinode-integration-opensuse423
parent: openstack-infra-multinode-integration
nodeset:
nodes:
- name: primary
label: opensuse-423
- name: secondary
label: opensuse-423
groups:
- name: switch
nodes:
- primary
- name: peers
nodes:
- secondary
- job:
# Being updated to the latest opensuse 15.x release when available
name: openstack-infra-multinode-integration-opensuse15
description: |
Multinode integration job for openSUSE Leap 15.x. Currently
points to Leap 15.0 but will be updated to point to the latest
available 15.x minor release automatically as they stay backward
compatible.
parent: openstack-infra-multinode-integration
nodeset:
nodes:
- name: primary
label: opensuse-15
- name: secondary
label: opensuse-15
groups:
- name: switch
nodes:
- primary
- name: peers
nodes:
- secondary
- job:
name: openstack-infra-multinode-integration-opensuse-tumbleweed
parent: openstack-infra-multinode-integration
nodeset:
nodes:
- name: primary
label: opensuse-tumbleweed
- name: secondary
label: opensuse-tumbleweed
groups:
- name: switch
nodes:
- primary
- name: peers
nodes:
- secondary
- job:
name: openstack-infra-multinode-integration-ubuntu-bionic
parent: openstack-infra-multinode-integration
nodeset:
nodes:
- name: primary
label: ubuntu-bionic
- name: secondary
label: ubuntu-bionic
groups:
- name: switch
nodes:
- primary
- name: peers
nodes:
- secondary
- job:
name: openstack-infra-multinode-integration-ubuntu-trusty
parent: openstack-infra-multinode-integration
nodeset:
nodes:
- name: primary
label: ubuntu-trusty
- name: secondary
label: ubuntu-trusty
groups:
- name: switch
nodes:
- primary
- name: peers
nodes:
- secondary
- job:
name: openstack-infra-multinode-integration-ubuntu-xenial
parent: openstack-infra-multinode-integration
nodeset:
nodes:
- name: primary
label: ubuntu-xenial
- name: secondary
label: ubuntu-xenial
groups:
- name: switch
nodes:
- primary
- name: peers
nodes:
- secondary
- job:
name: build-openstack-sphinx-docs
parent: build-sphinx-docs

View File

@ -4,49 +4,11 @@
- publish-tox-docs-infra
check:
jobs:
- openstack-infra-base-integration-centos-7
- openstack-infra-base-integration-debian-stable
- openstack-infra-base-integration-fedora-latest
- openstack-infra-base-integration-gentoo-17-0-systemd
- openstack-infra-base-integration-ubuntu-bionic
- openstack-infra-base-integration-ubuntu-trusty
- openstack-infra-base-integration-ubuntu-xenial
- openstack-infra-base-integration-opensuse423
- openstack-infra-base-integration-opensuse15
- openstack-infra-base-integration-opensuse-tumbleweed
- openstack-infra-multinode-integration-centos-7
- openstack-infra-multinode-integration-debian-stable
- openstack-infra-multinode-integration-fedora-latest
- openstack-infra-multinode-integration-gentoo-17-0-systemd
- openstack-infra-multinode-integration-ubuntu-bionic
- openstack-infra-multinode-integration-ubuntu-trusty
- openstack-infra-multinode-integration-ubuntu-xenial
- openstack-infra-multinode-integration-opensuse423
- openstack-infra-multinode-integration-opensuse15
- openstack-infra-multinode-integration-opensuse-tumbleweed
- openstack-zuul-jobs-test-mirror-info
- openstack-infra-extra-integration-xenial
- openstack-infra-extra-integration-bionic
- openstack-zuul-jobs-linters
gate:
jobs:
- openstack-infra-base-integration-centos-7
- openstack-infra-base-integration-debian-stable
- openstack-infra-base-integration-fedora-latest
- openstack-infra-base-integration-gentoo-17-0-systemd
- openstack-infra-base-integration-ubuntu-bionic
- openstack-infra-base-integration-ubuntu-trusty
- openstack-infra-base-integration-ubuntu-xenial
- openstack-infra-base-integration-opensuse423
- openstack-infra-base-integration-opensuse15
- openstack-infra-base-integration-opensuse-tumbleweed
- openstack-infra-multinode-integration-centos-7
- openstack-infra-multinode-integration-debian-stable
- openstack-infra-multinode-integration-fedora-latest
- openstack-infra-multinode-integration-gentoo-17-0-systemd
- openstack-infra-multinode-integration-ubuntu-bionic
- openstack-infra-multinode-integration-ubuntu-trusty
- openstack-infra-multinode-integration-ubuntu-xenial
- openstack-infra-multinode-integration-opensuse423
- openstack-infra-multinode-integration-opensuse15
- openstack-infra-multinode-integration-opensuse-tumbleweed
- openstack-zuul-jobs-test-mirror-info
- openstack-zuul-jobs-linters