3fce10179e
We run the podman containers as root, so when we collect the logs we
must do that as root too; otherwise we won't see the containers. This
is why we currently collect no container logs.

Also, increase the boot timeout from 600 to 1200 seconds to deal with
what appears to be much slower boot times of the test node.

Change-Id: Ie146599f7598459c41c2d2d885b90a702bc900bb
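The fix amounts to privilege escalation on the collection tasks. A
minimal sketch of the idea, assuming an Ansible post-run task (the task
name, log path, and loop are illustrative, not the actual role):

  # Hypothetical sketch: without become, podman only lists the calling
  # user's containers, so the root-owned ones are invisible.
  - name: Collect container logs as root
    become: true
    shell: |
      mkdir -p /tmp/container-logs
      for id in $(podman ps -a -q); do
          podman logs "$id" > "/tmp/container-logs/${id}.log" 2>&1
      done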
103 lines | 3.4 KiB | Django/Jinja
elements-dir: {{ NODEPOOL_CONFIG | dirname }}/elements
images-dir: {{ NODEPOOL_DIB_BASE_PATH }}/images

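# Nodepool keeps all of its state in ZooKeeper; the functional test
# talks to a local ZooKeeper over its TLS-only client port.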
zookeeper-servers:
  - host: localhost
    port: 2281

zookeeper-tls:
  ca: /opt/zookeeper/ca/certs/cacert.pem
  cert: /opt/zookeeper/ca/certs/client.pem
  key: /opt/zookeeper/ca/keys/clientkey.pem

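# Keep one ready node of the test label; this is what triggers the
# launch that the functional test verifies.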
labels:
  - name: test-image
    min-ready: 1

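# A single OpenStack provider pointing at the devstack cloud set up
# earlier in this job.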
providers:
  - name: devstack
    region-name: RegionOne
    cloud: devstack
    # Long boot timeout to deal with potentially nested virt.
    boot-timeout: 1200
    launch-timeout: 1500
    rate: 0.25
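    # Boot from the dib-built image defined below; the config drive
    # lets simple-init configure the node without a metadata service.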
    diskimages:
      - name: test-image
        config-drive: true
    pools:
      - name: main
        max-servers: 5
        networks:
          - private
        labels:
          - name: test-image
            diskimage: test-image
            min-ram: 512
            flavor-name: 'nodepool'
            console-log: True
            key-name: {{ NODEPOOL_KEY_NAME }}
            instance-properties:
              nodepool_devstack: testing
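            # Userdata that writes a sentinel file; the test can check
            # for it on the booted node to verify userdata handling.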
            userdata: |
              #cloud-config
              write_files:
                - content: |
                    testpassed
                  path: /etc/testfile_nodepool_userdata

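# diskimage-builder configuration for the test image; rebuild it once
# the current build is more than a day old.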
diskimages:
  - name: test-image
    rebuild-age: 86400
{% if 'elements-dir' in nodepool_diskimage %}
    elements-dir: '{{ nodepool_diskimage["elements-dir"] }}'
{% endif %}
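    # Element list: the per-job base element (plus any extras) comes
    # from the nodepool_diskimage variable, followed by the elements
    # every test build shares.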
    elements:
      - {{ nodepool_diskimage.base_element }}
{% if 'extra_elements' in nodepool_diskimage %}
{% for item in nodepool_diskimage.extra_elements %}
      - {{ item }}
{% endfor %}
{% endif %}
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
      - journal-to-console
    release: '{{ nodepool_diskimage.release }}'
    env-vars:
      TMPDIR: '{{ NODEPOOL_DIB_BASE_PATH }}/tmp'
      # We have seen multiple instances of dib's bootloader not
      # setting the LABEL= command correctly for the installed kernel.
      # If the command line from the current kernel on the builder
      # system leaks into the test build, we can hide the problem:
      # on a gate node (built by dib) the running kernel has
      # LABEL=cloudimg-rootfs on its command line.  The test image
      # copies this and boots correctly in the gate; but when it hits
      # the production builders, which don't boot like this, we are
      # left with an unbootable image that can't find its root
      # partition.
      #
      # Thus we override the default root label during tests.  This
      # should ensure the root disk gets made with a different label,
      # and if the bootloader doesn't correctly direct the kernel to
      # it, we should get gate failures.
      ROOT_LABEL: 'gate-rootfs'
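      # Write checksum files alongside the image and report its disk
      # usage at the end of the build.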
      DIB_CHECKSUM: '1'
      DIB_SHOW_IMAGE_USAGE: '1'
      DIB_IMAGE_CACHE: '{{ NODEPOOL_DIB_BASE_PATH }}/cache'
      DIB_DEV_USER_AUTHORIZED_KEYS: '{{ NODEPOOL_PUBKEY }}'
{% if 'mirror' in nodepool_diskimage %}
      DIB_DISTRIBUTION_MIRROR: '{{ nodepool_diskimage.mirror }}'
{% endif %}
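      # When the job lists opendev/glean as a required project, install
      # glean from that source checkout instead of from PyPI.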
{% if zuul.projects.get('opendev.org/opendev/glean', {}).get('required', False) %}
      DIB_INSTALLTYPE_simple_init: 'repo'
      DIB_REPOLOCATION_glean: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/opendev/glean'].src_dir }}"
      DIB_REPOREF_glean: "{{ zuul.projects['opendev.org/opendev/glean'].checkout }}"
{% endif %}
{% for k, v in nodepool_diskimage.get('env-vars', {}).items() %}
      {{ k }}: "{{ v }}"
{% endfor %}