diff --git a/ansible/dev-tools.yml b/ansible/dev-tools.yml index 9fd4e1b55..88617af11 100644 --- a/ansible/dev-tools.yml +++ b/ansible/dev-tools.yml @@ -1,6 +1,6 @@ --- - name: Ensure development tools are installed - hosts: seed-hypervisor:seed:overcloud + hosts: seed-hypervisor:seed:overcloud:infra-vms tags: - dev-tools roles: diff --git a/ansible/disable-cloud-init.yml b/ansible/disable-cloud-init.yml index 6cc23e846..033c7a602 100644 --- a/ansible/disable-cloud-init.yml +++ b/ansible/disable-cloud-init.yml @@ -4,7 +4,7 @@ # In some cases cloud-init reconfigure automatically network interface # and cause some issues in network configuration - name: Disable Cloud-init service - hosts: overcloud + hosts: overcloud:infra-vms tags: - disable-cloud-init roles: diff --git a/ansible/disable-glean.yml b/ansible/disable-glean.yml index 11a3f0108..bf583ebdc 100644 --- a/ansible/disable-glean.yml +++ b/ansible/disable-glean.yml @@ -3,7 +3,7 @@ # servers but gets in the way after this as it tries to enable all network # interfaces. In some cases this can lead to timeouts. - name: Ensure Glean is disabled and its artifacts are removed - hosts: seed:overcloud + hosts: seed:overcloud:infra-vms tags: - disable-glean roles: diff --git a/ansible/disable-selinux.yml b/ansible/disable-selinux.yml index 3c3bed1b1..3ce1706c6 100644 --- a/ansible/disable-selinux.yml +++ b/ansible/disable-selinux.yml @@ -1,6 +1,6 @@ --- - name: Disable SELinux and reboot if required - hosts: seed:overcloud + hosts: seed:overcloud:infra-vms tags: - disable-selinux roles: diff --git a/ansible/dnf.yml b/ansible/dnf.yml index 0c1e4279b..bb5aafb3f 100644 --- a/ansible/dnf.yml +++ b/ansible/dnf.yml @@ -1,6 +1,6 @@ --- - name: Ensure DNF repos are configured - hosts: seed-hypervisor:seed:overcloud + hosts: seed-hypervisor:seed:overcloud:infra-vms vars: ansible_python_interpreter: /usr/bin/python3 tags: diff --git a/ansible/group_vars/all/infra-vms b/ansible/group_vars/all/infra-vms new file mode 100644 index 000000000..0d197791d --- /dev/null +++ b/ansible/group_vars/all/infra-vms @@ -0,0 +1,173 @@ +--- +############################################################################### +# Infrastructure VM configuration. + +# Name of the infra VM. +infra_vm_name: "{{ inventory_hostname }}" + +# Memory in MB. +infra_vm_memory_mb: "{{ 16 * 1024 }}" + +# Number of vCPUs. +infra_vm_vcpus: 4 + +# List of volumes. +infra_vm_volumes: + - "{{ infra_vm_root_volume }}" + - "{{ infra_vm_data_volume }}" + +# Root volume. +infra_vm_root_volume: + name: "{{ infra_vm_name }}-root" + pool: "{{ infra_vm_pool }}" + capacity: "{{ infra_vm_root_capacity }}" + format: "{{ infra_vm_root_format }}" + image: "{{ infra_vm_root_image }}" + +# Data volume. +infra_vm_data_volume: + name: "{{ infra_vm_name }}-data" + pool: "{{ infra_vm_pool }}" + capacity: "{{ infra_vm_data_capacity }}" + format: "{{ infra_vm_data_format }}" + +# Name of the storage pool for the infra VM volumes. +infra_vm_pool: default + +# Capacity of the infra VM root volume. +infra_vm_root_capacity: 50G + +# Format of the infra VM root volume. +infra_vm_root_format: qcow2 + +# Base image for the infra VM root volume. Default is +# "https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img" +# when os_distribution is "ubuntu", or +# "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20210210.0.x86_64.qcow2" +# otherwise. 
+infra_vm_root_image: >- + {%- if os_distribution == 'ubuntu' %} + https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img + {%- else -%} + https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20210210.0.x86_64.qcow2 + {%- endif %} + +# Capacity of the infra VM data volume. +infra_vm_data_capacity: 100G + +# Format of the infra VM data volume. +infra_vm_data_format: qcow2 + +# List of network interfaces to attach to the infra VM. +infra_vm_interfaces: "{{ network_interfaces | sort | map('net_libvirt_vm_network') | list }}" + +# Hypervisor that the VM runs on. +infra_vm_hypervisor: "{{ groups['seed-hypervisor'] | first }}" + +# Customise ansible_ssh_extra_args for the test that checks SSH connectivity +# after provisioning. Defaults to disabling ssh host key checking. +infra_vm_wait_connection_ssh_extra_args: '-o StrictHostKeyChecking=no' + +# OS family. Needed for config drive generation. +infra_vm_os_family: "{{ 'RedHat' if os_distribution == 'centos' else 'Debian' }}" + +############################################################################### +# Infrastructure VM node configuration. + +# User with which to access the infrastructure vm via SSH during bootstrap, in +# order to setup the Kayobe user account. Default is {{ os_distribution }}. +infra_vm_bootstrap_user: "{{ os_distribution }}" + +############################################################################### +# Infrastructure VM network interface configuration. + +# List of networks to which infrastructure vm nodes are attached. +infra_vm_network_interfaces: > + {{ (infra_vm_default_network_interfaces + + infra_vm_extra_network_interfaces) | select | unique | list }} + +# List of default networks to which infrastructure vm nodes are attached. +infra_vm_default_network_interfaces: > + {{ [admin_oc_net_name] | select | unique | list }} + +# List of extra networks to which infrastructure vm nodes are attached. +infra_vm_extra_network_interfaces: [] + +############################################################################### +# Infrastructure VM node software RAID configuration. + +# List of software RAID arrays. See mrlesmithjr.mdadm role for format. +infra_vm_mdadm_arrays: [] + +############################################################################### +# Infrastructure VM node encryption configuration. + +# List of block devices to encrypt. See stackhpc.luks role for format. +infra_vm_luks_devices: [] + +############################################################################### +# Infrastructure VM node LVM configuration. + +# List of infrastructure vm volume groups. See mrlesmithjr.manage-lvm role for +# format. +infra_vm_lvm_groups: "{{ infra_vm_lvm_groups_default + infra_vm_lvm_groups_extra }}" + +# Default list of infrastructure vm volume groups. See mrlesmithjr.manage-lvm +# role for format. +infra_vm_lvm_groups_default: "{{ [infra_vm_lvm_group_data] if infra_vm_lvm_group_data_enabled | bool else [] }}" + +# Additional list of infrastructure vm volume groups. See mrlesmithjr.manage-lvm +# role for format. +infra_vm_lvm_groups_extra: [] + +# Whether a 'data' LVM volume group should exist on the infrastructure vm. By +# default this contains a 'docker-volumes' logical volume for Docker volume +# storage. It will also be used for Docker container and image storage if # +# 'docker_storage_driver' is set to 'devicemapper'. Default is true if # +# 'docker_storage_driver' is set to 'devicemapper', or false otherwise. 
+infra_vm_lvm_group_data_enabled: "{{ docker_storage_driver == 'devicemapper' }}" + +# Infrastructure VM LVM volume group for data. See mrlesmithjr.manage-lvm role +# for format. +infra_vm_lvm_group_data: + vgname: data + disks: "{{ infra_vm_lvm_group_data_disks }}" + create: True + lvnames: "{{ infra_vm_lvm_group_data_lvs }}" + +# List of disks for use by infrastructure vm LVM data volume group. Default to +# an invalid value to require configuration. +infra_vm_lvm_group_data_disks: + - changeme + +# List of LVM logical volumes for the data volume group. +infra_vm_lvm_group_data_lvs: + - "{{ infra_vm_lvm_group_data_lv_docker_volumes }}" + +# Docker volumes LVM backing volume. +infra_vm_lvm_group_data_lv_docker_volumes: + lvname: docker-volumes + size: "{{ infra_vm_lvm_group_data_lv_docker_volumes_size }}" + create: True + filesystem: "{{ infra_vm_lvm_group_data_lv_docker_volumes_fs }}" + mount: True + mntp: /var/lib/docker/volumes + +# Size of docker volumes LVM backing volume. +infra_vm_lvm_group_data_lv_docker_volumes_size: 75%VG + +# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. +infra_vm_lvm_group_data_lv_docker_volumes_fs: ext4 + +############################################################################### +# Infrastructure VM node sysctl configuration. + +# Dict of sysctl parameters to set. +infra_vm_sysctl_parameters: {} + +############################################################################### +# Infrastructure VM node user configuration. + +# List of users to create. This should be in a format accepted by the +# singleplatform-eng.users role. +infra_vm_users: "{{ users_default }}" diff --git a/ansible/group_vars/infra-vms/ansible-host b/ansible/group_vars/infra-vms/ansible-host new file mode 100644 index 000000000..7692dfac8 --- /dev/null +++ b/ansible/group_vars/infra-vms/ansible-host @@ -0,0 +1,3 @@ +--- +# Host/IP with which to access the infra VMs via SSH. +ansible_host: "{{ admin_oc_net_name | net_ip }}" diff --git a/ansible/group_vars/infra-vms/ansible-user b/ansible/group_vars/infra-vms/ansible-user new file mode 100644 index 000000000..cc7fe96ea --- /dev/null +++ b/ansible/group_vars/infra-vms/ansible-user @@ -0,0 +1,7 @@ +--- +# User with which to access the infra VMs via SSH. +ansible_user: "{{ kayobe_ansible_user }}" + +# User with which to access the infra VMs before the kayobe_ansible_user +# account has been created. +bootstrap_user: "{{ infra_vm_bootstrap_user }}" diff --git a/ansible/group_vars/infra-vms/luks b/ansible/group_vars/infra-vms/luks new file mode 100644 index 000000000..94b1e8c58 --- /dev/null +++ b/ansible/group_vars/infra-vms/luks @@ -0,0 +1,6 @@ +--- +############################################################################### +# Infra VM node encryption configuration. + +# List of block devices to encrypt. See stackhpc.luks role for format. +luks_devices: "{{ infra_vm_luks_devices }}" diff --git a/ansible/group_vars/infra-vms/lvm b/ansible/group_vars/infra-vms/lvm new file mode 100644 index 000000000..fd676af20 --- /dev/null +++ b/ansible/group_vars/infra-vms/lvm @@ -0,0 +1,6 @@ +--- +############################################################################### +# Infra VM node LVM configuration. + +# List of LVM volume groups. 
+lvm_groups: "{{ infra_vm_lvm_groups }}" diff --git a/ansible/group_vars/infra-vms/mdadm b/ansible/group_vars/infra-vms/mdadm new file mode 100644 index 000000000..8c3c89ccb --- /dev/null +++ b/ansible/group_vars/infra-vms/mdadm @@ -0,0 +1,6 @@ +--- +############################################################################### +# Infra VM node software RAID configuration. + +# List of software RAID arrays. See mrlesmithjr.mdadm role for format. +mdadm_arrays: "{{ infra_vm_mdadm_arrays }}" diff --git a/ansible/group_vars/infra-vms/network b/ansible/group_vars/infra-vms/network new file mode 100644 index 000000000..32d5eabb6 --- /dev/null +++ b/ansible/group_vars/infra-vms/network @@ -0,0 +1,6 @@ +--- +############################################################################### +# Network interface attachments. + +# List of networks to which these nodes are attached. +network_interfaces: "{{ infra_vm_network_interfaces | unique | list }}" diff --git a/ansible/group_vars/infra-vms/sysctl b/ansible/group_vars/infra-vms/sysctl new file mode 100644 index 000000000..1a9fd2b75 --- /dev/null +++ b/ansible/group_vars/infra-vms/sysctl @@ -0,0 +1,3 @@ +--- +# Dict of sysctl parameters to set. +sysctl_parameters: "{{ infra_vm_sysctl_parameters }}" diff --git a/ansible/group_vars/infra-vms/users b/ansible/group_vars/infra-vms/users new file mode 100644 index 000000000..f8951d3aa --- /dev/null +++ b/ansible/group_vars/infra-vms/users @@ -0,0 +1,4 @@ +--- +# List of users to create. This should be in a format accepted by the +# singleplatform-eng.users role. +users: "{{ infra_vm_users }}" diff --git a/ansible/host-command-run.yml b/ansible/host-command-run.yml index 18a9dd880..b074ca1ed 100644 --- a/ansible/host-command-run.yml +++ b/ansible/host-command-run.yml @@ -1,7 +1,7 @@ --- - name: Run a command gather_facts: False - hosts: seed-hypervisor:seed:overcloud + hosts: seed-hypervisor:seed:overcloud:infra-vms tasks: - name: Run a command shell: "{{ host_command_to_run }}" diff --git a/ansible/host-package-update.yml b/ansible/host-package-update.yml index 3e68bb3c8..f5e924c26 100644 --- a/ansible/host-package-update.yml +++ b/ansible/host-package-update.yml @@ -1,6 +1,6 @@ --- - name: Update host packages - hosts: seed-hypervisor:seed:overcloud + hosts: seed-hypervisor:seed:overcloud:infra-vms vars: # Optionally set this to a list of packages to update. Default behaviour is # to update all packages. diff --git a/ansible/infra-vm-deprovision.yml b/ansible/infra-vm-deprovision.yml new file mode 100644 index 000000000..f350f9e0d --- /dev/null +++ b/ansible/infra-vm-deprovision.yml @@ -0,0 +1,38 @@ +--- + +- name: Set facts about infra VMs + gather_facts: false + hosts: "{{ infra_vm_limit | default('infra-vms') }}" + tags: + - always + tasks: + - name: Group virtual machines by hypervisor + group_by: + key: infra_vms_{{ infra_vm_hypervisor }} + # FIXME(mgoddard): Is delegate_to necessary? 
+ delegate_to: "{{ infra_vm_hypervisor }}" + changed_when: false + +- name: Ensure defined infra VMs are destroyed + hosts: hypervisors + tags: + - infra-vm-deprovision + tasks: + - import_role: + name: infra-vms + vars: + infra_vm_action: destroy + infra_vm_vms: "{{ groups['infra_vms_' ~ inventory_hostname ] | default([]) }}" + +- name: Set facts about infra VMs + gather_facts: false + hosts: "{{ infra_vm_limit | default('infra-vms') }}" + tags: + - infra-vm-deprovision + tasks: + - name: Remove host key from known hosts + known_hosts: + name: "{{ ansible_host }}" + state: "absent" + delegate_to: localhost + throttle: 1 diff --git a/ansible/infra-vm-provision.yml b/ansible/infra-vm-provision.yml new file mode 100644 index 000000000..35eba3fa4 --- /dev/null +++ b/ansible/infra-vm-provision.yml @@ -0,0 +1,44 @@ +--- + +- name: Set facts about infra VMs + gather_facts: false + hosts: "{{ infra_vm_limit | default('infra-vms') }}" + tags: + - always + tasks: + - name: Group virtual machines by hypervisor + group_by: + key: infra_vms_{{ infra_vm_hypervisor }} + # FIXME(mgoddard): Is delegate_to necessary? + delegate_to: "{{ infra_vm_hypervisor }}" + changed_when: false + +- name: Ensure defined infra VMs are deployed + hosts: hypervisors + tags: + - infra-vm-provision + tasks: + - import_role: + name: infra-vms + vars: + infra_vm_vms: "{{ groups['infra_vms_' ~ inventory_hostname ] | default([]) }}" + +- name: Wait for infra VMs to be accessible + hosts: "{{ infra_vm_limit | default('infra-vms') }}" + gather_facts: false + tags: + - infra-vm-provision + tasks: + - name: Wait for a connection to VM with bootstrap user + wait_for_connection: + # NOTE: Ensure we exceed the 5 minute DHCP timeout of the eth0 + # interface if necessary. + timeout: 600 + vars: + # NOTE(wszumski): ansible_host_key_checking variable doesn't seem to + # work, But it would be nice not to fail if the host_key changes. + # We check the hostkey during host configure. + # https://github.com/ansible/ansible/blob/1c34492413dec09711c430745034db0c108227a9/lib/ansible/plugins/connection/ssh.py#L49 + # https://github.com/ansible/ansible/issues/49254 + ansible_ssh_extra_args: '{{ infra_vm_wait_connection_ssh_extra_args }}' + ansible_user: "{{ bootstrap_user }}" diff --git a/ansible/ip-allocation.yml b/ansible/ip-allocation.yml index 405345a5a..dd31918eb 100644 --- a/ansible/ip-allocation.yml +++ b/ansible/ip-allocation.yml @@ -1,6 +1,6 @@ --- - name: Ensure IP addresses are allocated - hosts: seed-hypervisor:seed:overcloud + hosts: seed-hypervisor:seed:overcloud:infra-vms tags: - ip-allocation gather_facts: no diff --git a/ansible/kayobe-ansible-user.yml b/ansible/kayobe-ansible-user.yml index 68f716088..5ca11cf02 100644 --- a/ansible/kayobe-ansible-user.yml +++ b/ansible/kayobe-ansible-user.yml @@ -7,7 +7,7 @@ # bootstrap process if the account is inaccessible. - name: Determine whether user bootstrapping is required - hosts: seed-hypervisor:seed:overcloud + hosts: seed-hypervisor:seed:overcloud:infra-vms gather_facts: false tags: - kayobe-ansible-user diff --git a/ansible/kayobe-target-venv.yml b/ansible/kayobe-target-venv.yml index 02bd526f7..14a8b8761 100644 --- a/ansible/kayobe-target-venv.yml +++ b/ansible/kayobe-target-venv.yml @@ -3,7 +3,7 @@ # when running kayobe. 
- name: Ensure a virtualenv exists for kayobe - hosts: seed:seed-hypervisor:overcloud + hosts: seed:seed-hypervisor:overcloud:infra-vms gather_facts: False tags: - kayobe-target-venv diff --git a/ansible/luks.yml b/ansible/luks.yml index 6aa65c4b2..8dea1146a 100644 --- a/ansible/luks.yml +++ b/ansible/luks.yml @@ -1,6 +1,6 @@ --- - name: Ensure encryption configuration is applied - hosts: seed-hypervisor:seed:overcloud + hosts: seed-hypervisor:seed:overcloud:infra-vms tags: - luks tasks: diff --git a/ansible/lvm.yml b/ansible/lvm.yml index 5ca9f8a9c..4403b9d56 100644 --- a/ansible/lvm.yml +++ b/ansible/lvm.yml @@ -1,6 +1,6 @@ --- - name: Ensure LVM configuration is applied - hosts: seed-hypervisor:seed:overcloud + hosts: seed-hypervisor:seed:overcloud:infra-vms tags: - lvm - upgrade-check diff --git a/ansible/mdadm.yml b/ansible/mdadm.yml index 617b65e47..6b907777e 100644 --- a/ansible/mdadm.yml +++ b/ansible/mdadm.yml @@ -1,6 +1,6 @@ --- - name: Ensure software RAID configuration is applied - hosts: seed-hypervisor:seed:overcloud + hosts: seed-hypervisor:seed:overcloud:infra-vms tags: - mdadm roles: diff --git a/ansible/network.yml b/ansible/network.yml index 5ff0b69f1..51d93b91e 100644 --- a/ansible/network.yml +++ b/ansible/network.yml @@ -1,6 +1,6 @@ --- - name: Ensure networking is configured - hosts: seed-hypervisor:seed:overcloud + hosts: seed-hypervisor:seed:overcloud:infra-vms tags: - config - network diff --git a/ansible/pip.yml b/ansible/pip.yml index bd00ec922..f9ca03775 100644 --- a/ansible/pip.yml +++ b/ansible/pip.yml @@ -1,6 +1,6 @@ --- - name: Configure local PyPi mirror - hosts: seed-hypervisor:seed:overcloud + hosts: seed-hypervisor:seed:overcloud:infra-vms tags: - pip vars: diff --git a/ansible/roles/infra-vms/defaults/main.yml b/ansible/roles/infra-vms/defaults/main.yml new file mode 100644 index 000000000..c81b775d0 --- /dev/null +++ b/ansible/roles/infra-vms/defaults/main.yml @@ -0,0 +1,7 @@ +--- + +# Either 'deploy' or 'destroy'. +infra_vm_action: deploy + +# List of inventory hostnames of infra VMs mapped to this hypervisor. 
+infra_vm_vms: [] diff --git a/ansible/roles/infra-vms/tasks/deploy.yml b/ansible/roles/infra-vms/tasks/deploy.yml new file mode 100644 index 000000000..71268061b --- /dev/null +++ b/ansible/roles/infra-vms/tasks/deploy.yml @@ -0,0 +1,73 @@ +--- + +- name: "[{{ vm_name }}] Ensure that the VM configdrive exists" + include_role: + name: jriguera.configdrive + vars: + configdrive_os_family: "{{ vm_hostvars.infra_vm_os_family }}" + configdrive_uuid: "{{ vm_name | to_uuid }}" + # Must set configdrive_instance_dir when using a loop + # https://github.com/jriguera/ansible-role-configdrive/blob/8438592c84585c86e62ae07e526d3da53629b377/tasks/main.yml#L17 + configdrive_instance_dir: "{{ configdrive_uuid }}" + configdrive_fqdn: "{{ vm_name }}" + configdrive_name: "{{ vm_name }}" + configdrive_ssh_public_key: "{{ lookup('file', ssh_public_key_path) }}" + configdrive_config_dir: "{{ image_cache_path }}" + configdrive_volume_path: "{{ image_cache_path }}" + configdrive_config_dir_delete: False + configdrive_resolv: + domain: "{{ vm_hostvars.resolv_domain | default }}" + search: "{{ vm_hostvars.resolv_search | default }}" + dns: "{{ vm_hostvars.resolv_nameservers | default([]) }}" + configdrive_network_device_list: > + {{ vm_hostvars.network_interfaces | + map('net_configdrive_network_device', vm_hostvars.inventory_hostname) | + list }} + +- name: "[{{ vm_name }}] Set a fact containing the configdrive image path" + set_fact: + vm_configdrive_path: "{{ image_cache_path }}/{{ vm_name }}.iso" + +- name: "[{{ vm_name }}] Ensure configdrive is decoded and decompressed" + shell: > + base64 -d {{ image_cache_path }}/{{ vm_name | to_uuid }}.gz + | gunzip + > {{ vm_configdrive_path }} + +- name: "[{{ vm_name }}] Ensure unnecessary files are removed" + file: + path: "{{ item }}" + state: absent + with_items: + - "{{ image_cache_path }}/{{ vm_name | to_uuid }}.gz" + +- name: "[{{ vm_name }}] Check the size of the configdrive image" + stat: + path: "{{ vm_configdrive_path }}" + get_checksum: False + get_md5: False + mime: False + register: stat_result + +- name: "[{{ vm_name }}] Ensure that the VM is provisioned" + include_role: + name: stackhpc.libvirt-vm + vars: + vm_configdrive_device: cdrom + vm_configdrive_volume: + name: "{{ vm_name }}-configdrive" + pool: "{{ vm_hostvars.infra_vm_pool }}" + # Round size up to next multiple of 4096. 
+ capacity: "{{ (stat_result.stat.size + 4095) // 4096 * 4096 }}" + device: "{{ vm_configdrive_device }}" + format: "raw" + image: "{{ vm_configdrive_path }}" + remote_src: true + libvirt_vm_image_cache_path: "{{ image_cache_path }}" + libvirt_vms: + - name: "{{ vm_name }}" + memory_mb: "{{ vm_hostvars.infra_vm_memory_mb }}" + vcpus: "{{ vm_hostvars.infra_vm_vcpus }}" + volumes: "{{ vm_hostvars.infra_vm_volumes + [vm_configdrive_volume] }}" + interfaces: "{{ vm_hostvars.infra_vm_interfaces }}" + console_log_enabled: true diff --git a/ansible/roles/infra-vms/tasks/destroy.yml b/ansible/roles/infra-vms/tasks/destroy.yml new file mode 100644 index 000000000..a621e4e30 --- /dev/null +++ b/ansible/roles/infra-vms/tasks/destroy.yml @@ -0,0 +1,16 @@ +--- + +- name: Destroy VMs + import_role: + name: stackhpc.libvirt-vm + vars: + infra_vm_configdrive_volume: + name: "{{ vm_name }}-configdrive" + pool: "{{ hostvars[vm_hostvars.infra_vm_hypervisor].infra_vm_pool }}" + libvirt_vms: + - name: "{{ vm_name }}" + memory_mb: "{{ vm_hostvars.infra_vm_memory_mb }}" + vcpus: "{{ vm_hostvars.infra_vm_vcpus }}" + volumes: "{{ vm_hostvars.infra_vm_volumes + [infra_vm_configdrive_volume] }}" + state: "absent" + become: True diff --git a/ansible/roles/infra-vms/tasks/main.yml b/ansible/roles/infra-vms/tasks/main.yml new file mode 100644 index 000000000..9fec42e3c --- /dev/null +++ b/ansible/roles/infra-vms/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- import_tasks: prerequisites.yml + +- name: list all VMs on hypervisor + virt: + command: list_vms + register: all_vms + become: true + +- name: "{{ infra_vm_action | capitalize }} infra VMs (loop)" + include_tasks: "{{ infra_vm_action }}.yml" + vars: + vm_name: "{{ vm_hostvars.infra_vm_name }}" + vm_hostvars: "{{ hostvars[vm_item] }}" + loop: "{{ infra_vm_vms }}" + when: (infra_vm_action == "deploy" and vm_name not in all_vms.list_vms) or infra_vm_action == "destroy" + loop_control: + loop_var: vm_item diff --git a/ansible/roles/infra-vms/tasks/prerequisites.yml b/ansible/roles/infra-vms/tasks/prerequisites.yml new file mode 100644 index 000000000..a0600e895 --- /dev/null +++ b/ansible/roles/infra-vms/tasks/prerequisites.yml @@ -0,0 +1,18 @@ +--- +# NOTE(priteau): On seed hypervisors running CentOS 8, the configdrive role +# will fail to install coreutils if coreutils-single is already present. +# Until the role handles it, install it using the --allowerasing option +# which will remove coreutils-single. 
+- name: Ensure coreutils package is installed
+  command: "dnf install coreutils -y --allowerasing"
+  become: True
+  when:
+    - ansible_facts.os_family == 'RedHat'
+
+- name: Ensure the image cache directory exists
+  file:
+    path: "{{ image_cache_path }}"
+    state: directory
+    owner: "{{ ansible_facts.user_uid }}"
+    group: "{{ ansible_facts.user_gid }}"
+  become: True
diff --git a/ansible/sysctl.yml b/ansible/sysctl.yml
index eb8695342..0f4ad7435 100644
--- a/ansible/sysctl.yml
+++ b/ansible/sysctl.yml
@@ -1,6 +1,6 @@
 ---
 - name: Ensure sysctl parameters are configured
-  hosts: seed:seed-hypervisor:overcloud
+  hosts: seed:seed-hypervisor:overcloud:infra-vms
   tags:
     - sysctl
   roles:
diff --git a/ansible/time.yml b/ansible/time.yml
index 3c88cfa2a..33f8ad7ba 100644
--- a/ansible/time.yml
+++ b/ansible/time.yml
@@ -1,6 +1,6 @@
 ---
 - name: Ensure timezone is configured
-  hosts: seed-hypervisor:seed:overcloud
+  hosts: seed-hypervisor:seed:overcloud:infra-vms
   tags:
     - timezone
   tasks:
diff --git a/ansible/users.yml b/ansible/users.yml
index 666eaaed3..6a89b7697 100644
--- a/ansible/users.yml
+++ b/ansible/users.yml
@@ -1,6 +1,6 @@
 ---
 - name: Ensure users exist
-  hosts: seed:seed-hypervisor:overcloud
+  hosts: seed:seed-hypervisor:overcloud:infra-vms
   tags:
     - users
   roles:
diff --git a/doc/source/administration/index.rst b/doc/source/administration/index.rst
index d1790508b..79cee41ce 100644
--- a/doc/source/administration/index.rst
+++ b/doc/source/administration/index.rst
@@ -10,5 +10,6 @@ administrative tasks.
 
    general
    seed
+   infra-vms
   overcloud
    bare-metal
diff --git a/doc/source/administration/infra-vms.rst b/doc/source/administration/infra-vms.rst
new file mode 100644
index 000000000..5633d1a00
--- /dev/null
+++ b/doc/source/administration/infra-vms.rst
@@ -0,0 +1,79 @@
+=======================
+Infra VM Administration
+=======================
+
+Deprovisioning Infrastructure VMs
+=================================
+
+.. note::
+
+   This step will destroy the infrastructure VMs and associated data volumes.
+   Make sure you back up any data you want to keep.
+
+To deprovision all VMs::
+
+    (kayobe) $ kayobe infra vm deprovision
+
+This can be limited to a subset of the VMs using the ``--limit`` option::
+
+    (kayobe) $ kayobe infra vm deprovision --limit example-vm-1
+
+Updating Packages
+=================
+
+It is possible to update packages on the infrastructure VMs.
+
+Package Repositories
+--------------------
+
+If using custom DNF package repositories on CentOS, it may be necessary to
+update these prior to running a package update. To do this, update the
+configuration in ``${KAYOBE_CONFIG_PATH}/dnf.yml`` and run the following
+command::
+
+    (kayobe) $ kayobe infra vm host configure --tags dnf
+
+Package Update
+--------------
+
+To update one or more packages::
+
+    (kayobe) $ kayobe infra vm host package update --packages <package1>,<package2>
+
+To update all eligible packages, use ``*``, escaping if necessary::
+
+    (kayobe) $ kayobe infra vm host package update --packages "*"
+
+To only install updates that have been marked security related::
+
+    (kayobe) $ kayobe infra vm host package update --packages "*" --security
+
+Note that these commands do not affect packages installed in containers, only
+those installed on the host.
+
+Kernel Updates
+--------------
+
+If the kernel has been updated, you will probably want to reboot the host
+to boot into the new kernel. This can be done using a command such as the
+following::
+
+    (kayobe) $ kayobe infra vm host command run --command "shutdown -r" --become
+
+Running Commands
+================
+
+It is possible to run a command on the host::
+
+    (kayobe) $ kayobe infra vm host command run --command "<command>"
+
+For example::
+
+    (kayobe) $ kayobe infra vm host command run --command "service docker restart"
+
+Commands can also be run on the seed hypervisor host, if one is in use::
+
+    (kayobe) $ kayobe seed hypervisor host command run --command "<command>"
+
+To execute the command with root privileges, add the ``--become`` argument.
+Adding the ``--verbose`` argument allows the output of the command to be seen.
diff --git a/doc/source/configuration/reference/index.rst b/doc/source/configuration/reference/index.rst
index 380144bce..2085025a1 100644
--- a/doc/source/configuration/reference/index.rst
+++ b/doc/source/configuration/reference/index.rst
@@ -21,4 +21,5 @@ options.
    ironic-python-agent
    docker-registry
    seed-custom-containers
+   infra-vms
    nova-cells
diff --git a/doc/source/configuration/reference/infra-vms.rst b/doc/source/configuration/reference/infra-vms.rst
new file mode 100644
index 000000000..baa46faca
--- /dev/null
+++ b/doc/source/configuration/reference/infra-vms.rst
@@ -0,0 +1,97 @@
+.. _configuration-infra-vms:
+
+==================
+Infrastructure VMs
+==================
+
+Kayobe can deploy infrastructure VMs to the seed-hypervisor. These can be used
+to provide supplementary services that do not run well within a containerised
+environment or are dependencies of the control plane.
+
+Configuration
+=============
+
+To deploy an infrastructure VM, add a new host to the ``infra-vms`` group
+in the inventory:
+
+.. code-block:: ini
+   :caption: ``$KAYOBE_CONFIG_PATH/inventory/infra-vms``
+
+   [infra-vms]
+   an-example-vm
+
+The configuration of the virtual machine should be done using ``host_vars``.
+These override the ``group_vars`` defined for the ``infra-vms`` group. Most
+variables have sensible defaults defined, but there are a few variables which
+must be set.
+
+Mandatory variables
+-------------------
+
+All networks must have an interface defined, as described in
+:ref:`configuration-network-per-host`. By default the VMs are attached
+to the admin overcloud network. If, for example, ``admin_oc_net_name`` was
+set to ``example_net``, you would need to define ``example_net_interface``.
+It is possible to change the list of networks that a VM is attached to
+by modifying ``infra_vm_network_interfaces``. Additional interfaces
+can be added by setting ``infra_vm_extra_network_interfaces``.
+
+The defaults that Kayobe applies to the infra VM variables are listed below.
+Any of these variables can be overridden with a ``host_var``.
+
+.. literalinclude:: ../../../../ansible/group_vars/all/infra-vms
+   :language: yaml
+
+Customisations
+--------------
+
+Examples of common customisations are shown below.
+
+By default the Ansible inventory name is used as the name of the VM. This may
+be overridden via ``infra_vm_name``:
+
+.. code-block:: yaml
+   :caption: ``$KAYOBE_CONFIG_PATH/inventory/host_vars/an-example-vm``
+
+   # Name of the infra VM.
+   infra_vm_name: "the-special-one"
+
+By default the VM has 16G of RAM. This may be changed via
+``infra_vm_memory_mb``:
+
+.. code-block:: yaml
+   :caption: ``$KAYOBE_CONFIG_PATH/inventory/host_vars/an-example-vm``
+
+   # Memory in MB. Defaults to 16GB.
+   infra_vm_memory_mb: "{{ 8 * 1024 }}"
+
+The default network configuration attaches infra VMs to the admin network.
+If this is not appropriate, modify ``infra_vm_network_interfaces``. At a minimum
+the network interface name for the network should be defined.
+
+.. code-block:: yaml
+   :caption: ``$KAYOBE_CONFIG_PATH/inventory/host_vars/an-example-vm``
+
+   # Network interfaces that the VM is attached to.
+   infra_vm_network_interfaces:
+     - aio
+
+   # Mandatory: All networks must have an interface defined.
+   aio_interface: eth0
+
+   # By default kayobe will connect to a host via ``admin_oc_net``.
+   # As we have not attached this VM to this network, we must override
+   # ansible_host.
+   ansible_host: "{{ 'aio' | net_ip }}"
+
+Configuration for all VMs can be set using ``extra_vars`` defined in
+``$KAYOBE_CONFIG_PATH/infra-vms.yml``. Note that normal Ansible precedence
+rules apply and the variables will override any ``host_vars``. If you need to
+override the defaults, but still maintain per-host settings, use ``group_vars``
+instead.
+
+Deploying the virtual machine
+=============================
+
+Once the initial configuration has been done follow the steps in
+:ref:`deployment-infrastructure-vms`.
diff --git a/doc/source/configuration/reference/network.rst b/doc/source/configuration/reference/network.rst
index dcfe6b3e6..7b868501a 100644
--- a/doc/source/configuration/reference/network.rst
+++ b/doc/source/configuration/reference/network.rst
@@ -390,6 +390,8 @@ An interface will be assigned an IP address if the associated network has a
 the ``allocation_pool_start`` and ``allocation_pool_end`` attributes, if one
 has not been statically assigned in ``network-allocation.yml``.
 
+.. _configuration-network-interface:
+
 Configuring Ethernet Interfaces
 -------------------------------
diff --git a/doc/source/custom-ansible-playbooks.rst b/doc/source/custom-ansible-playbooks.rst
index 44616a6a3..7941a0f79 100644
--- a/doc/source/custom-ansible-playbooks.rst
+++ b/doc/source/custom-ansible-playbooks.rst
@@ -169,6 +169,8 @@ Then, to run the ``foo.yml`` playbook::
 
     (kayobe) $ kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/foo.yml
 
+.. _custom-playbooks-hooks:
+
 Hooks
 =====
diff --git a/doc/source/deployment.rst b/doc/source/deployment.rst
index b83324166..b1d28edcc 100644
--- a/doc/source/deployment.rst
+++ b/doc/source/deployment.rst
@@ -75,6 +75,8 @@ Seed Hypervisor
 bare metal host or a VM provisioned outside of Kayobe, this section may be
 skipped.
 
+.. _deployment-seed-hypervisor-host-configure:
+
 Host Configuration
 ------------------
 
@@ -110,6 +112,8 @@ volumes. To provision the seed VM::
 When this command has completed the seed VM should be active and accessible
 via SSH. Kayobe will update the Ansible inventory with the IP address of the
 VM.
 
+.. _deployment-seed-host-configure:
+
 Host Configuration
 ------------------
 
@@ -184,7 +188,7 @@ After this command has completed the seed services will be active.
    :ref:`configuration-bifrost-overcloud-root-image` provides information on
    configuring the root disk image build process. See
    :ref:`here ` for information about deploying
-   additional, custom containers on seed node.
+   additional, custom services (containers) on a seed node.
 
 Building Deployment Images
 --------------------------
@@ -236,6 +240,112 @@ Leave the seed VM and return to the shell on the Ansible control host::
 
     $ exit
 
+.. _deployment-infrastructure-vms:
+
+Infrastructure VMs
+==================
+
+.. warning::
+
+   Support for infrastructure VMs is considered experimental: its
+   design may change in future versions without a deprecation period.
+
+.. note::
+
+   It is necessary to perform some configuration before these steps
+   can be followed. Please see :ref:`configuration-infra-vms`.
+
+VM Provisioning
+---------------
+
+The hypervisor used to host a VM is controlled via the ``infra_vm_hypervisor``
+variable. It defaults to the seed hypervisor. Each hypervisor should have
+CentOS or Ubuntu with ``libvirt`` installed, ``libvirt`` networks configured
+for all networks that the VM needs access to, and a ``libvirt`` storage pool
+available for the VM's volumes. The steps needed for the
+:ref:`seed <deployment-seed-host-configure>` and the
+:ref:`seed hypervisor <deployment-seed-hypervisor-host-configure>` can be found
+above.
+
+To provision the infra VMs::
+
+    (kayobe) $ kayobe infra vm provision
+
+When this command has completed the infra VMs should be active and accessible
+via SSH. Kayobe will update the Ansible inventory with the IP address of the
+VM.
+
+Host Configuration
+------------------
+
+To configure the infra VM host OS::
+
+    (kayobe) $ kayobe infra vm host configure
+
+.. note::
+
+   If the infra VM host uses disks that have been in use in a previous
+   installation, it may be necessary to wipe partition and LVM data from those
+   disks. To wipe all disks that are not mounted during host configuration::
+
+       (kayobe) $ kayobe infra vm host configure --wipe-disks
+
+.. seealso::
+
+   Information on configuration of hosts is available :ref:`here `.
+
+Using Hooks to deploy services on the VMs
+-----------------------------------------
+
+A no-op service deployment command is provided to perform additional
+configuration. The intention is for users to define :ref:`hooks to custom
+playbooks <custom-playbooks-hooks>` that define any further configuration or
+service deployment necessary.
+
+To trigger the hooks::
+
+    (kayobe) $ kayobe infra vm service deploy
+
+Example
+^^^^^^^
+
+In this example we have an infra VM host called ``dns01`` that provides DNS
+services. The host could be added to a ``dns-servers`` group in the inventory:
+
+.. code-block:: ini
+   :caption: ``$KAYOBE_CONFIG_PATH/inventory/infra-vms``
+
+   [dns-servers]
+   dns01
+
+   [infra-vms:children]
+   dns-servers
+
+We have a custom playbook targeting the ``dns-servers`` group that sets up
+the DNS server:
+
+.. code-block:: yaml
+   :caption: ``$KAYOBE_CONFIG_PATH/ansible/dns-server.yml``
+
+   ---
+   - name: Deploy DNS servers
+     hosts: dns-servers
+     tasks:
+       - name: Install bind packages
+         package:
+           name:
+             - bind
+             - bind-utils
+         become: true
+
+Finally, we add a symlink to set up the playbook as a hook for the ``kayobe
+infra vm service deploy`` command::
+
+    (kayobe) $ mkdir -p ${KAYOBE_CONFIG_PATH}/hooks/infra-vm-service-deploy/post.d
+    (kayobe) $ cd ${KAYOBE_CONFIG_PATH}/hooks/infra-vm-service-deploy/post.d
+    (kayobe) $ ln -s ../../../ansible/dns-server.yml 50-dns-server.yml
+
 Overcloud
 =========
diff --git a/etc/kayobe/infra-vms.yml b/etc/kayobe/infra-vms.yml
new file mode 100644
index 000000000..e5762b161
--- /dev/null
+++ b/etc/kayobe/infra-vms.yml
@@ -0,0 +1,146 @@
+---
+###############################################################################
+# Infrastructure VM configuration.
+
+# Name of the infra VM.
+#infra_vm_name:
+
+# Memory in MB.
+#infra_vm_memory_mb:
+
+# Number of vCPUs.
+#infra_vm_vcpus:
+
+# List of volumes.
+#infra_vm_volumes:
+
+# Root volume.
+#infra_vm_root_volume:
+
+# Data volume.
+#infra_vm_data_volume:
+
+# Name of the storage pool for the infra VM volumes.
+#infra_vm_pool:
+
+# Capacity of the infra VM root volume.
+#infra_vm_root_capacity:
+
+# Format of the infra VM root volume.
+#infra_vm_root_format: + +# Base image for the infra VM root volume. Default is +# "https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img" +# when os_distribution is "ubuntu", or +# "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20210210.0.x86_64.qcow2" +# otherwise. +#infra_vm_root_image: + +# Capacity of the infra VM data volume. +#infra_vm_data_capacity: + +# Format of the infra VM data volume. +#infra_vm_data_format: + +# List of network interfaces to attach to the infra VM. +#infra_vm_interfaces: + +# Hypervisor that the VM runs on. +#infra_vm_hypervisor: + +# Customise ansible_ssh_extra_args for the test that checks SSH connectivity +# after provisioning. Defaults to disabling ssh host key checking. +#infra_vm_wait_connection_ssh_extra_args: + +# OS family. Needed for config drive generation. +# infra_vm_os_family: + +############################################################################### +# Infrastructure VM node configuration. + +# User with which to access the infrastructure vm via SSH during bootstrap, in +# order to setup the Kayobe user account. +#infra_vm_bootstrap_user: + +############################################################################### +# Infrastructure VM network interface configuration. + +# List of networks to which infrastructure vm nodes are attached. +#infra_vm_network_interfaces: + +# List of default networks to which infrastructure vm nodes are attached. +#infra_vm_default_network_interfaces: + +# List of extra networks to which infrastructure vm nodes are attached. +#infra_vm_extra_network_interfaces: + +############################################################################### +# Infrastructure VM node software RAID configuration. + +# List of software RAID arrays. See mrlesmithjr.mdadm role for format. +#infra_vm_mdadm_arrays: + +############################################################################### +# Infrastructure VM node encryption configuration. + +# List of block devices to encrypt. See stackhpc.luks role for format. +#infra_vm_luks_devices: + +############################################################################### +# Infrastructure VM node LVM configuration. + +# List of infrastructure vm volume groups. See mrlesmithjr.manage-lvm role for +# format. +#infra_vm_lvm_groups: + +# Default list of infrastructure vm volume groups. See mrlesmithjr.manage-lvm +# role for format. +#infra_vm_lvm_groups_default: + +# Additional list of infrastructure vm volume groups. See mrlesmithjr.manage-lvm +# role for format. +#infra_vm_lvm_groups_extra: + +# Whether a 'data' LVM volume group should exist on the infrastructure vm. By +# default this contains a 'docker-volumes' logical volume for Docker volume +# storage. It will also be used for Docker container and image storage if +# 'docker_storage_driver' is set to 'devicemapper'. Default is true if +# 'docker_storage_driver' is set to 'devicemapper', or false otherwise. +#infra_vm_lvm_group_data_enabled: + +# Infrastructure VM LVM volume group for data. See mrlesmithjr.manage-lvm role +# for format. +#infra_vm_lvm_group_data: + +# List of disks for use by infrastructure vm LVM data volume group. Default to +# an invalid value to require configuration. +#infra_vm_lvm_group_data_disks: + +# List of LVM logical volumes for the data volume group. +#infra_vm_lvm_group_data_lvs: + +# Docker volumes LVM backing volume. +#infra_vm_lvm_group_data_lv_docker_volumes: + +# Size of docker volumes LVM backing volume. 
+#infra_vm_lvm_group_data_lv_docker_volumes_size: + +# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. +#infra_vm_lvm_group_data_lv_docker_volumes_fs: + +############################################################################### +# Infrastructure VM node sysctl configuration. + +# Dict of sysctl parameters to set. +#infra_vm_sysctl_parameters: + +############################################################################### +# Infrastructure VM node user configuration. + +# List of users to create. This should be in a format accepted by the +# singleplatform-eng.users role. +#infra_vm_users: + +############################################################################### +# Dummy variable to allow Ansible to accept this file. +workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/inventory/groups b/etc/kayobe/inventory/groups index fa1ced47c..16619fed8 100644 --- a/etc/kayobe/inventory/groups +++ b/etc/kayobe/inventory/groups @@ -14,6 +14,16 @@ # Build container images on the seed by default. seed +############################################################################### +# Infra VM groups. + +[hypervisors:children] +# Group that contains all hypervisors used for infra VMs +seed-hypervisor + +[infra-vms] +# Empty group to provide declaration of infra-vms group. + ############################################################################### # Overcloud groups. diff --git a/kayobe/cli/commands.py b/kayobe/cli/commands.py index 8a7ffc6fd..c32fa5296 100644 --- a/kayobe/cli/commands.py +++ b/kayobe/cli/commands.py @@ -823,6 +823,171 @@ class SeedDeploymentImageBuild(KayobeAnsibleMixin, VaultMixin, Command): extra_vars=extra_vars) +class InfraVMProvision(KayobeAnsibleMixin, VaultMixin, Command): + """Provisions infra virtual machines + + * Allocate IP addresses for all configured networks. + * Provision a virtual machine using libvirt. + """ + + def take_action(self, parsed_args): + self.app.LOG.debug("Provisioning infra VMs") + self.run_kayobe_playbook(parsed_args, + _get_playbook_path("ip-allocation"), + limit="infra-vms") + + limit_arg = utils.intersect_limits(parsed_args.limit, "infra-vms") + # We want the limit to affect one play only. To do this we use a + # variable to override the hosts list instead of using limit. + extra_vars = { + "infra_vm_limit": limit_arg + } + + self.run_kayobe_playbook(parsed_args, + _get_playbook_path("infra-vm-provision"), + ignore_limit=True, extra_vars=extra_vars) + + +class InfraVMDeprovision(KayobeAnsibleMixin, VaultMixin, Command): + """Deprovisions infra virtual machines. + + This will destroy all infra VMs and all associated volumes. + """ + + def take_action(self, parsed_args): + self.app.LOG.debug("Deprovisioning infra VMs") + # We want the limit to affect one play only. To do this we use a + # variable to override the hosts list instead of using limit. + limit_arg = utils.intersect_limits(parsed_args.limit, "infra-vms") + extra_vars = { + "infra_vm_limit": limit_arg + } + + self.run_kayobe_playbook(parsed_args, + _get_playbook_path("infra-vm-deprovision"), + ignore_limit=True, extra_vars=extra_vars) + + +class InfraVMHostConfigure(KayobeAnsibleMixin, VaultMixin, + Command): + """Configure the infra VMs host OS and services. + + * Allocate IP addresses for all configured networks. + * Add the host to SSH known hosts. + * Configure a user account for use by kayobe for SSH access. + * Configure package repos. + * Configure a PyPI mirror. + * Optionally, create a virtualenv for remote target hosts. 
+ * Optionally, wipe unmounted disk partitions (--wipe-disks). + * Configure user accounts, group associations, and authorised SSH keys. + * Disable SELinux. + * Configure the host's network interfaces. + * Set sysctl parameters. + * Disable bootstrap interface configuration. + * Configure timezone. + * Optionally, configure software RAID arrays. + * Optionally, configure encryption. + * Configure LVM volumes. + """ + + def get_parser(self, prog_name): + parser = super(InfraVMHostConfigure, self).get_parser(prog_name) + group = parser.add_argument_group("Host Configuration") + group.add_argument("--wipe-disks", action='store_true', + help="wipe partition and LVM data from all disks " + "that are not mounted. Warning: this can " + "result in the loss of data") + return parser + + def take_action(self, parsed_args): + self.app.LOG.debug("Configuring Infra VMs host OS") + + # Allocate IP addresses. + playbooks = _build_playbook_list("ip-allocation") + self.run_kayobe_playbooks(parsed_args, playbooks, limit="infra-vms") + + # Kayobe playbooks. + playbooks = _build_playbook_list( + "ssh-known-host", "kayobe-ansible-user", + "dnf", "pip", "kayobe-target-venv") + if parsed_args.wipe_disks: + playbooks += _build_playbook_list("wipe-disks") + playbooks += _build_playbook_list( + "users", "dev-tools", "disable-selinux", "network", + "sysctl", "disable-glean", "disable-cloud-init", "time", + "mdadm", "luks", "lvm", "docker-devicemapper", "docker") + self.run_kayobe_playbooks(parsed_args, playbooks, limit="infra-vms") + + +class InfraVMHostPackageUpdate(KayobeAnsibleMixin, VaultMixin, Command): + """Update packages on the infra VMs.""" + + def get_parser(self, prog_name): + parser = super(InfraVMHostPackageUpdate, self).get_parser(prog_name) + group = parser.add_argument_group("Host Package Updates") + group.add_argument("--packages", required=True, + help="List of packages to update. Use '*' to " + "update all packages.") + group.add_argument("--security", action='store_true', + help="Only install updates that have been marked " + "security related.") + return parser + + def take_action(self, parsed_args): + self.app.LOG.debug("Updating infra vm host packages") + extra_vars = { + "host_package_update_packages": parsed_args.packages, + "host_package_update_security": parsed_args.security, + } + playbooks = _build_playbook_list("host-package-update") + self.run_kayobe_playbooks(parsed_args, playbooks, limit="infra-vms", + extra_vars=extra_vars) + + +class InfraVMHostCommandRun(KayobeAnsibleMixin, VaultMixin, Command): + """Run command on the infra VMs.""" + + def get_parser(self, prog_name): + parser = super(InfraVMHostCommandRun, self).get_parser(prog_name) + group = parser.add_argument_group("Host Command Run") + group.add_argument("--command", required=True, + help="Command to run (required).") + group.add_argument("--show-output", action='store_true', + help="Show command output") + return parser + + def take_action(self, parsed_args): + self.app.LOG.debug("Run command on infra VM hosts") + extra_vars = { + "host_command_to_run": utils.escape_jinja(parsed_args.command), + "show_output": parsed_args.show_output} + playbooks = _build_playbook_list("host-command-run") + self.run_kayobe_playbooks(parsed_args, playbooks, limit="infra-vms", + extra_vars=extra_vars) + + +class InfraVMHostUpgrade(KayobeAnsibleMixin, VaultMixin, Command): + """Upgrade the infra VM host services. + + Performs the changes necessary to make the host services suitable for the + configured OpenStack release. 
+ """ + + def take_action(self, parsed_args): + self.app.LOG.debug("Upgrading the infra-vm host services") + playbooks = _build_playbook_list("kayobe-target-venv") + self.run_kayobe_playbooks(parsed_args, playbooks, + limit="infra-vms") + + +class InfraVMServiceDeploy(KayobeAnsibleMixin, VaultMixin, + Command): + """Run hooks for infra structure services.""" + + def take_action(self, parsed_args): + self.app.LOG.debug("Running no-op Infra VM service deploy") + + class OvercloudInventoryDiscover(KayobeAnsibleMixin, VaultMixin, Command): """Discover the overcloud inventory from the seed's Ironic service. diff --git a/kayobe/tests/unit/cli/test_commands.py b/kayobe/tests/unit/cli/test_commands.py index 80e914528..c37a13cff 100644 --- a/kayobe/tests/unit/cli/test_commands.py +++ b/kayobe/tests/unit/cli/test_commands.py @@ -919,6 +919,189 @@ class TestCase(unittest.TestCase): ] self.assertEqual(expected_calls, mock_kolla_run.call_args_list) + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbook") + def test_infra_vm_provision(self, mock_run): + command = commands.InfraVMProvision(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args([]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + utils.get_data_files_path( + "ansible", "ip-allocation.yml"), + limit="infra-vms" + ), + mock.call( + mock.ANY, + utils.get_data_files_path( + "ansible", "infra-vm-provision.yml"), + ignore_limit=True, + extra_vars={'infra_vm_limit': 'infra-vms'} + ), + ] + self.assertEqual(expected_calls, mock_run.call_args_list) + + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbook") + def test_infra_vm_deprovision(self, mock_run): + command = commands.InfraVMDeprovision(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args([]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + utils.get_data_files_path( + "ansible", "infra-vm-deprovision.yml"), + ignore_limit=True, + extra_vars={'infra_vm_limit': 'infra-vms'} + ), + ] + self.assertEqual(expected_calls, mock_run.call_args_list) + + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbooks") + def test_infra_vm_host_configure(self, mock_run): + command = commands.InfraVMHostConfigure(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args([]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + [utils.get_data_files_path("ansible", "ip-allocation.yml")], + limit="infra-vms", + ), + mock.call( + mock.ANY, + [ + utils.get_data_files_path("ansible", "ssh-known-host.yml"), + utils.get_data_files_path( + "ansible", "kayobe-ansible-user.yml"), + utils.get_data_files_path("ansible", "dnf.yml"), + utils.get_data_files_path("ansible", "pip.yml"), + utils.get_data_files_path( + "ansible", "kayobe-target-venv.yml"), + utils.get_data_files_path("ansible", "users.yml"), + utils.get_data_files_path("ansible", "dev-tools.yml"), + utils.get_data_files_path( + "ansible", "disable-selinux.yml"), + utils.get_data_files_path("ansible", "network.yml"), + utils.get_data_files_path("ansible", "sysctl.yml"), + utils.get_data_files_path("ansible", "disable-glean.yml"), + utils.get_data_files_path( + "ansible", "disable-cloud-init.yml"), + utils.get_data_files_path("ansible", "time.yml"), + utils.get_data_files_path("ansible", "mdadm.yml"), + 
utils.get_data_files_path("ansible", "luks.yml"), + utils.get_data_files_path("ansible", "lvm.yml"), + utils.get_data_files_path("ansible", + "docker-devicemapper.yml"), + utils.get_data_files_path("ansible", "docker.yml"), + ], + limit="infra-vms", + ), + ] + self.assertEqual(expected_calls, mock_run.call_args_list) + + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbooks") + def test_infra_vm_host_upgrade(self, mock_run): + command = commands.InfraVMHostUpgrade(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args([]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + [ + utils.get_data_files_path( + "ansible", "kayobe-target-venv.yml"), + ], + limit="infra-vms", + ), + ] + self.assertEqual(expected_calls, mock_run.call_args_list) + + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbooks") + def test_infra_vm_host_command_run(self, mock_run): + command = commands.InfraVMHostCommandRun(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args(["--command", "ls -a", + "--show-output"]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + [ + utils.get_data_files_path("ansible", + "host-command-run.yml"), + ], + limit="infra-vms", + extra_vars={ + "host_command_to_run": utils.escape_jinja("ls -a"), + "show_output": True} + ), + ] + self.assertEqual(expected_calls, mock_run.call_args_list) + + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbooks") + def test_infra_vm_host_package_update_all(self, mock_run): + command = commands.InfraVMHostPackageUpdate(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args(["--packages", "*"]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + [ + utils.get_data_files_path( + "ansible", "host-package-update.yml"), + ], + limit="infra-vms", + extra_vars={ + "host_package_update_packages": "*", + "host_package_update_security": False, + }, + ), + ] + self.assertEqual(expected_calls, mock_run.call_args_list) + + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbook") + def test_infra_vm_service_deploy(self, mock_run): + command = commands.InfraVMServiceDeploy(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args([]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [] + self.assertEqual(expected_calls, mock_run.call_args_list) + @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") @mock.patch.object(commands.KayobeAnsibleMixin, diff --git a/releasenotes/notes/add-support-for-custom-seed-vms-a938ffdbedcd7b14.yaml b/releasenotes/notes/add-support-for-custom-seed-vms-a938ffdbedcd7b14.yaml new file mode 100644 index 000000000..63940f976 --- /dev/null +++ b/releasenotes/notes/add-support-for-custom-seed-vms-a938ffdbedcd7b14.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Adds support for deploying infrastructure VMs on the seed hypervisor. + These can be used to provide supplementary services that do not run well + within a containerised environment or are dependencies of the control + plane. See `story 2008741 + ` for details. 
diff --git a/setup.cfg b/setup.cfg index 708ea8191..4b1307231 100644 --- a/setup.cfg +++ b/setup.cfg @@ -94,6 +94,13 @@ kayobe.cli= seed_service_upgrade = kayobe.cli.commands:SeedServiceUpgrade seed_vm_deprovision = kayobe.cli.commands:SeedVMDeprovision seed_vm_provision = kayobe.cli.commands:SeedVMProvision + infra_vm_deprovision = kayobe.cli.commands:InfraVMDeprovision + infra_vm_provision = kayobe.cli.commands:InfraVMProvision + infra_vm_host_configure = kayobe.cli.commands:InfraVMHostConfigure + infra_vm_host_upgrade = kayobe.cli.commands:InfraVMHostUpgrade + infra_vm_host_command_run = kayobe.cli.commands:InfraVMHostCommandRun + infra_vm_host_package_update = kayobe.cli.commands:InfraVMHostPackageUpdate + infra_vm_service_deploy = kayobe.cli.commands:InfraVMServiceDeploy kayobe.cli.baremetal_compute_inspect = hooks = kayobe.cli.commands:HookDispatcher @@ -205,3 +212,17 @@ kayobe.cli.seed_vm_deprovision = hooks = kayobe.cli.commands:HookDispatcher kayobe.cli.seed_vm_provision = hooks = kayobe.cli.commands:HookDispatcher +kayobe.cli.infra_vm_deprovision = + hooks = kayobe.cli.commands:HookDispatcher +kayobe.cli.infra_vm_provision = + hooks = kayobe.cli.commands:HookDispatcher +kayobe.cli.infra_vm_host_configure = + hooks = kayobe.cli.commands:HookDispatcher +kayobe.cli.infra_vm_host_upgrade = + hooks = kayobe.cli.commands:HookDispatcher +kayobe.cli.infra_vm_host_command_run = + hooks = kayobe.cli.commands:HookDispatcher +kayobe.cli.infra_vm_host_package_update = + hooks = kayobe.cli.commands:HookDispatcher +kayobe.cli.infra_vm_service_deploy = + hooks = kayobe.cli.commands:HookDispatcher
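For reference, the ``host_vars`` below sketch how the variables defined in
``ansible/group_vars/all/infra-vms`` above might be combined for a single VM.
This is an illustrative example only: the VM name, the ``/dev/vdb`` device and
the ``example_net`` network are hypothetical placeholders, not values shipped
with this change::

    ---
    # ${KAYOBE_CONFIG_PATH}/inventory/host_vars/an-example-vm

    # Override the libvirt domain name (defaults to the inventory hostname).
    infra_vm_name: "example-infra-vm-1"

    # Halve the default 16 GiB of memory.
    infra_vm_memory_mb: "{{ 8 * 1024 }}"

    # Grow the data volume from its 100G default.
    infra_vm_data_capacity: 200G

    # Only used when docker_storage_driver is 'devicemapper'; the default
    # list ('changeme') must then be replaced with real block devices.
    # /dev/vdb is a placeholder device name.
    infra_vm_lvm_group_data_disks:
      - /dev/vdb

    # Attach the VM to an extra network in addition to the admin network.
    # The example_net network and eth1 interface are hypothetical; every
    # attached network needs a corresponding <network>_interface variable.
    infra_vm_extra_network_interfaces:
      - example_net
    example_net_interface: eth1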