From 280e84be829f4bd8c147d938a2a339021c4dea09 Mon Sep 17 00:00:00 2001 From: Will Szumski Date: Tue, 23 Jan 2024 16:35:24 +0000 Subject: [PATCH] Remove docker devicemapper support This has now been removed from upstream docker-ce packages. ``` the devicemapper storage-driver has been deprecated and removed ``` and has shown deprecation warnings for some time. This change also includes some fixes necessary for CI to pass: * update the Zuul previous_release variable to 2023.2 Closes-Bug: #2051233 Depends-On: https://review.opendev.org/c/openstack/kolla-ansible/+/906858 Change-Id: I263f59ea77e39accffe4febe0d47e56b35d9800e --- ansible/docker-devicemapper.yml | 44 -------------- ansible/infra-vm-host-configure.yml | 1 - ansible/inventory/group_vars/all/compute | 8 +-- ansible/inventory/group_vars/all/controllers | 6 +- ansible/inventory/group_vars/all/infra-vms | 6 +- ansible/inventory/group_vars/all/seed | 8 +-- ansible/inventory/group_vars/all/storage | 8 +-- ansible/overcloud-host-configure.yml | 1 - .../docker-devicemapper/defaults/main.yml | 25 -------- .../docker-devicemapper/handlers/main.yml | 13 ----- .../roles/docker-devicemapper/tasks/main.yml | 58 ------------------- .../templates/docker-thinpool.profile.j2 | 4 -- ansible/roles/docker/defaults/main.yml | 3 - ansible/roles/docker/tasks/main.yml | 16 ++++- ansible/roles/docker/templates/daemon.json.j2 | 25 -------- ansible/seed-host-configure.yml | 1 - doc/source/configuration/reference/hosts.rst | 32 ++++------ etc/kayobe/compute.yml | 6 +- etc/kayobe/controllers.yml | 5 +- etc/kayobe/infra-vms.yml | 4 +- etc/kayobe/seed.yml | 6 +- etc/kayobe/storage.yml | 6 +- .../overrides.yml.j2 | 5 +- .../tests/test_overcloud_host_configure.py | 4 +- .../remove-devicemapper-a594e6f24b4885ab.yaml | 7 +++ zuul.d/jobs.yaml | 2 +- 26 files changed, 58 insertions(+), 246 deletions(-) delete mode 100644 ansible/docker-devicemapper.yml delete mode 100644 ansible/roles/docker-devicemapper/defaults/main.yml delete mode 100644 ansible/roles/docker-devicemapper/handlers/main.yml delete mode 100644 ansible/roles/docker-devicemapper/tasks/main.yml delete mode 100644 ansible/roles/docker-devicemapper/templates/docker-thinpool.profile.j2 delete mode 100644 ansible/roles/docker/templates/daemon.json.j2 create mode 100644 releasenotes/notes/remove-devicemapper-a594e6f24b4885ab.yaml diff --git a/ansible/docker-devicemapper.yml b/ansible/docker-devicemapper.yml deleted file mode 100644 index 70ead14a4..000000000 --- a/ansible/docker-devicemapper.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -- name: Verify and configure Docker storage driver - hosts: docker - tags: - - docker - - docker-devicemapper - tasks: - - name: Check for existing Docker configuration using devicemapper - block: - - name: Query Docker daemon for storage driver - docker_host_info: - failed_when: false - register: docker - - - block: - - name: Check for a Docker configuration file - become: True - stat: - path: /etc/docker/daemon.json - register: docker_config_file - - - name: Check whether devicemapper is used in configuration file - lineinfile: - path: /etc/docker/daemon.json - regexp: 'storage-driver.*devicemapper' - state: absent - become: True - changed_when: False - # `check_mode: True` ensures that we don't modify daemon.json - check_mode: True - register: devicemapper_docker - when: docker_config_file.stat.exists - when: not (docker.can_talk_to_docker | default(true)) - - - name: Fail if devicemapper is in use while another storage driver was requested - fail: - msg: "Docker storage driver {{ 
docker_storage_driver }} was requested, but devicemapper is in use" - when: (docker.can_talk_to_docker | default(true) and docker.host_info.Driver == 'devicemapper') or (devicemapper_docker.found | default(0) == 1) - when: docker_storage_driver != 'devicemapper' - - - name: Ensure Docker devicemapper storage is configured - include_role: - name: docker-devicemapper - when: docker_storage_driver == 'devicemapper' diff --git a/ansible/infra-vm-host-configure.yml b/ansible/infra-vm-host-configure.yml index e175757e5..3a987d24c 100644 --- a/ansible/infra-vm-host-configure.yml +++ b/ansible/infra-vm-host-configure.yml @@ -20,5 +20,4 @@ - import_playbook: "mdadm.yml" - import_playbook: "luks.yml" - import_playbook: "lvm.yml" -- import_playbook: "docker-devicemapper.yml" - import_playbook: "docker.yml" diff --git a/ansible/inventory/group_vars/all/compute b/ansible/inventory/group_vars/all/compute index e235397d8..908dabb4d 100644 --- a/ansible/inventory/group_vars/all/compute +++ b/ansible/inventory/group_vars/all/compute @@ -83,11 +83,9 @@ compute_lvm_groups_default: "{{ [compute_lvm_group_data] if compute_lvm_group_da compute_lvm_groups_extra: [] # Whether a 'data' LVM volume group should exist on compute hosts. By default -# this contains a 'docker-volumes' logical volume for Docker volume storage. It -# will also be used for Docker container and image storage if -# 'docker_storage_driver' is set to 'devicemapper'. Default is true if -# 'docker_storage_driver' is set to 'devicemapper', or false otherwise. -compute_lvm_group_data_enabled: "{{ docker_storage_driver == 'devicemapper' }}" +# this contains a 'docker-volumes' logical volume for Docker volume storage +# Default is false. +compute_lvm_group_data_enabled: false # Compute LVM volume group for data. See mrlesmithjr.manage_lvm role for # format. diff --git a/ansible/inventory/group_vars/all/controllers b/ansible/inventory/group_vars/all/controllers index f25cb4b2d..9ce0e2df2 100644 --- a/ansible/inventory/group_vars/all/controllers +++ b/ansible/inventory/group_vars/all/controllers @@ -125,10 +125,8 @@ controller_lvm_groups_extra: [] # Whether a 'data' LVM volume group should exist on controller hosts. By # default this contains a 'docker-volumes' logical volume for Docker volume -# storage. It will also be used for Docker container and image storage if -# 'docker_storage_driver' is set to 'devicemapper'. Default is true if -# 'docker_storage_driver' is set to 'devicemapper', or false otherwise. -controller_lvm_group_data_enabled: "{{ docker_storage_driver == 'devicemapper' }}" +# storage. Default is false. +controller_lvm_group_data_enabled: false # Controller LVM volume group for data. See mrlesmithjr.manage_lvm role for # format. diff --git a/ansible/inventory/group_vars/all/infra-vms b/ansible/inventory/group_vars/all/infra-vms index 3327ed69b..c78494ca4 100644 --- a/ansible/inventory/group_vars/all/infra-vms +++ b/ansible/inventory/group_vars/all/infra-vms @@ -127,10 +127,8 @@ infra_vm_lvm_groups_extra: [] # Whether a 'data' LVM volume group should exist on the infrastructure vm. By # default this contains a 'docker-volumes' logical volume for Docker volume -# storage. It will also be used for Docker container and image storage if -# 'docker_storage_driver' is set to 'devicemapper'. Default is true if -# 'docker_storage_driver' is set to 'devicemapper', or false otherwise. -infra_vm_lvm_group_data_enabled: "{{ docker_storage_driver == 'devicemapper' }}" +# storage. Default is false. 
+infra_vm_lvm_group_data_enabled: false # Infrastructure VM LVM volume group for data. See mrlesmithjr.manage_lvm role # for format. diff --git a/ansible/inventory/group_vars/all/seed b/ansible/inventory/group_vars/all/seed index 14b67020b..3263c3ca5 100644 --- a/ansible/inventory/group_vars/all/seed +++ b/ansible/inventory/group_vars/all/seed @@ -53,11 +53,9 @@ seed_lvm_groups_default: "{{ [seed_lvm_group_data] if seed_lvm_group_data_enable seed_lvm_groups_extra: [] # Whether a 'data' LVM volume group should exist on the seed. By default this -# contains a 'docker-volumes' logical volume for Docker volume storage. It will -# also be used for Docker container and image storage if -# 'docker_storage_driver' is set to 'devicemapper'. Default is true if -# 'docker_storage_driver' is set to 'devicemapper', or false otherwise. -seed_lvm_group_data_enabled: "{{ docker_storage_driver == 'devicemapper' }}" +# contains a 'docker-volumes' logical volume for Docker volume storage. +# Default is false. +seed_lvm_group_data_enabled: false # Seed LVM volume group for data. See mrlesmithjr.manage_lvm role for format. seed_lvm_group_data: diff --git a/ansible/inventory/group_vars/all/storage b/ansible/inventory/group_vars/all/storage index 40bcd8586..ff4f20d5f 100644 --- a/ansible/inventory/group_vars/all/storage +++ b/ansible/inventory/group_vars/all/storage @@ -95,11 +95,9 @@ storage_lvm_groups_default: "{{ [storage_lvm_group_data] if storage_lvm_group_da storage_lvm_groups_extra: [] # Whether a 'data' LVM volume group should exist on storage hosts. By default -# this contains a 'docker-volumes' logical volume for Docker volume storage. It -# will also be used for Docker container and image storage if -# 'docker_storage_driver' is set to 'devicemapper'. Default is true if -# 'docker_storage_driver' is set to 'devicemapper', or false otherwise. -storage_lvm_group_data_enabled: "{{ docker_storage_driver == 'devicemapper' }}" +# this contains a 'docker-volumes' logical volume for Docker volume storage. +# Default is false. +storage_lvm_group_data_enabled: false # Storage LVM volume group for data. See mrlesmithjr.manage_lvm role for # format. diff --git a/ansible/overcloud-host-configure.yml b/ansible/overcloud-host-configure.yml index e203c8dac..afdfa6b9a 100644 --- a/ansible/overcloud-host-configure.yml +++ b/ansible/overcloud-host-configure.yml @@ -22,7 +22,6 @@ - import_playbook: "luks.yml" - import_playbook: "lvm.yml" - import_playbook: "swap.yml" -- import_playbook: "docker-devicemapper.yml" - import_playbook: "kolla-ansible-user.yml" - import_playbook: "kolla-pip.yml" - import_playbook: "kolla-target-venv.yml" diff --git a/ansible/roles/docker-devicemapper/defaults/main.yml b/ansible/roles/docker-devicemapper/defaults/main.yml deleted file mode 100644 index 7c210ae89..000000000 --- a/ansible/roles/docker-devicemapper/defaults/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -# Name of the docker storage driver. -docker_storage_driver: overlay2 - -# Name of the docker storage LVM volume group. -docker_storage_volume_group: - -# Name of the docker storage data LVM volume. -docker_storage_volume_thinpool: - -# Size of the docker storage data LVM volume (see lvol module size argument). -docker_storage_volume_thinpool_size: - -# Name of the docker storage metadata LVM volume. -docker_storage_volume_thinpool_meta: - -# Size of the docker storage metadata LVM volume (see lvol module size -# argument). 
-docker_storage_volume_thinpool_meta_size: - -# Threshold at which to extend thin-provisioned docker storage volumes. -docker_storage_thinpool_autoextend_threshold: 80 - -# Percentage by which to extend thin-provisioned docker storage volumes. -docker_storage_thinpool_autoextend_percent: 20 diff --git a/ansible/roles/docker-devicemapper/handlers/main.yml b/ansible/roles/docker-devicemapper/handlers/main.yml deleted file mode 100644 index b7bf0f830..000000000 --- a/ansible/roles/docker-devicemapper/handlers/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: Ensure the docker storage volume is converted to a thinpool - command: > - lvconvert -y --zero n -c 512K - --thinpool {{ docker_storage_volume_group }}/{{ docker_storage_volume_thinpool }} - --poolmetadata {{ docker_storage_volume_group }}/{{ docker_storage_volume_thinpool_meta }} - become: True - -- name: Ensure the docker storage metadata profile is applied - command: > - lvchange --metadataprofile docker-thinpool - {{ docker_storage_volume_group }}/{{ docker_storage_volume_thinpool }} - become: True diff --git a/ansible/roles/docker-devicemapper/tasks/main.yml b/ansible/roles/docker-devicemapper/tasks/main.yml deleted file mode 100644 index fbda5a607..000000000 --- a/ansible/roles/docker-devicemapper/tasks/main.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -- name: Query docker daemon information - command: "docker info" - register: docker_info - changed_when: False - failed_when: False - -- name: Fail when non-devicemapper containers or images exist - fail: - msg: > - Not configuring docker storage in {{ docker_storage_driver }} mode as - non-devicemapper containers or images exist. - when: - - docker_info.rc == 0 - - "'Data loop file' in docker_info.stdout or 'devicemapper' not in docker_info.stdout" - - "'Images: 0' not in docker_info.stdout or 'Containers: 0' not in docker_info.stdout" - -- name: Ensure the docker storage metadata profile exists - template: - src: docker-thinpool.profile.j2 - dest: /etc/lvm/profile/docker-thinpool.profile - become: True - -- name: Query LVM thinpool volume - command: "lvs {{ docker_storage_volume_group }}/{{ docker_storage_volume_thinpool }}" - register: lvs_result - changed_when: false - failed_when: false - become: true - -- block: - - name: Ensure the docker storage data and metadata volumes exist - lvol: - vg: "{{ docker_storage_volume_group }}" - lv: "{{ item.name }}" - size: "{{ item.size }}" - shrink: no - state: present - with_items: - - name: "{{ docker_storage_volume_thinpool }}" - size: "{{ docker_storage_volume_thinpool_size }}" - - name: "{{ docker_storage_volume_thinpool_meta }}" - size: "{{ docker_storage_volume_thinpool_meta_size }}" - become: True - - - name: Ensure the docker storage volume is converted to a thinpool - command: > - lvconvert -y --zero n -c 512K - --thinpool {{ docker_storage_volume_group }}/{{ docker_storage_volume_thinpool }} - --poolmetadata {{ docker_storage_volume_group }}/{{ docker_storage_volume_thinpool_meta }} - become: True - - - name: Ensure the docker storage metadata profile is applied - command: > - lvchange --metadataprofile docker-thinpool - {{ docker_storage_volume_group }}/{{ docker_storage_volume_thinpool }} - become: True - when: lvs_result.rc != 0 diff --git a/ansible/roles/docker-devicemapper/templates/docker-thinpool.profile.j2 b/ansible/roles/docker-devicemapper/templates/docker-thinpool.profile.j2 deleted file mode 100644 index 4627e2ba2..000000000 --- a/ansible/roles/docker-devicemapper/templates/docker-thinpool.profile.j2 +++ /dev/null @@ 
-1,4 +0,0 @@ -activation { - thin_pool_autoextend_threshold={{ docker_storage_thinpool_autoextend_threshold }} - thin_pool_autoextend_percent={{ docker_storage_thinpool_autoextend_percent }} -} diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml index d9df1ef78..66579bfa5 100644 --- a/ansible/roles/docker/defaults/main.yml +++ b/ansible/roles/docker/defaults/main.yml @@ -1,7 +1,4 @@ --- -docker_storage_driver: overlay2 -docker_storage_volume_group: -docker_storage_volume_thinpool: docker_registry_mirrors: [] docker_daemon_debug: false docker_daemon_mtu: 1500 diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml index 08935fad6..13fd6bd03 100644 --- a/ansible/roles/docker/tasks/main.yml +++ b/ansible/roles/docker/tasks/main.yml @@ -2,4 +2,18 @@ - import_role: name: openstack.kolla.docker vars: - docker_custom_config: "{{ lookup('template', 'daemon.json.j2') | to_nice_json | indent(2) }}" + docker_custom_config: >- + {%- set options = {} -%} + {%- if docker_daemon_debug | bool -%} + {%- set _ = options.update({"debug": docker_daemon_debug | bool}) -%} + {%- endif -%} + {%- if docker_registry_mirrors | length > 0 -%} + {%- set _ = options.update({"registry-mirrors": docker_registry_mirrors}) -%} + {%- endif -%} + {%- if docker_daemon_mtu -%} + {%- set _ = options.update({"mtu": docker_daemon_mtu}) -%} + {%- endif -%} + {%- if docker_daemon_live_restore | bool -%} + {%- set _ = options.update({"live-restore": docker_daemon_live_restore | bool}) -%} + {%- endif -%} + {{ options }} diff --git a/ansible/roles/docker/templates/daemon.json.j2 b/ansible/roles/docker/templates/daemon.json.j2 deleted file mode 100644 index bf392d560..000000000 --- a/ansible/roles/docker/templates/daemon.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ -{%- if docker_daemon_debug | bool %} - "debug": {{ docker_daemon_debug | to_json }}, -{%- endif %} -{%- if docker_registry_mirrors | length > 0 %} - "registry-mirrors": [ -{%- for mirror in docker_registry_mirrors %} - "{{ mirror }}"{%- if not loop.last -%},{%- endif %} -{%- endfor %} - ], -{%- endif %} -{% if docker_daemon_mtu %} - "mtu": {{ docker_daemon_mtu }}, -{% endif %} -{% if docker_daemon_live_restore %} - "live-restore": {{ docker_daemon_live_restore | to_json }}, -{% endif %} - "storage-opts": [ -{% if docker_storage_driver == 'devicemapper' %} - "dm.thinpooldev=/dev/mapper/{{ docker_storage_volume_group | replace('-', '--') }}-{{ docker_storage_volume_thinpool | replace('-', '--') }}", - "dm.use_deferred_removal=true", - "dm.use_deferred_deletion=true" -{% endif %} - ] -} diff --git a/ansible/seed-host-configure.yml b/ansible/seed-host-configure.yml index 920ff2dce..6040eda42 100644 --- a/ansible/seed-host-configure.yml +++ b/ansible/seed-host-configure.yml @@ -21,7 +21,6 @@ - import_playbook: "mdadm.yml" - import_playbook: "luks.yml" - import_playbook: "lvm.yml" -- import_playbook: "docker-devicemapper.yml" - import_playbook: "kolla-ansible-user.yml" - import_playbook: "kolla-pip.yml" - import_playbook: "kolla-target-venv.yml" diff --git a/doc/source/configuration/reference/hosts.rst b/doc/source/configuration/reference/hosts.rst index 750d3af2f..19edabdb2 100644 --- a/doc/source/configuration/reference/hosts.rst +++ b/doc/source/configuration/reference/hosts.rst @@ -904,14 +904,11 @@ LVM for Docker In Train and earlier releases of Kayobe, the ``data`` volume group was always enabled by default. 
-If the ``devicemapper`` Docker storage driver is in use, the default LVM
-configuration is optimised for it. The ``devicemapper`` driver requires a thin
-provisioned LVM volume. A second logical volume is used for storing Docker
-volume data, mounted at ``/var/lib/docker/volumes``. Both logical volumes are
-created from a single ``data`` volume group.
+A logical volume for storing Docker volume data, mounted at ``/var/lib/docker/volumes``,
+can optionally be created. The logical volume is created in a volume group called ``data``.

 This configuration is enabled by the following variables, which default to
-``true`` if the ``devicemapper`` driver is in use or ``false`` otherwise:
+``false``:

 * ``compute_lvm_group_data_enabled``
 * ``controller_lvm_group_data_enabled``
@@ -919,10 +916,6 @@ This configuration is enabled by the following variables, which default to
 * ``infra_vm_lvm_group_data_enabled``
 * ``storage_lvm_group_data_enabled``

-These variables can be set to ``true`` to enable the data volume group if the
-``devicemapper`` driver is not in use. This may be useful where the
-``docker-volumes`` logical volume is required.
-
 To use this configuration, a list of disks must be configured via the
 following variables:

@@ -952,18 +945,17 @@ variables, with a default value of 75% (of the volume group's capacity):
 * ``monitoring_lvm_group_data_lv_docker_volumes_size``
 * ``storage_lvm_group_data_lv_docker_volumes_size``

-If using a Docker storage driver other than ``devicemapper``, the remaining 25%
-of the volume group can be used for Docker volume data. In this case, the LVM
-volume's size can be increased to 100%:
+You can control the amount of storage assigned to the ``docker-volumes``
+logical volume using the following variable, for example:

 .. code-block:: yaml
    :caption: ``controllers.yml``

    controller_lvm_group_data_lv_docker_volumes_size: 100%

-If using a Docker storage driver other than ``devicemapper``, it is possible to
-avoid using LVM entirely, thus avoiding the requirement for multiple disks. In
-this case, set the appropriate ``_lvm_groups`` variable to an empty list:
+It is possible to avoid using LVM entirely, thus avoiding the requirement for
+multiple disks. In this case, set the appropriate ``_lvm_groups``
+variable to an empty list:

 .. code-block:: yaml
    :caption: ``storage.yml``

@@ -1058,12 +1050,8 @@ Docker Engine
 | ``docker``

 The ``docker_storage_driver`` variable sets the Docker storage driver, and by
-default the ``overlay2`` driver is used. If using the ``devicemapper`` driver,
-see :ref:`configuration-hosts-lvm` for information about configuring LVM for
-Docker.
-
-Various options are defined in ``${KAYOBE_CONFIG_PATH}/docker.yml``
-for configuring the ``devicemapper`` storage.
+default the ``overlay2`` driver is used. See :ref:`configuration-hosts-lvm` for
+information about configuring LVM for Docker.

 If using an insecure (HTTP) registry, set ``docker_registry_insecure`` to
 ``true``.
diff --git a/etc/kayobe/compute.yml b/etc/kayobe/compute.yml
index 15c4cbcc6..7a861fcd8 100644
--- a/etc/kayobe/compute.yml
+++ b/etc/kayobe/compute.yml
@@ -76,10 +76,8 @@
 #compute_lvm_groups_extra:

 # Whether a 'data' LVM volume group should exist on compute hosts. By default
-# this contains a 'docker-volumes' logical volume for Docker volume storage. It
-# will also be used for Docker container and image storage if
-# 'docker_storage_driver' is set to 'devicemapper'. Default is true if
-# 'docker_storage_driver' is set to 'devicemapper', or false otherwise.
+# this contains a 'docker-volumes' logical volume for Docker volume storage. +# Default is false. #compute_lvm_group_data_enabled: # Compute LVM volume group for data. See mrlesmithjr.manage_lvm role for diff --git a/etc/kayobe/controllers.yml b/etc/kayobe/controllers.yml index 76b7bb00a..d974cc6b1 100644 --- a/etc/kayobe/controllers.yml +++ b/etc/kayobe/controllers.yml @@ -105,9 +105,8 @@ # Whether a 'data' LVM volume group should exist on controller hosts. By # default this contains a 'docker-volumes' logical volume for Docker volume -# storage. It will also be used for Docker container and image storage if -# 'docker_storage_driver' is set to 'devicemapper'. Default is true if -# 'docker_storage_driver' is set to 'devicemapper', or false otherwise. +# storage. +# Default is false. #controller_lvm_group_data_enabled: # Controller LVM volume group for data. See mrlesmithjr.manage_lvm role for diff --git a/etc/kayobe/infra-vms.yml b/etc/kayobe/infra-vms.yml index 3e1db5d5e..a8f1fd9b2 100644 --- a/etc/kayobe/infra-vms.yml +++ b/etc/kayobe/infra-vms.yml @@ -106,9 +106,7 @@ # Whether a 'data' LVM volume group should exist on the infrastructure vm. By # default this contains a 'docker-volumes' logical volume for Docker volume -# storage. It will also be used for Docker container and image storage if -# 'docker_storage_driver' is set to 'devicemapper'. Default is true if -# 'docker_storage_driver' is set to 'devicemapper', or false otherwise. +# storage. Default is false. #infra_vm_lvm_group_data_enabled: # Infrastructure VM LVM volume group for data. See mrlesmithjr.manage_lvm role diff --git a/etc/kayobe/seed.yml b/etc/kayobe/seed.yml index bc86fa627..541c07808 100644 --- a/etc/kayobe/seed.yml +++ b/etc/kayobe/seed.yml @@ -48,10 +48,8 @@ #seed_lvm_groups_extra: # Whether a 'data' LVM volume group should exist on the seed. By default this -# contains a 'docker-volumes' logical volume for Docker volume storage. It will -# also be used for Docker container and image storage if -# 'docker_storage_driver' is set to 'devicemapper'. Default is true if -# 'docker_storage_driver' is set to 'devicemapper', or false otherwise. +# contains a 'docker-volumes' logical volume for Docker volume storage. +# Default is false. #seed_lvm_group_data_enabled: # Seed LVM volume group for data. See mrlesmithjr.manage_lvm role for format. diff --git a/etc/kayobe/storage.yml b/etc/kayobe/storage.yml index e9e52dfe6..2cdac5bed 100644 --- a/etc/kayobe/storage.yml +++ b/etc/kayobe/storage.yml @@ -81,10 +81,8 @@ #storage_lvm_groups_extra: # Whether a 'data' LVM volume group should exist on storage hosts. By default -# this contains a 'docker-volumes' logical volume for Docker volume storage. It -# will also be used for Docker container and image storage if -# 'docker_storage_driver' is set to 'devicemapper'. Default is true if -# 'docker_storage_driver' is set to 'devicemapper', or false otherwise. +# this contains a 'docker-volumes' logical volume for Docker volume storage. +# Default is false. #storage_lvm_group_data_enabled: # Storage LVM volume group for data. 
See mrlesmithjr.manage_lvm role for diff --git a/playbooks/kayobe-overcloud-host-configure-base/overrides.yml.j2 b/playbooks/kayobe-overcloud-host-configure-base/overrides.yml.j2 index da0ea7765..2d7828f05 100644 --- a/playbooks/kayobe-overcloud-host-configure-base/overrides.yml.j2 +++ b/playbooks/kayobe-overcloud-host-configure-base/overrides.yml.j2 @@ -107,7 +107,7 @@ controller_luks_devices: - name: loopback-crypt device: /dev/md0 -# Create an LVM volume group for Docker volumes and devicemapper. +# Create an LVM volume group for Docker volumes. controller_lvm_groups: - "{% raw %}{{ controller_lvm_group_data }}{% endraw %}" @@ -122,9 +122,6 @@ controller_sysctl_parameters: # Disable cloud-init. disable_cloud_init: true -# Use devicemapper storage driver. -docker_storage_driver: devicemapper - # Set Honolulu time. timezone: Pacific/Honolulu diff --git a/playbooks/kayobe-overcloud-host-configure-base/tests/test_overcloud_host_configure.py b/playbooks/kayobe-overcloud-host-configure-base/tests/test_overcloud_host_configure.py index 1a1965723..1c9c1024a 100644 --- a/playbooks/kayobe-overcloud-host-configure-base/tests/test_overcloud_host_configure.py +++ b/playbooks/kayobe-overcloud-host-configure-base/tests/test_overcloud_host_configure.py @@ -144,10 +144,10 @@ def test_cloud_init_is_disabled(host): assert host.file("/etc/cloud/cloud-init.disabled").exists -def test_docker_storage_driver_is_devicemapper(host): +def test_docker_storage_driver_is_overlay2(host): with host.sudo("stack"): info = host.check_output("docker info") - assert "devicemapper" in info + assert "overlay2" in info @pytest.mark.parametrize('user', ['kolla', 'stack']) diff --git a/releasenotes/notes/remove-devicemapper-a594e6f24b4885ab.yaml b/releasenotes/notes/remove-devicemapper-a594e6f24b4885ab.yaml new file mode 100644 index 000000000..deb502037 --- /dev/null +++ b/releasenotes/notes/remove-devicemapper-a594e6f24b4885ab.yaml @@ -0,0 +1,7 @@ +--- +upgrade: + - | + Support for the ``devicemapper`` Docker storage driver is removed + following its removal from Docker Engine 25.0. Operators using + ``devicemapper`` should migrate to a supported storage driver before + updating Docker to 25.0 or later. diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml index 131ef7875..a89597198 100644 --- a/zuul.d/jobs.yaml +++ b/zuul.d/jobs.yaml @@ -92,7 +92,7 @@ - ^zuul.d/.* vars: # Name of the release to upgrade from for upgrade jobs. - previous_release: 2023.1 + previous_release: 2023.2 logs_dir: "/tmp/logs" ansible_collection_kolla_src_dir: "{{ ansible_env.PWD ~ '/' ~ zuul.projects['opendev.org/openstack/ansible-collection-kolla'].src_dir }}" kayobe_src_dir: "{{ ansible_env.PWD ~ '/' ~ zuul.projects['opendev.org/openstack/kayobe'].src_dir }}"