Merge "Update ironic IPA deployment images"
commit 4b33044eb9
@@ -99,4 +99,5 @@
ipa_images_venv: "{{ virtualenv_path }}/shade"
ipa_images_openstack_auth_type: "{{ openstack_auth_type }}"
ipa_images_openstack_auth: "{{ openstack_auth }}"
ipa_images_openstack_auth_env: "{{ openstack_auth_env }}"
ipa_images_cache_path: "{{ image_cache_path }}/{{ ipa_image_name }}"
@@ -10,6 +10,10 @@ ipa_images_openstack_auth_type:
# auth argument.
ipa_images_openstack_auth: {}

# Authentication parameters in environment variable form, as accepted by the
# openstack client.
ipa_images_openstack_auth_env: {}

# Path to directory in which to store downloaded images.
ipa_images_cache_path:

@@ -26,3 +30,11 @@ ipa_images_ramdisk_name:
# URL of Ironic deployment ramdisk image to download. If unset, an existing
# image in ipa_images_cache_path will be used.
ipa_images_ramdisk_url:

# Ansible host pattern for limiting which nodes are updated with deploy_ramdisk
# and deploy_kernel properties
ipa_images_compute_node_limit: baremetal-compute

# Whether or not to update the deploy_ramdisk and deploy_kernel Ironic node
# properties
ipa_images_update_ironic_nodes: False
@@ -2,3 +2,5 @@
dependencies:
- role: stackhpc.os-shade
os_shade_venv: "{{ ipa_images_venv }}"
- role: stackhpc.os-openstackclient
os_openstackclient_venv: "{{ ipa_images_venv }}"
@@ -11,6 +11,8 @@
get_url:
url: "{{ item.url }}"
dest: "{{ ipa_images_cache_path }}/{{ item.filename }}"
force: true
backup: true
with_items:
- url: "{{ ipa_images_kernel_url }}"
filename: "{{ ipa_images_kernel_name }}"
@@ -21,20 +23,36 @@
- name: Compute the MD5 checksum of the Ironic Python Agent (IPA) images
stat:
path: "{{ ipa_images_cache_path }}/{{ item }}"
get_md5: True
get_checksum: False
get_checksum: True
checksum_algorithm: md5
mime: False
with_items:
- "{{ ipa_images_kernel_name }}"
- "{{ ipa_images_ramdisk_name }}"
register: ipa_images_checksum

- name: Fail if an image does not exist
fail:
msg: "{{ item.path }} does not exist"
with_items:
- path: "{{ ipa_images_cache_path }}/{{ ipa_images_kernel_name }}"
exists: "{{ ipa_images_checksum.results[0].stat.exists | bool }}"
- path: "{{ ipa_images_cache_path }}/{{ ipa_images_ramdisk_name }}"
exists: "{{ ipa_images_checksum.results[1].stat.exists | bool }}"
when:
- not item.exists

- name: Activate the virtualenv
include_role:
name: activate-virtualenv
vars:
activate_virtualenv_path: "{{ ipa_images_venv }}"

- name: Ensure we have python-ironicclient installed
pip:
name: python-ironicclient
virtualenv: "{{ ipa_images_venv }}"

# To support updating the IPA image, we check the MD5 sum of the cached image
# files, and compare with the images in Glance (if there are any).

@@ -44,10 +62,9 @@
auth: "{{ ipa_images_openstack_auth }}"
image: "{{ ipa_images_kernel_name }}"

- name: Set a fact containing the Ironic Python Agent (IPA) kernel image checksum
- name: Set a fact containing the Ironic Python Agent (IPA) kernel image
set_fact:
ipa_images_kernel_checksum: "{{ openstack_image.checksum }}"
when: openstack_image != None
ipa_images_kernel_openstack_image: "{{ openstack_image if openstack_image else {} }}"

- name: Gather facts about Ironic Python Agent (IPA) ramdisk image
os_image_facts:
@@ -55,27 +72,30 @@
auth: "{{ ipa_images_openstack_auth }}"
image: "{{ ipa_images_ramdisk_name }}"

- name: Set a fact containing the Ironic Python Agent (IPA) ramdisk image checksum
- name: Set a fact containing the Ironic Python Agent (IPA) ramdisk image
set_fact:
ipa_images_ramdisk_checksum: "{{ openstack_image.checksum }}"
when: openstack_image != None
ipa_images_ramdisk_openstack_image: "{{ openstack_image if openstack_image else {} }}"

- name: Ensure Ironic Python Agent (IPA) images are removed from Glance
os_image:
auth_type: "{{ ipa_images_openstack_auth_type }}"
auth: "{{ ipa_images_openstack_auth }}"
name: "{{ item.name }}"
state: absent
# The os_image module will get confused if there are multiple images with the
# same name, so rename the old images. They will still be accessible via UUID.
- name: Ensure old Ironic Python Agent (IPA) images are renamed
command: >
{{ ipa_images_venv }}/bin/openstack image set {{ item.name }} --name {{ item.name }}.{{ extension }}
vars:
extension: "{{ item.created_at | replace(':', '-') }}~"
with_items:
- name: "{{ ipa_images_kernel_name }}"
checksum: "{{ ipa_images_checksum.results[0].stat.md5 }}"
glance_checksum: "{{ ipa_images_kernel_checksum | default }}"
created_at: "{{ ipa_images_kernel_openstack_image.created_at | default }}"
checksum: "{{ ipa_images_checksum.results[0].stat.checksum }}"
glance_checksum: "{{ ipa_images_kernel_openstack_image.checksum | default }}"
- name: "{{ ipa_images_ramdisk_name }}"
checksum: "{{ ipa_images_checksum.results[1].stat.md5 }}"
glance_checksum: "{{ ipa_images_ramdisk_checksum | default }}"
created_at: "{{ ipa_images_ramdisk_openstack_image.created_at | default }}"
checksum: "{{ ipa_images_checksum.results[1].stat.checksum }}"
glance_checksum: "{{ ipa_images_ramdisk_openstack_image.checksum | default }}"
when:
- item.glance_checksum != None
- item.glance_checksum
- item.checksum != item.glance_checksum
environment: "{{ ipa_images_openstack_auth_env }}"

- name: Ensure Ironic Python Agent (IPA) images are registered with Glance
os_image:
@@ -91,6 +111,10 @@
format: aki
- name: "{{ ipa_images_ramdisk_name }}"
format: ari
register: ipa_images_new_images

- include_tasks: set-driver-info.yml
when: ipa_images_update_ironic_nodes | bool

- name: Deactivate the virtualenv
include_role:
ansible/roles/ipa-images/tasks/set-driver-info.yml (new file, 60 lines)
@@ -0,0 +1,60 @@
---

- name: Retrieve deployment image uuids
  os_image_facts:
    auth_type: "{{ ipa_images_openstack_auth_type }}"
    auth: "{{ ipa_images_openstack_auth }}"
    image: "{{ item.name }}"
  with_items:
    - name: "{{ ipa_images_kernel_name }}"
    - name: "{{ ipa_images_ramdisk_name }}"
  register: ipa_images_glance

- name: Set fact containing kernel uuid
  set_fact:
    ipa_images_kernel_uuid: "{{ ipa_images_glance.results[0].ansible_facts.openstack_image.id }}"

- name: Set fact containing ramdisk uuid
  set_fact:
    ipa_images_ramdisk_uuid: "{{ ipa_images_glance.results[1].ansible_facts.openstack_image.id }}"

- name: Get a list of ironic nodes
  command: |
    {{ ipa_images_venv }}/bin/openstack baremetal node list --fields name uuid driver_info -f json
  register: ipa_images_ironic_node_list
  changed_when: False
  environment: "{{ ipa_images_openstack_auth_env }}"

- name: Make sure openstack nodes are in baremetal-compute group
  add_host:
    name: "{{ item.Name }}"
    groups: baremetal-compute
  when:
    - item.Name is not none
    - item.Name not in groups["baremetal-compute"]
  with_items: "{{ ipa_images_ironic_node_list.stdout | from_json }}"

- name: Set fact containing filtered list of nodes
  set_fact:
    ipa_images_compute_node_whitelist: "{{ query('inventory_hostnames', ipa_images_compute_node_limit) | unique }}"

- name: Initialise a fact containing the ironic nodes
  set_fact:
    ipa_images_ironic_nodes: []

- name: Update a fact containing the ironic nodes
  set_fact:
    ipa_images_ironic_nodes: "{{ ipa_images_ironic_nodes + [item] }}"
  with_items: "{{ ipa_images_ironic_node_list.stdout | from_json }}"
  when: item['Name'] in ipa_images_compute_node_whitelist

- name: Ensure ironic nodes use the new Ironic Python Agent (IPA) images
  command: >
    {{ ipa_images_venv }}/bin/openstack baremetal node set {{ item.UUID }}
    --driver-info deploy_kernel={{ ipa_images_kernel_uuid }}
    --driver-info deploy_ramdisk={{ ipa_images_ramdisk_uuid }}
  with_items: "{{ ipa_images_ironic_nodes }}"
  when:
    item["Driver Info"].deploy_kernel != ipa_images_kernel_uuid or
    item["Driver Info"].deploy_ramdisk != ipa_images_ramdisk_uuid
  environment: "{{ ipa_images_openstack_auth_env }}"
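
For reference, each item parsed from the ``openstack baremetal node list --fields name uuid driver_info -f json`` output above has roughly the following shape (shown here as YAML). The keys match the ``item.Name``, ``item.UUID`` and ``item["Driver Info"]`` references in the tasks; the values are purely illustrative:

    # One entry per Ironic node, keyed by the CLI column titles.
    Name: sand-6-1
    UUID: <node UUID>
    Driver Info:
      deploy_kernel: <Glance UUID of the IPA kernel image>
      deploy_ramdisk: <Glance UUID of the IPA ramdisk image>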
@@ -203,6 +203,31 @@ according to their inventory host names, you can run the following command::

This command will use the ``ipmi_address`` host variable from the inventory
to map the inventory host name to the correct node.

.. _update_deployment_image:

Update Deployment Image
-----------------------

When the overcloud deployment images have been rebuilt, or there has been a
change to one of the following variables:

- ``ipa_kernel_upstream_url``
- ``ipa_ramdisk_upstream_url``

either because a URL has changed or because the image it points to has
changed, you need to update the ``deploy_ramdisk`` and ``deploy_kernel``
properties on the Ironic nodes. To do this, run::

    (kayobe) $ kayobe baremetal compute update deployment image

You can optionally limit the nodes that this affects by setting
``--baremetal-compute-limit``::

    (kayobe) $ kayobe baremetal compute update deployment image --baremetal-compute-limit sand-6-1

The limit should take the form of an `ansible host pattern
<https://docs.ansible.com/ansible/latest/user_guide/intro_patterns.html>`_,
and is matched against the Ironic node name.
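
For instance, a host pattern can select more than one node at once (the node
names here are illustrative)::

    (kayobe) $ kayobe baremetal compute update deployment image --baremetal-compute-limit 'sand-6-1:sand-6-2'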

Running Kayobe Playbooks on Demand
==================================

@@ -121,6 +121,8 @@ should be upgraded::
Note that this will not perform full configuration of the host, and will
instead perform a targeted upgrade of specific services where necessary.

.. _building_ironic_deployment_images:

Building Ironic Deployment Images
---------------------------------

@@ -141,9 +143,23 @@ should be set to ``True``. To build images locally::
Upgrading Ironic Deployment Images
----------------------------------

Prior to upgrading the OpenStack control plane, the baremetal compute nodes
should be configured to use an updated deployment ramdisk. This procedure is
not currently automated by kayobe, and should be performed manually.
Prior to upgrading the OpenStack control plane you should upgrade the
deployment images. If you are using prebuilt images, update
``ipa_kernel_upstream_url`` and ``ipa_ramdisk_upstream_url`` in
``etc/kayobe/ipa.yml``; alternatively, you can update the files to which the
URLs point. If building the images locally, follow the process outlined in
:ref:`building_ironic_deployment_images`.
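
For example, prebuilt deployment image URLs might be configured in
``etc/kayobe/ipa.yml`` as follows (the URLs are illustrative only)::

    ---
    # URL of Ironic Python Agent (IPA) kernel image (illustrative).
    ipa_kernel_upstream_url: "https://example.com/images/ipa.vmlinuz"

    # URL of Ironic Python Agent (IPA) ramdisk image (illustrative).
    ipa_ramdisk_upstream_url: "https://example.com/images/ipa.initramfs"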

To get Ironic to use an updated set of overcloud deployment images, you can run::

    (kayobe) $ kayobe baremetal compute update deployment image

This will register the images in Glance and update the ``deploy_ramdisk``
and ``deploy_kernel`` properties of the Ironic nodes.

Before rolling out the update to all nodes, it can be useful to test the image
on a limited subset. To do this, you can use the ``--baremetal-compute-limit``
option. See :ref:`update_deployment_image` for more details.

Building Container Images
-------------------------
@@ -1238,3 +1238,31 @@ class BaremetalComputeRename(KayobeAnsibleMixin, VaultMixin, Command):
        self.app.LOG.debug("Renaming baremetal compute nodes")
        playbooks = _build_playbook_list("baremetal-compute-rename")
        self.run_kayobe_playbooks(parsed_args, playbooks)


class BaremetalComputeUpdateDeploymentImage(KayobeAnsibleMixin, VaultMixin,
                                            Command):
    """Update the Ironic nodes to use the new kernel and ramdisk images."""

    def get_parser(self, prog_name):
        parser = super(BaremetalComputeUpdateDeploymentImage, self).get_parser(
            prog_name)
        group = parser.add_argument_group("Baremetal Compute Update")
        group.add_argument("--baremetal-compute-limit",
                           help="Limit the upgrade to the hosts specified in "
                                "this limit"
                           )
        return parser

    def take_action(self, parsed_args):
        self.app.LOG.debug(
            "Upgrading the ironic nodes to use the latest deployment images")
        playbooks = _build_playbook_list("overcloud-ipa-images")
        extra_vars = {}
        extra_vars["ipa_images_update_ironic_nodes"] = True
        if parsed_args.baremetal_compute_limit:
            extra_vars["ipa_images_compute_node_limit"] = (
                parsed_args.baremetal_compute_limit
            )
        self.run_kayobe_playbooks(parsed_args, playbooks,
                                  extra_vars=extra_vars)
@@ -1009,3 +1009,48 @@ class TestCase(unittest.TestCase):
            ),
        ]
        self.assertEqual(expected_calls, mock_run.call_args_list)

    @mock.patch.object(commands.KayobeAnsibleMixin,
                       "run_kayobe_playbooks")
    def test_baremetal_compute_update_deployment_image(self, mock_run):
        command = commands.BaremetalComputeUpdateDeploymentImage(TestApp(), [])
        parser = command.get_parser("test")
        parsed_args = parser.parse_args([])
        result = command.run(parsed_args)
        self.assertEqual(0, result)
        expected_calls = [
            mock.call(
                mock.ANY,
                [
                    "ansible/overcloud-ipa-images.yml",
                ],
                extra_vars={
                    "ipa_images_update_ironic_nodes": True,
                }
            ),
        ]
        self.assertEqual(expected_calls, mock_run.call_args_list)

    @mock.patch.object(commands.KayobeAnsibleMixin,
                       "run_kayobe_playbooks")
    def test_baremetal_compute_update_deployment_image_with_limit(
            self, mock_run):
        command = commands.BaremetalComputeUpdateDeploymentImage(TestApp(), [])
        parser = command.get_parser("test")
        parsed_args = parser.parse_args(["--baremetal-compute-limit",
                                         "sand-6-1"])
        result = command.run(parsed_args)
        self.assertEqual(0, result)
        expected_calls = [
            mock.call(
                mock.ANY,
                [
                    "ansible/overcloud-ipa-images.yml",
                ],
                extra_vars={
                    "ipa_images_compute_node_limit": "sand-6-1",
                    "ipa_images_update_ironic_nodes": True,
                }
            ),
        ]
        self.assertEqual(expected_calls, mock_run.call_args_list)
@@ -0,0 +1,6 @@
---
features:
  - |
    Adds a new command, ``kayobe baremetal compute update deployment image``,
    which will update the deploy_kernel and deploy_ramdisk ironic node
    properties.
@@ -33,6 +33,7 @@ kayobe.cli=
baremetal_compute_manage = kayobe.cli.commands:BaremetalComputeManage
baremetal_compute_provide = kayobe.cli.commands:BaremetalComputeProvide
baremetal_compute_rename = kayobe.cli.commands:BaremetalComputeRename
baremetal_compute_update_deployment_image = kayobe.cli.commands:BaremetalComputeUpdateDeploymentImage
control_host_bootstrap = kayobe.cli.commands:ControlHostBootstrap
control_host_upgrade = kayobe.cli.commands:ControlHostUpgrade
configuration_dump = kayobe.cli.commands:ConfigurationDump