Enhance Ceph Integration for Multiple Clusters

This patch enhances Kolla-Ansible's existing support for
multiple Ceph clusters by improving the management of
custom users, pools, and cluster names. It aligns
the Ceph integration more closely with the official Ceph
user management guidelines [1].

Keyrings now follow the format `$cluster.client.$user.keyring`, which
simplifies configuration when multiple Ceph clusters are in use,
especially in environments with multiple availability zones.
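
For example (illustrative values only), with the following in
`/etc/kolla/globals.yml`:

    ceph_cluster: "az1"
    ceph_cinder_user: "cinder"

kolla-ansible then expects the Cinder keyring to be named
`az1.client.cinder.keyring`.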

Previously, keyrings and config files required workarounds; this patch
eliminates them in favour of a cleaner, more structured configuration
approach that follows Ceph best practices.

The default Kolla setup remains unaffected by these changes,
ensuring backward compatibility.
Updated documentation and examples are included.

[1] https://docs.ceph.com/en/latest/rados/operations/user-management/#keyring-management

Change-Id: I2593b6df737b384f1a5fba22f69e851c575990b4
Michal Arbet 2024-01-30 04:31:14 +01:00
parent 6faae441bd
commit 66534e9dc5
24 changed files with 218 additions and 152 deletions

View File

@@ -1259,6 +1259,8 @@ horizon_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
# External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes)
external_ceph_cephx_enabled: "yes"
ceph_cluster: "ceph"
# External Ceph pool names
ceph_cinder_pool_name: "volumes"
ceph_cinder_backup_pool_name: "backups"
@@ -1273,14 +1275,6 @@ ceph_gnocchi_user: "gnocchi"
ceph_manila_user: "manila"
ceph_nova_user: "{{ ceph_cinder_user }}"
# External Ceph keyrings
ceph_cinder_keyring: "client.{{ ceph_cinder_user }}.keyring"
ceph_cinder_backup_keyring: "client.{{ ceph_cinder_backup_user }}.keyring"
ceph_glance_keyring: "client.{{ ceph_glance_user }}.keyring"
ceph_gnocchi_keyring: "client.{{ ceph_gnocchi_user }}.keyring"
ceph_manila_keyring: "client.{{ ceph_manila_user }}.keyring"
ceph_nova_keyring: "{{ ceph_cinder_keyring }}"
#####################
# VMware support
######################

View File

@@ -264,11 +264,13 @@ cinder_backend_pure_nvme_tcp_name: "Pure-FlashArray-nvme-tcp"
cinder_ceph_backends:
- name: "{{ cinder_backend_ceph_name }}"
cluster: "ceph"
cluster: "{{ ceph_cluster }}"
user: "{{ ceph_cinder_user }}"
pool: "{{ ceph_cinder_pool_name }}"
enabled: "{{ cinder_backend_ceph | bool }}"
cinder_backup_backend_ceph_name: "rbd-1"
cinder_backup_ceph_backend: "{{ cinder_ceph_backends | selectattr('name', 'equalto', cinder_backup_backend_ceph_name) | list | first }}"
cinder_backup_ceph_backend: "{{ cinder_ceph_backends | selectattr('name', 'equalto', cinder_backup_backend_ceph_name) | list | first | combine({'pool': ceph_cinder_backup_pool_name, 'user': ceph_cinder_backup_user }) }}"
skip_cinder_backend_check: False
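
The `combine` above overlays the backup user and pool onto the volume
backend selected by name, so with the defaults shown,
`cinder_backup_ceph_backend` resolves roughly to this (an illustrative
sketch, not part of the patch):

    name: "rbd-1"
    cluster: "ceph"
    user: "cinder-backup"
    pool: "backups"
    enabled: "{{ cinder_backend_ceph | bool }}"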

View File

@@ -34,13 +34,13 @@
- service_name in services_need_config
with_nested:
- "{{ cinder_services | dict2items }}"
- "{{ cinder_ceph_backends }}"
- "{{ cinder_ceph_backends + [cinder_backup_ceph_backend] }}"
notify:
- "Restart {{ service_name }} container"
- name: Copy over Ceph keyring files for cinder-volume
vars:
keyring: "{{ item.cluster }}.{{ ceph_cinder_keyring }}"
keyring: "{{ item.cluster }}.client.{{ item.user }}.keyring"
service: "{{ cinder_services['cinder-volume'] }}"
template:
src: "{{ node_custom_config }}/cinder/cinder-volume/{{ keyring }}"
@@ -57,15 +57,15 @@
- name: Copy over Ceph keyring files for cinder-backup
vars:
service: "{{ cinder_services['cinder-backup'] }}"
keyring: "{{ item.cluster }}.client.{{ item.user }}.keyring"
template:
src: "{{ node_custom_config }}/cinder/cinder-backup/{{ item }}"
dest: "{{ node_config_directory }}/cinder-backup/ceph/{{ item }}"
src: "{{ node_custom_config }}/cinder/cinder-backup/{{ keyring }}"
dest: "{{ node_config_directory }}/cinder-backup/ceph/{{ keyring }}"
mode: "0660"
become: true
register: cinder_backup_ceph_keyring
with_items:
- "{{ cinder_backup_ceph_backend.cluster }}.{{ ceph_cinder_keyring }}"
- "{{ cinder_backup_ceph_backend.cluster }}.{{ ceph_cinder_backup_keyring }}"
- "{{ cinder_ceph_backends }}"
- "{{ cinder_backup_ceph_backend }}"
when:
- external_ceph_cephx_enabled | bool
- service | service_enabled_and_mapped_to_host

View File

@@ -25,16 +25,18 @@ cluster = {{ cinder_cluster_name }}
{% endif %}
{% if cinder_enabled_backends %}
{% if service_name == 'cinder-volume' %}
enabled_backends = {{ cinder_enabled_backends|map(attribute='name')|join(',') }}
{% endif %}
{% endif %}
{% if service_name == "cinder-backup" and enable_cinder_backup | bool %}
{% if cinder_backup_driver == "ceph" %}
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_conf = /etc/ceph/{{ cinder_backup_ceph_backend.cluster }}.conf
backup_ceph_user = {{ ceph_cinder_backup_user }}
backup_ceph_conf = /etc/ceph/{{ cinder_backup_ceph_backend['cluster'] }}.conf
backup_ceph_user = {{ cinder_backup_ceph_backend['user'] }}
backup_ceph_chunk_size = 134217728
backup_ceph_pool = {{ ceph_cinder_backup_pool_name }}
backup_ceph_pool = {{ cinder_backup_ceph_backend['pool'] }}
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
@@ -150,16 +152,17 @@ target_protocol = iscsi
{% endif %}
{% if cinder_backend_ceph | bool %}
{% if service_name == 'cinder-volume' %}
{% for backend in cinder_ceph_backends %}
[{{ backend.name }}]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = {{ backend.name }}
rbd_pool = {{ ceph_cinder_pool_name }}
rbd_pool = {{ backend.pool }}
rbd_ceph_conf = /etc/ceph/{{ backend.cluster }}.conf
rados_connect_timeout = 5
rbd_user = {{ ceph_cinder_user }}
rbd_user = {{ backend.user }}
rbd_cluster_name = {{ backend.cluster }}
rbd_keyring_conf = /etc/ceph/{{ backend.cluster }}.{{ ceph_cinder_keyring }}
rbd_keyring_conf = /etc/ceph/{{ backend.cluster }}.client.{{ backend.user }}.keyring
rbd_secret_uuid = {{ cinder_rbd_secret_uuid }}
report_discard_supported = True
{% if backend.availability_zone is defined %}
@@ -167,6 +170,7 @@ backend_availability_zone = {{ backend.availability_zone }}
{% endif %}
{% endfor %}
{% endif %}
{% endif %}
{% if enable_cinder_backend_nfs | bool %}
[{{ cinder_backend_nfs_name }}]

View File

@@ -238,7 +238,9 @@ glance_backends:
glance_ceph_backends:
- name: "rbd"
type: "rbd"
cluster: "ceph"
cluster: "{{ ceph_cluster }}"
pool: "{{ ceph_glance_pool_name }}"
user: "{{ ceph_glance_user }}"
enabled: "{{ glance_backend_ceph | bool }}"
glance_store_backends: "{{ glance_backends | selectattr('enabled', 'equalto', true) | list + glance_ceph_backends | selectattr('enabled', 'equalto', true) | list }}"

View File

@@ -25,9 +25,11 @@
- Restart glance-api container
- name: Copy over ceph Glance keyrings
vars:
keyring: "{{ item.cluster }}.client.{{ item.user }}.keyring"
template:
src: "{{ node_custom_config }}/glance/{{ item.cluster }}.{{ ceph_glance_keyring }}"
dest: "{{ node_config_directory }}/glance-api/ceph/{{ item.cluster }}.{{ ceph_glance_keyring }}"
src: "{{ node_custom_config }}/glance/{{ keyring }}"
dest: "{{ node_config_directory }}/glance-api/ceph/{{ keyring }}"
mode: "0660"
become: true
with_items: "{{ glance_ceph_backends }}"

View File

@@ -68,8 +68,8 @@ filesystem_store_datadir = /var/lib/glance/images/
{% if glance_backend_ceph | bool %}
{% for backend in glance_ceph_backends %}
[{{ backend.name }}]
rbd_store_user = {{ ceph_glance_user }}
rbd_store_pool = {{ ceph_glance_pool_name }}
rbd_store_user = {{ backend.user }}
rbd_store_pool = {{ backend.pool }}
rbd_store_ceph_conf = /etc/ceph/{{ backend.cluster }}.conf
{% endfor %}
{% endif %}

View File

@@ -189,5 +189,3 @@ gnocchi_ks_users:
user: "{{ gnocchi_keystone_user }}"
password: "{{ gnocchi_keystone_password }}"
role: "admin"
gnocchi_ceph_cluster: "ceph"

View File

@@ -12,9 +12,9 @@
- name: Copy over ceph config for Gnocchi
merge_configs:
sources:
- "{{ node_custom_config }}/gnocchi/{{ gnocchi_ceph_cluster }}.conf"
- "{{ node_custom_config }}/gnocchi/{{ item.key }}/{{ gnocchi_ceph_cluster }}.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/ceph/{{ gnocchi_ceph_cluster }}.conf"
- "{{ node_custom_config }}/gnocchi/{{ ceph_cluster }}.conf"
- "{{ node_custom_config }}/gnocchi/{{ item.key }}/{{ ceph_cluster }}.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/ceph/{{ ceph_cluster }}.conf"
mode: "0660"
become: true
with_dict: "{{ gnocchi_services | select_services_enabled_and_mapped_to_host }}"
@@ -23,8 +23,8 @@
- name: Copy over ceph Gnocchi keyrings
template:
src: "{{ node_custom_config }}/gnocchi/{{ gnocchi_ceph_cluster }}.{{ ceph_gnocchi_keyring }}"
dest: "{{ node_config_directory }}/{{ item.key }}/ceph/{{ gnocchi_ceph_cluster }}.{{ ceph_gnocchi_keyring }}"
src: "{{ node_custom_config }}/gnocchi/{{ ceph_cluster }}.client.{{ ceph_gnocchi_user }}.keyring"
dest: "{{ node_config_directory }}/{{ item.key }}/ceph/{{ ceph_cluster }}.client.{{ ceph_gnocchi_user }}.keyring"
mode: "0660"
become: true
with_dict: "{{ gnocchi_services | select_services_enabled_and_mapped_to_host }}"

View File

@@ -82,8 +82,8 @@ file_basepath = /var/lib/gnocchi
driver = ceph
ceph_pool = {{ ceph_gnocchi_pool_name }}
ceph_username = {{ ceph_gnocchi_user }}
ceph_keyring = /etc/ceph/{{ gnocchi_ceph_cluster }}.{{ ceph_gnocchi_keyring }}
ceph_conffile = /etc/ceph/{{ gnocchi_ceph_cluster }}.conf
ceph_keyring = /etc/ceph/{{ ceph_cluster }}.client.{{ ceph_gnocchi_user }}.keyring
ceph_conffile = /etc/ceph/{{ ceph_cluster }}.conf
{% elif gnocchi_backend_storage == 'swift' %}
driver = swift
swift_authurl = {{ keystone_internal_url }}

View File

@@ -235,14 +235,14 @@ manila_ceph_backends:
- name: "cephfsnative1"
share_name: "CEPHFS1"
driver: "cephfsnative"
cluster: "ceph"
cluster: "{{ ceph_cluster }}"
enabled: "{{ enable_manila_backend_cephfs_native | bool }}"
protocols:
- "CEPHFS"
- name: "cephfsnfs1"
share_name: "CEPHFSNFS1"
driver: "cephfsnfs"
cluster: "ceph"
cluster: "{{ ceph_cluster }}"
enabled: "{{ enable_manila_backend_cephfs_nfs | bool }}"
protocols:
- "NFS"

View File

@@ -28,8 +28,8 @@
- name: Copy over ceph Manila keyrings
template:
src: "{{ node_custom_config }}/manila/{{ item.cluster }}.{{ ceph_manila_keyring }}"
dest: "{{ node_config_directory }}/manila-share/ceph/{{ item.cluster }}.{{ ceph_manila_keyring }}"
src: "{{ node_custom_config }}/manila/{{ item.cluster }}.client.{{ ceph_manila_user }}.keyring"
dest: "{{ node_config_directory }}/manila-share/ceph/{{ item.cluster }}.client.{{ ceph_manila_user }}.keyring"
mode: "0660"
become: true
with_items: "{{ manila_ceph_backends }}"

View File

@@ -85,6 +85,15 @@ nova_cell_config_validation:
# qemu (1, 6, 0) or later. Set to "" to disable.
nova_hw_disk_discard: "unmap"
nova_cell_ceph_backend:
cluster: "{{ ceph_cluster }}"
vms:
user: "{{ ceph_nova_user }}"
pool: "{{ ceph_nova_pool_name }}"
volumes:
user: "{{ ceph_cinder_user }}"
pool: "{{ ceph_cinder_pool_name }}"
####################
# Cells Options
####################
@@ -528,8 +537,6 @@ nova_notification_topics:
nova_enabled_notification_topics: "{{ nova_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
nova_ceph_cluster: "ceph"
####################
# VMware
####################
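
The new `nova_cell_ceph_backend` dict above replaces the removed
`nova_ceph_cluster` variable, grouping the cluster name with the vms and
volumes users and pools. A hedged sketch of a per-cell override
(illustrative values):

    nova_cell_ceph_backend:
      cluster: "ceph-az2"
      vms:
        user: "nova"
        pool: "vms"
      volumes:
        user: "cinder"
        pool: "volumes"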

View File

@@ -1,7 +1,7 @@
---
- name: Check nova keyring file
vars:
keyring: "{{ nova_ceph_cluster }}.{{ ceph_nova_keyring }}"
keyring: "{{ nova_cell_ceph_backend['cluster'] }}.client.{{ nova_cell_ceph_backend['vms']['user'] }}.keyring"
paths:
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ keyring }}"
- "{{ node_custom_config }}/nova/{{ keyring }}"
@@ -16,7 +16,7 @@
- name: Check cinder keyring file
vars:
keyring: "{{ nova_ceph_cluster }}.{{ ceph_cinder_keyring }}"
keyring: "{{ nova_cell_ceph_backend['cluster'] }}.client.{{ nova_cell_ceph_backend['volumes']['user'] }}.keyring"
paths:
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ keyring }}"
- "{{ node_custom_config }}/nova/{{ keyring }}"
@@ -85,8 +85,8 @@
vars:
service: "{{ nova_cell_services[item] }}"
paths:
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ nova_ceph_cluster }}.conf"
- "{{ node_custom_config }}/nova/{{ nova_ceph_cluster }}.conf"
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ nova_cell_ceph_backend['cluster'] }}.conf"
- "{{ node_custom_config }}/nova/{{ nova_cell_ceph_backend['cluster'] }}.conf"
template:
src: "{{ lookup('first_found', paths) }}"
dest: "{{ node_config_directory }}/{{ item }}/"
@@ -107,8 +107,8 @@
- name: Ensure /etc/ceph directory exists (host libvirt)
vars:
paths:
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ nova_ceph_cluster }}.conf"
- "{{ node_custom_config }}/nova/{{ nova_ceph_cluster }}.conf"
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ nova_cell_ceph_backend['cluster'] }}.conf"
- "{{ node_custom_config }}/nova/{{ nova_cell_ceph_backend['cluster'] }}.conf"
file:
path: "/etc/ceph/"
state: "directory"
@@ -120,11 +120,11 @@
- name: Copy over ceph.conf (host libvirt)
vars:
paths:
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ nova_ceph_cluster }}.conf"
- "{{ node_custom_config }}/nova/{{ nova_ceph_cluster }}.conf"
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ nova_cell_ceph_backend['cluster'] }}.conf"
- "{{ node_custom_config }}/nova/{{ nova_cell_ceph_backend['cluster'] }}.conf"
template:
src: "{{ lookup('first_found', paths) }}"
dest: "/etc/ceph/{{ nova_ceph_cluster }}.conf"
dest: "/etc/ceph/{{ nova_cell_ceph_backend['cluster'] }}.conf"
owner: "root"
group: "root"
mode: "0644"

View File

@@ -14,14 +14,14 @@
"perm": "0600"
}{% endif %}{% if nova_backend == "rbd" %},
{
"source": "{{ container_config_directory }}/ceph.{{ ceph_nova_keyring }}",
"dest": "/etc/ceph/ceph.{{ ceph_nova_keyring }}",
"source": "{{ container_config_directory }}/{{ nova_cell_ceph_backend['cluster'] }}.client.{{ nova_cell_ceph_backend['vms']['user'] }}.keyring",
"dest": "/etc/ceph/{{ nova_cell_ceph_backend['cluster'] }}.client.{{ nova_cell_ceph_backend['vms']['user'] }}.keyring",
"owner": "nova",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/{{ nova_ceph_cluster }}.conf",
"dest": "/etc/ceph/{{ nova_ceph_cluster }}.conf",
"source": "{{ container_config_directory }}/{{ nova_cell_ceph_backend['cluster'] }}.conf",
"dest": "/etc/ceph/{{ nova_cell_ceph_backend['cluster'] }}.conf",
"owner": "nova",
"perm": "0600"
}{% endif %}{% if nova_compute_virt_type == "vmware" and not vmware_vcenter_insecure | bool %},

View File

@@ -51,8 +51,8 @@
"merge": true
}{% endif %}{% if nova_backend == "rbd" %},
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"source": "{{ container_config_directory }}/{{ nova_cell_ceph_backend['cluster'] }}.conf",
"dest": "/etc/ceph/{{ nova_cell_ceph_backend['cluster'] }}.conf",
"owner": "nova",
"perm": "0600"
}{% endif %}{% if libvirt_enable_sasl | bool %},

View File

@@ -9,9 +9,9 @@ live_migration_inbound_addr = "{{ migration_interface_address }}"
{% endif %}
{% if nova_backend == "rbd" %}
images_type = rbd
images_rbd_pool = {{ ceph_nova_pool_name }}
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = {{ ceph_nova_user }}
images_rbd_pool = {{ nova_cell_ceph_backend['vms']['pool'] }}
images_rbd_ceph_conf = /etc/ceph/{{ nova_cell_ceph_backend['cluster'] }}.conf
rbd_user = {{ nova_cell_ceph_backend['vms']['user'] }}
disk_cachemodes="network=writeback"
{% if nova_hw_disk_discard != '' %}
hw_disk_discard = {{ nova_hw_disk_discard }}

View File

@@ -244,5 +244,3 @@ zun_ks_users:
user: "{{ zun_keystone_user }}"
password: "{{ zun_keystone_password }}"
role: "admin"
zun_ceph_cluster: "ceph"

View File

@@ -1,7 +1,7 @@
---
- name: Copying over ceph.conf for Zun
copy:
src: "{{ node_custom_config }}/zun/zun-compute/{{ zun_ceph_cluster }}.conf"
src: "{{ node_custom_config }}/zun/zun-compute/{{ ceph_cluster }}.conf"
dest: "{{ node_config_directory }}/zun-compute/"
mode: "0660"
become: true
@@ -10,7 +10,7 @@
- name: Copy over Ceph keyring files for zun-compute
copy:
src: "{{ node_custom_config }}/zun/zun-compute/{{ zun_ceph_cluster }}.{{ ceph_cinder_keyring }}"
src: "{{ node_custom_config }}/zun/zun-compute/{{ ceph_cluster }}.client.{{ ceph_cinder_user }}.keyring"
dest: "{{ node_config_directory }}/zun-compute/"
mode: "0660"
become: true

View File

@@ -8,15 +8,15 @@
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/{{ zun_ceph_cluster }}.{{ ceph_cinder_keyring }}",
"dest": "/etc/ceph/{{ zun_ceph_cluster }}.{{ ceph_cinder_keyring }}",
"source": "{{ container_config_directory }}/{{ ceph_cluster }}.client.{{ ceph_cinder_user }}.keyring",
"dest": "/etc/ceph/{{ ceph_cluster }}.client.{{ ceph_cinder_user }}.keyring",
"owner": "zun",
"perm": "0600",
"optional": {{ (not zun_configure_for_cinder_ceph | bool) | string | lower }}
},
{
"source": "{{ container_config_directory }}/{{ zun_ceph_cluster }}.conf",
"dest": "/etc/ceph/{{ zun_ceph_cluster }}.conf",
"source": "{{ container_config_directory }}/{{ ceph_cluster }}.conf",
"dest": "/etc/ceph/{{ ceph_cluster }}.conf",
"owner": "zun",
"perm": "0600",
"optional": {{ (not zun_configure_for_cinder_ceph | bool) | string | lower }}

View File

@@ -37,6 +37,36 @@ Ceph integration is configured for different OpenStack services independently.
Be sure to remove the leading tabs from your ``ceph.conf`` files when
copying them in the following sections.
When OpenStack services access Ceph via a Ceph client, the Ceph client
looks for a local keyring. Ceph presets the keyring setting with the
following four keyring names by default:
* ``/etc/ceph/$cluster.$name.keyring``
* ``/etc/ceph/$cluster.keyring``
* ``/etc/ceph/keyring``
* ``/etc/ceph/keyring.bin``
The ``$cluster`` metavariable found in the first two default keyring names
above is your Ceph cluster name as defined by the name of the Ceph
configuration file: for example, if the Ceph configuration file is named
``ceph.conf``, then your Ceph cluster name is ``ceph`` and the second name above
would be ``ceph.keyring``. The ``$name`` metavariable is the user type and
user ID: for example, given the user ``client.admin``, the first name above
would be ``ceph.client.admin.keyring``. This principle is applied in the
services documentation below.
.. note::
More information about user configuration and related keyrings can be found in the
official Ceph documentation at https://docs.ceph.com/en/latest/rados/operations/user-management/#keyring-management
.. note::
The examples below use the default ``$cluster`` and ``$user``, which can be
configured via kolla-ansible by setting ``ceph_cluster`` and the per-service
``$user`` variables, or at the host level (for nova) in the inventory file.
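
For example, a minimal override in ``/etc/kolla/globals.yml`` (a sketch
with illustrative values):

.. code-block:: yaml

   ceph_cluster: "ceph1"
   ceph_cinder_user: "cinder"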
Glance
------
@@ -52,7 +82,6 @@ for Ceph includes the following steps:
* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* ``ceph_glance_keyring`` (default: ``client.glance.keyring``)
* ``ceph_glance_user`` (default: ``glance``)
* ``ceph_glance_pool_name`` (default: ``images``)
@@ -70,7 +99,7 @@ for Ceph includes the following steps:
auth_service_required = cephx
auth_client_required = cephx
* Copy Ceph keyring to ``/etc/kolla/config/glance/ceph.<ceph_glance_keyring>``
* Copy Ceph keyring to ``/etc/kolla/config/glance/ceph.client.glance.keyring``
To configure multiple Ceph backends with Glance, which is useful
for multistore:
@@ -78,28 +107,28 @@ for multistore:
* Copy the Ceph configuration files into ``/etc/kolla/config/glance/`` using
different names for each
``/etc/kolla/config/glance/ceph.conf``
``/etc/kolla/config/glance/ceph1.conf``
.. path /etc/kolla/config/glance/ceph.conf
.. path /etc/kolla/config/glance/ceph1.conf
.. code-block:: ini
[global]
fsid = 1d89fec3-325a-4963-a950-c4afedd37fe3
keyring = /etc/ceph/ceph.client.glance.keyring
keyring = /etc/ceph/ceph1.client.glance.keyring
mon_initial_members = ceph-0
mon_host = 192.168.0.56
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
``/etc/kolla/config/glance/rbd1.conf``
``/etc/kolla/config/glance/ceph2.conf``
.. path /etc/kolla/config/glance/rbd1.conf
.. path /etc/kolla/config/glance/ceph2.conf
.. code-block:: ini
[global]
fsid = dbfea068-89ca-4d04-bba0-1b8a56c3abc8
keyring = /etc/ceph/rbd1.client.glance.keyring
keyring = /etc/ceph/ceph2.client.glance.keyring
mon_initial_members = ceph-0
mon_host = 192.10.0.100
auth_cluster_required = cephx
@@ -111,17 +140,21 @@ for multistore:
.. code-block:: yaml
glance_ceph_backends:
- name: "rbd"
- name: "ceph1-rbd"
type: "rbd"
cluster: "ceph"
cluster: "ceph1"
user: "glance"
pool: "images"
enabled: "{{ glance_backend_ceph | bool }}"
- name: "another-rbd"
- name: "ceph2-rbd"
type: "rbd"
cluster: "rbd1"
cluster: "ceph2"
user: "glance"
pool: "images"
enabled: "{{ glance_backend_ceph | bool }}"
* Copy Ceph keyring to ``/etc/kolla/config/glance/ceph.<ceph_glance_keyring>``
and analogously to ``/etc/kolla/config/glance/rbd1.<ceph_glance_keyring>``
* Copy Ceph keyring to ``/etc/kolla/config/glance/ceph1.client.glance.keyring``
and analogously to ``/etc/kolla/config/glance/ceph2.client.glance.keyring``
* For copy-on-write set the following in ``/etc/kolla/config/glance.conf``:
@@ -161,11 +194,8 @@ Cinder for Ceph includes following steps:
* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* ``ceph_cinder_keyring`` (default: ``client.cinder.keyring``)
* ``ceph_cinder_user`` (default: ``cinder``)
* ``ceph_cinder_pool_name`` (default: ``volumes``)
* ``ceph_cinder_backup_keyring``
(default: ``client.cinder-backup.keyring``)
* ``ceph_cinder_backup_user`` (default: ``cinder-backup``)
* ``ceph_cinder_backup_pool_name`` (default: ``backups``)
@@ -179,15 +209,15 @@ Cinder for Ceph includes following steps:
* Copy Ceph keyring files to:
* ``/etc/kolla/config/cinder/cinder-volume/ceph.<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/ceph.<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/ceph.<ceph_cinder_backup_keyring>``
* ``/etc/kolla/config/cinder/cinder-volume/ceph.client.cinder.keyring``
* ``/etc/kolla/config/cinder/cinder-backup/ceph.client.cinder.keyring``
* ``/etc/kolla/config/cinder/cinder-backup/ceph.client.cinder-backup.keyring``
.. note::
``cinder-backup`` requires two keyrings for accessing volumes
and backup pool.
``cinder-backup`` requires keyrings for accessing both the volumes
and backups pools.
To configure ``multiple Ceph backends`` with Cinder, which is useful
with availability zones:
@@ -195,9 +225,9 @@ the use with availability zones:
* Copy their Ceph configuration files into ``/etc/kolla/config/cinder/`` using
different names for each
``/etc/kolla/config/cinder/ceph.conf``
``/etc/kolla/config/cinder/ceph1.conf``
.. path /etc/kolla/config/cinder/ceph.conf
.. path /etc/kolla/config/cinder/ceph1.conf
.. code-block:: ini
[global]
@@ -208,9 +238,9 @@ the use with availability zones:
auth_service_required = cephx
auth_client_required = cephx
``/etc/kolla/config/cinder/rbd2.conf``
``/etc/kolla/config/cinder/ceph2.conf``
.. path /etc/kolla/config/cinder/rbd2.conf
.. path /etc/kolla/config/cinder/ceph2.conf
.. code-block:: ini
[global]
@@ -226,46 +256,63 @@ the use with availability zones:
.. code-block:: yaml
cinder_ceph_backends:
- name: "rbd-1"
cluster: "ceph"
- name: "ceph1-rbd"
cluster: "ceph1"
user: "cinder"
pool: "volumes"
enabled: "{{ cinder_backend_ceph | bool }}"
- name: "rbd-2"
cluster: "rbd2"
- name: "ceph2-rbd"
cluster: "ceph2"
user: "cinder"
pool: "volumes"
availability_zone: "az2"
enabled: "{{ cinder_backend_ceph | bool }}"
cinder_backup_ceph_backend:
name: "ceph2-backup-rbd"
cluster: "ceph2"
user: "cinder-backup"
pool: "backups"
type: rbd
enabled: "{{ enable_cinder_backup | bool }}"
* Copy Ceph keyring files for all Ceph backends:
* ``/etc/kolla/config/cinder/cinder-volume/ceph.<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/ceph.<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/ceph.<ceph_cinder_backup_keyring>``
* ``/etc/kolla/config/cinder/cinder-volume/rbd2.<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/rbd2.<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/rbd2.<ceph_cinder_backup_keyring>``
* ``/etc/kolla/config/cinder/cinder-volume/ceph1.client.cinder.keyring``
* ``/etc/kolla/config/cinder/cinder-backup/ceph1.client.cinder.keyring``
* ``/etc/kolla/config/cinder/cinder-backup/ceph2.client.cinder.keyring``
* ``/etc/kolla/config/cinder/cinder-backup/ceph2.client.cinder-backup.keyring``
.. note::
``cinder-backup`` requires two keyrings for accessing volumes
and backup pool.
``cinder-backup`` requires keyrings for accessing both the volumes
and backups pools.
Nova must also be configured to allow access to Cinder volumes:
* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* Copy Ceph config and keyring file(s) to:
* ``ceph_cinder_keyring`` (default: ``client.cinder.keyring``)
* Copy Ceph keyring file(s) to:
* ``/etc/kolla/config/nova/ceph.<ceph_cinder_keyring>``
* ``/etc/kolla/config/nova/ceph.conf``
* ``/etc/kolla/config/nova/ceph.client.cinder.keyring``
To configure a ``different Ceph backend`` per nova-compute host, which
is useful with availability zones:
* Copy Ceph keyring file to:
* Edit the inventory file as described below:
* ``/etc/kolla/config/nova/<hostname>/ceph.<ceph_cinder_keyring>``
.. code-block:: ini
[compute]
hostname1 ceph_cluster=ceph1
hostname2 ceph_cluster=ceph2
* Copy Ceph config and keyring file(s):
* ``/etc/kolla/config/nova/<hostname1>/ceph1.conf``
* ``/etc/kolla/config/nova/<hostname1>/ceph1.client.cinder.keyring``
* ``/etc/kolla/config/nova/<hostname2>/ceph2.conf``
* ``/etc/kolla/config/nova/<hostname2>/ceph2.client.cinder.keyring``
If ``zun`` is enabled and you wish to use Cinder volumes with it, Zun must
also be configured to allow access to Cinder volumes:
@@ -282,7 +329,7 @@ it must also be configured to allow access to Cinder volumes:
* Copy Ceph keyring file(s) to:
* ``/etc/kolla/config/zun/zun-compute/ceph.<ceph_cinder_keyring>``
* ``/etc/kolla/config/zun/zun-compute/ceph.client.cinder.keyring``
Nova
@@ -303,30 +350,37 @@ Configuring Nova for Ceph includes following steps:
* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* ``ceph_nova_keyring`` (by default it's the same as
``ceph_cinder_keyring``)
* ``ceph_nova_user`` (by default it's the same as ``ceph_cinder_user``)
* ``ceph_nova_pool_name`` (default: ``vms``)
* Copy Ceph configuration file to ``/etc/kolla/config/nova/ceph.conf``
* Copy Ceph keyring file(s) to:
* ``/etc/kolla/config/nova/ceph.<ceph_nova_keyring>``
* ``/etc/kolla/config/nova/ceph.client.nova.keyring``
.. note::
If you are using a Ceph deployment tool that generates separate Ceph
keys for Cinder and Nova, you will need to override
``ceph_nova_keyring`` and ``ceph_nova_user`` to match.
``ceph_nova_user`` to match.
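
A hedged ``/etc/kolla/globals.yml`` sketch for that case (assuming your
deployment tool created a dedicated ``client.nova`` key):

.. code-block:: yaml

   ceph_nova_user: "nova"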
To configure a ``different Ceph backend`` per nova-compute host, which
is useful with availability zones:
* Copy Ceph configuration file to ``/etc/kolla/config/nova/<hostname>/ceph.conf``
* Copy Ceph keyring file(s) to:
Edit the inventory file as described below:
* ``/etc/kolla/config/nova/<hostname>/ceph.<ceph_nova_keyring>``
.. code-block:: ini
[compute]
hostname1 ceph_cluster=ceph1
hostname2 ceph_cluster=ceph2
* Copy Ceph config and keyring file(s):
* ``/etc/kolla/config/nova/<hostname1>/ceph1.conf``
* ``/etc/kolla/config/nova/<hostname1>/ceph1.client.nova.keyring``
* ``/etc/kolla/config/nova/<hostname2>/ceph2.conf``
* ``/etc/kolla/config/nova/<hostname2>/ceph2.client.nova.keyring``
Gnocchi
-------
@@ -342,17 +396,13 @@ Configuring Gnocchi for Ceph includes following steps:
* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* ``ceph_gnocchi_keyring``
(default: ``client.gnocchi.keyring``)
* ``ceph_gnocchi_user`` (default: ``gnocchi``)
* ``ceph_gnocchi_pool_name`` (default: ``gnocchi``)
* ``ceph_gnocchi_conf``
(default: ``ceph.conf``)
* Copy Ceph configuration file to
``/etc/kolla/config/gnocchi/<ceph_gnocchi_conf>``
``/etc/kolla/config/gnocchi/ceph.conf``
* Copy Ceph keyring to
``/etc/kolla/config/gnocchi/ceph.<ceph_gnocchi_keyring>``
``/etc/kolla/config/gnocchi/ceph.client.gnocchi.keyring``
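
A hedged ``/etc/kolla/globals.yml`` sketch for a non-default Gnocchi pool
(illustrative values; the default pool is ``gnocchi``):

.. code-block:: yaml

   ceph_gnocchi_user: "gnocchi"
   ceph_gnocchi_pool_name: "metrics"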
Manila
------
@@ -368,7 +418,6 @@ for Ceph includes following steps:
* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* ``ceph_manila_keyring`` (default: ``client.manila.keyring``)
* ``ceph_manila_user`` (default: ``manila``)
.. note::
@@ -377,7 +426,7 @@ for Ceph includes following steps:
:manila-doc:`CephFS Native driver <admin/cephfs_driver.html#authorizing-the-driver-to-communicate-with-ceph>`.
* Copy Ceph configuration file to ``/etc/kolla/config/manila/ceph.conf``
* Copy Ceph keyring to ``/etc/kolla/config/manila/ceph.<ceph_manila_keyring>``
* Copy Ceph keyring to ``/etc/kolla/config/manila/ceph.client.manila.keyring``
To configure ``multiple Ceph backends`` with Manila, which is useful
with availability zones:
@@ -385,9 +434,9 @@ the use with availability zones:
* Copy their Ceph configuration files into ``/etc/kolla/config/manila/`` using
different names for each
``/etc/kolla/config/manila/ceph.conf``
``/etc/kolla/config/manila/ceph1.conf``
.. path /etc/kolla/config/manila/ceph.conf
.. path /etc/kolla/config/manila/ceph1.conf
.. code-block:: ini
[global]
@@ -398,9 +447,9 @@ the use with availability zones:
auth_service_required = cephx
auth_client_required = cephx
``/etc/kolla/config/manila/rbd2.conf``
``/etc/kolla/config/manila/ceph2.conf``
.. path /etc/kolla/config/manila/rbd2.conf
.. path /etc/kolla/config/manila/ceph2.conf
.. code-block:: ini
[global]
@@ -419,14 +468,14 @@ the use with availability zones:
- name: "cephfsnative1"
share_name: "CEPHFS1"
driver: "cephfsnative"
cluster: "ceph"
cluster: "ceph1"
enabled: "{{ enable_manila_backend_cephfs_native | bool }}"
protocols:
- "CEPHFS"
- name: "cephfsnative2"
share_name: "CEPHFS2"
driver: "cephfsnative"
cluster: "rbd2"
cluster: "ceph2"
enabled: "{{ enable_manila_backend_cephfs_native | bool }}"
protocols:
- "CEPHFS"
@@ -441,7 +490,7 @@ the use with availability zones:
- name: "cephfsnfs2"
share_name: "CEPHFSNFS2"
driver: "cephfsnfs"
cluster: "rbd2"
cluster: "ceph2"
enabled: "{{ enable_manila_backend_cephfs_nfs | bool }}"
protocols:
- "NFS"
@@ -449,8 +498,8 @@ the use with availability zones:
* Copy Ceph keyring files for all Ceph backends:
* ``/etc/kolla/config/manila/manila-share/ceph.<ceph_manila_keyring>``
* ``/etc/kolla/config/manila/manila-share/rbd2.<ceph_manila_keyring>``
* ``/etc/kolla/config/manila/manila-share/ceph1.client.manila.keyring``
* ``/etc/kolla/config/manila/manila-share/ceph2.client.manila.keyring``
* If using multiple filesystems (Ceph Pacific+), set
``manila_cephfs_filesystem_name`` in ``/etc/kolla/globals.yml`` to the

View File

@@ -75,8 +75,7 @@ Limitations and Recommendations
.. note::
If you have separate keys for nova and cinder, please be sure to set
``ceph_nova_keyring: ceph.client.nova.keyring`` and ``ceph_nova_user: nova``
in ``/etc/kolla/globals.yml``
``ceph_nova_user: nova`` in ``/etc/kolla/globals.yml``
Preparation (the foreword)
--------------------------

View File

@@ -465,26 +465,20 @@ workaround_ansible_issue_8743: yes
# Glance
#ceph_glance_user: "glance"
#ceph_glance_keyring: "client.{{ ceph_glance_user }}.keyring"
#ceph_glance_pool_name: "images"
# Cinder
#ceph_cinder_user: "cinder"
#ceph_cinder_keyring: "client.{{ ceph_cinder_user }}.keyring"
#ceph_cinder_pool_name: "volumes"
#ceph_cinder_backup_user: "cinder-backup"
#ceph_cinder_backup_keyring: "client.{{ ceph_cinder_backup_user }}.keyring"
#ceph_cinder_backup_pool_name: "backups"
# Nova
#ceph_nova_keyring: "{{ ceph_cinder_keyring }}"
#ceph_nova_user: "{{ ceph_cinder_user }}"
#ceph_nova_pool_name: "vms"
# Gnocchi
#ceph_gnocchi_user: "gnocchi"
#ceph_gnocchi_keyring: "client.{{ ceph_gnocchi_user }}.keyring"
#ceph_gnocchi_pool_name: "gnocchi"
# Manila
#ceph_manila_user: "manila"
#ceph_manila_keyring: "client.{{ ceph_manila_user }}.keyring"
#############################
# Keystone - Identity Options

View File

@@ -0,0 +1,17 @@
---
features:
- |
Improved the handling of multiple Ceph clusters in Kolla-Ansible
by allowing explicit configuration of users, pools, and cluster
names, following the official Ceph keyring format
``$cluster.client.$user.keyring``.
upgrade:
- |
The variables ``ceph_cinder_keyring``, ``ceph_cinder_backup_keyring``,
``ceph_glance_keyring``, ``ceph_gnocchi_keyring``, ``ceph_manila_keyring``,
and ``ceph_nova_keyring`` have been removed, and their values are now
automatically derived from the configurable Ceph users. Users who relied
on entirely different keyring names or custom user configurations should
ensure their setups align with the new convention, as described in the
`documentation
<https://docs.openstack.org/kolla-ansible/latest/reference/storage/external-ceph-guide.html>`__.