openstack-ansible/tests/roles/bootstrap-host/templates/user_variables_ceph.yml.j2
Dmitriy Rabotyagov bb1f0acbe2 [aio] Merge ceph configuration files
With the related bug [1] now fixed, we can finally merge the ceph
configuration files together.

[1] https://bugs.launchpad.net/openstack-ansible/+bug/1649381

Change-Id: I7c10e7fa49af1d3f9c8b81e31f64b300d85b6011
Signed-off-by: Dmitriy Rabotyagov <dmitriy.rabotyagov@cleura.com>
2025-09-22 14:12:08 +00:00

---
# Copyright 2017, Logan Vig <logan2211@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## ceph-ansible AIO settings
is_hci: true
monitor_interface: "{{ ('metal' in bootstrap_host_scenarios_expanded) | ternary('br-storage', 'eth2') }}" # Storage network in the AIO
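# Illustrative note on the ternary above: when 'metal' is present in
# bootstrap_host_scenarios_expanded the expression evaluates to br-storage,
# otherwise to eth2.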
public_network: "{{ storage_network }}"
lvm_volumes:
{% for osd in ceph_osd_images %}
  - data_vg: vg-{{ osd }}
    data: lv-{{ osd }}
{% endfor %}
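# Example of what the loop above renders, assuming ceph_osd_images were set
# to the hypothetical values ['ceph1', 'ceph2']:
#
#   lvm_volumes:
#     - data_vg: vg-ceph1
#       data: lv-ceph1
#     - data_vg: vg-ceph2
#       data: lv-ceph2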
journal_size: 100
osd_scenario: collocated
ceph_conf_overrides_custom:
  global:
    mon_max_pg_per_osd: 500
    osd_crush_chooseleaf_type: 0
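# Sketch of the expected result (assuming ceph_conf_overrides_custom is merged
# into ceph-ansible's ceph_conf_overrides): the keys above should land in the
# [global] section of the rendered ceph.conf, roughly:
#
#   [global]
#   mon_max_pg_per_osd = 500
#   osd_crush_chooseleaf_type = 0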
openstack_config: true # Ceph ansible automatically creates pools & keys
cinder_default_volume_type: aio_ceph
glance_ceph_client: glance
glance_default_store: rbd
glance_rbd_store_pool: images
nova_libvirt_images_rbd_pool: vms
# NOTE(noonedeadpunk): ceph bug to track the issue https://tracker.ceph.com/issues/46295
tempest_test_includelist_ceph:
  - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
# NOTE(noonedeadpunk): We want to run _only_ this test here, as otherwise we might be short on memory
tempest_test_search_includelist_pattern: tempest_test_includelist_ceph
cinder_backends:
  aio_ceph:
    volume_driver: cinder.volume.drivers.rbd.RBDDriver
    rbd_pool: volumes
    rbd_ceph_conf: /etc/ceph/ceph.conf
    rbd_store_chunk_size: 8
    rbd_exclusive_cinder_pool: true
    volume_backend_name: rbddriver
    rbd_user: "{% raw %}{{ cinder_ceph_client }}{% endraw %}"
    rbd_secret_uuid: "{% raw %}{{ cinder_ceph_client_uuid }}{% endraw %}"
    report_discard_supported: true
    extra_volume_types:
      - low-iops
      - high-iops
      - ultra-high-iops
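# Note on the raw/endraw markers around rbd_user and rbd_secret_uuid above:
# they keep those Jinja expressions un-rendered while this template is
# processed, so the generated user variables still contain the literal
# cinder_ceph_client / cinder_ceph_client_uuid lookups for Ansible to resolve
# at playbook runtime.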