[aio] Merge ceph configuration files
With the related bug [1] being fixed, we can finally merge the ceph configuration files together.

[1] https://bugs.launchpad.net/openstack-ansible/+bug/1649381

Change-Id: I7c10e7fa49af1d3f9c8b81e31f64b300d85b6011
Signed-off-by: Dmitriy Rabotyagov <dmitriy.rabotyagov@cleura.com>
@@ -75,35 +75,3 @@
     vg: "vg-{{ item }}"
     size: 100%FREE
   loop: "{{ ceph_osd_images }}"
-
-# TODO(logan): Move these vars to user_variables.ceph.yml.j2 once LP #1649381
-# is fixed and eliminate this task.
-- name: Write ceph cluster config
-  ansible.builtin.copy:
-    content: |
-      ---
-      lvm_volumes:
-      {% for osd in ceph_osd_images %}
-        - data_vg: vg-{{ osd }}
-          data: lv-{{ osd }}
-      {% endfor %}
-      cinder_backends:
-        aio_ceph:
-          volume_driver: cinder.volume.drivers.rbd.RBDDriver
-          rbd_pool: volumes
-          rbd_ceph_conf: /etc/ceph/ceph.conf
-          rbd_store_chunk_size: 8
-          rbd_exclusive_cinder_pool: true
-          volume_backend_name: rbddriver
-          rbd_user: "{% raw %}{{ cinder_ceph_client }}{% endraw %}"
-          rbd_secret_uuid: "{% raw %}{{ cinder_ceph_client_uuid }}{% endraw %}"
-          report_discard_supported: true
-          extra_volume_types:
-            - low-iops
-            - high-iops
-            - ultra-high-iops
-    dest: /etc/openstack_deploy/user_ceph_aio.yml
-    force: false
-  become: false
-  tags:
-    - skip_ansible_lint
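For context, a minimal sketch of what the Jinja loop in the removed block renders to, assuming a hypothetical ceph_osd_images value of [ceph1, ceph2] (the real list comes from the bootstrap tooling and may differ):

    # Hypothetical rendered output for ceph_osd_images: [ceph1, ceph2]
    lvm_volumes:
      - data_vg: vg-ceph1
        data: lv-ceph1
      - data_vg: vg-ceph2
        data: lv-ceph2

The same loop is now added directly to the user variables template in the hunks below, which is what allows the separate copy task to be dropped.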
@@ -17,6 +17,11 @@
 is_hci: true
 monitor_interface: "{{ ('metal' in bootstrap_host_scenarios_expanded) | ternary('br-storage', 'eth2') }}" # Storage network in the AIO
 public_network: "{{ storage_network }}"
+lvm_volumes:
+{% for osd in ceph_osd_images %}
+  - data_vg: vg-{{ osd }}
+    data: lv-{{ osd }}
+{% endfor %}
 journal_size: 100
 osd_scenario: collocated
 ceph_conf_overrides_custom:
@@ -34,3 +39,19 @@ tempest_test_includelist_ceph:
   - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
 # NOTE(noonedeadpunk): We want here to run _only_ this test as otherwise we might be short on memory
 tempest_test_search_includelist_pattern: tempest_test_includelist_ceph
+
+cinder_backends:
+  aio_ceph:
+    volume_driver: cinder.volume.drivers.rbd.RBDDriver
+    rbd_pool: volumes
+    rbd_ceph_conf: /etc/ceph/ceph.conf
+    rbd_store_chunk_size: 8
+    rbd_exclusive_cinder_pool: true
+    volume_backend_name: rbddriver
+    rbd_user: "{% raw %}{{ cinder_ceph_client }}{% endraw %}"
+    rbd_secret_uuid: "{% raw %}{{ cinder_ceph_client_uuid }}{% endraw %}"
+    report_discard_supported: true
+    extra_volume_types:
+      - low-iops
+      - high-iops
+      - ultra-high-iops
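As a usage note, the {% raw %}...{% endraw %} markers are consumed when the .j2 template is rendered, leaving the inner expressions literal so Ansible resolves them at deploy time. A minimal sketch of the rendered result, assuming standard Jinja2 raw-block behaviour and the variable names shown in the diff:

    # Hypothetical rendered output: the raw markers are stripped, the inner
    # braces remain, and Ansible resolves these variables later.
    rbd_user: "{{ cinder_ceph_client }}"
    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"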