kolla-ansible/ansible/roles/ceph/tasks/bootstrap_osds.yml

---
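# Bootstrap Ceph OSD disks on this host: disks labelled KOLLA_CEPH_OSD_BOOTSTRAP
# (and KOLLA_CEPH_OSD_CACHE_BOOTSTRAP for cache OSDs) are discovered with the
# find_disks module running inside the kolla_toolbox container, then prepared by
# one-shot ceph-osd bootstrap containers.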
- name: Looking up disks to bootstrap for Ceph OSDs
  command: docker exec -t kolla_toolbox sudo -E /usr/bin/ansible localhost
           -m find_disks
           -a "partition_name='KOLLA_CEPH_OSD_BOOTSTRAP' match_mode='prefix' use_udev={{ kolla_ceph_use_udev }}"
  register: osd_lookup
  changed_when: "{{ osd_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
  failed_when: osd_lookup.stdout.split()[2] != 'SUCCESS'
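# kolla_toolbox prints "localhost | SUCCESS => {...json...}"; the JSON payload
# after that marker is parsed below to build the list of candidate disks.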
- name: Parsing disk info for Ceph OSDs
  set_fact:
    osds_bootstrap: "{{ (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
- name: Looking up disks to bootstrap for Ceph Cache OSDs
  command: docker exec -t kolla_toolbox sudo -E /usr/bin/ansible localhost
           -m find_disks
           -a "partition_name='KOLLA_CEPH_OSD_CACHE_BOOTSTRAP' match_mode='prefix' use_udev={{ kolla_ceph_use_udev }}"
  register: osd_cache_lookup
  changed_when: "{{ osd_cache_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
  failed_when: osd_cache_lookup.stdout.split()[2] != 'SUCCESS'
- name: Parsing disk info for Ceph Cache OSDs
  set_fact:
    osds_cache_bootstrap: "{{ (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
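# Safety check: prompt the operator before wiping a disk that already carries
# partitions other than the bootstrap label partition, unless ceph_osd_wipe_disk
# is set to the opt-out value in globals.yml.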
- pause:
    prompt: |
      WARNING: It seems {{ item.device }} is marked to be wiped and partitioned for Ceph data and
      a co-located journal, but appears to contain other existing partitions (>1).
      If you are sure you want this disk to be *wiped* for use with Ceph, press enter.
      Otherwise, press Ctrl-C, then 'A'. (You can disable this check by setting
      ceph_osd_wipe_disk: 'yes-i-really-really-mean-it' within globals.yml)
  with_items: "{{ osds_bootstrap|default([]) }}"
  when:
    - item.external_journal | bool == False
    - item.device.split('/')[2] in ansible_devices  # if the device is not present in the host facts (e.g. a loopback), we don't need to warn the user
    - ansible_devices[item.device.split('/')[2]].partitions|count > 1
    - ceph_osd_wipe_disk != "yes-i-really-really-mean-it"
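# Run a one-shot bootstrap container per discovered disk; the device, partition
# and journal layout are passed to the ceph-osd image through environment
# variables.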
- name: Bootstrapping Ceph OSDs
  kolla_docker:
    action: "start_container"
    common_options: "{{ docker_common_options }}"
    detach: False
    environment:
      KOLLA_BOOTSTRAP:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
      OSD_DEV: "{{ item.1.device }}"
      OSD_PARTITION: "{{ item.1.partition }}"
      OSD_PARTITION_NUM: "{{ item.1.partition_num }}"
      JOURNAL_DEV: "{{ item.1.journal_device }}"
      JOURNAL_PARTITION: "{{ item.1.journal }}"
      JOURNAL_PARTITION_NUM: "{{ item.1.journal_num }}"
      USE_EXTERNAL_JOURNAL: "{{ item.1.external_journal | bool }}"
      OSD_FILESYSTEM: "{{ ceph_osd_filesystem }}"
      OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
      HOSTNAME: "{{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}"
    image: "{{ ceph_osd_image_full }}"
    labels:
      BOOTSTRAP:
    name: "bootstrap_osd_{{ item.0 }}"
    privileged: True
    restart_policy: "never"
    volumes:
      - "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
      - "/etc/localtime:/etc/localtime:ro"
      - "/dev/:/dev/"
      - "kolla_logs:/var/log/kolla/"
  with_indexed_items: "{{ osds_bootstrap|default([]) }}"
- pause:
    prompt: |
      WARNING: It seems {{ item.device }} is marked to be wiped and partitioned for Ceph data and
      a co-located journal, but appears to contain other existing partitions (>1).
      If you are sure you want this disk to be *wiped* for use with Ceph, press enter.
      Otherwise, press Ctrl-C, then 'A'. (You can disable this check by setting
      ceph_osd_wipe_disk: 'yes-i-really-really-mean-it' within globals.yml)
  with_items: "{{ osds_cache_bootstrap|default([]) }}"
  when:
    - item.external_journal | bool == False
    - item.device.split('/')[2] in ansible_devices  # if the device is not present in the host facts (e.g. a loopback), we don't need to warn the user
    - ansible_devices[item.device.split('/')[2]].partitions|count > 1
    - ceph_osd_wipe_disk != "yes-i-really-really-mean-it"
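# Same bootstrap flow as the data OSDs above, with the CEPH_CACHE flag added so
# the image prepares the disk as a cache OSD.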
- name: Bootstrapping Ceph Cache OSDs
  kolla_docker:
    action: "start_container"
    common_options: "{{ docker_common_options }}"
    detach: False
    environment:
      KOLLA_BOOTSTRAP:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
      CEPH_CACHE:
      OSD_DEV: "{{ item.1.device }}"
      OSD_PARTITION: "{{ item.1.partition }}"
      OSD_PARTITION_NUM: "{{ item.1.partition_num }}"
      JOURNAL_DEV: "{{ item.1.journal_device }}"
      JOURNAL_PARTITION: "{{ item.1.journal }}"
      JOURNAL_PARTITION_NUM: "{{ item.1.journal_num }}"
      USE_EXTERNAL_JOURNAL: "{{ item.1.external_journal | bool }}"
      OSD_FILESYSTEM: "{{ ceph_osd_filesystem }}"
      OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
      HOSTNAME: "{{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}"
    image: "{{ ceph_osd_image_full }}"
    labels:
      BOOTSTRAP:
    name: "bootstrap_osd_cache_{{ item.0 }}"
    privileged: True
    restart_policy: "never"
    volumes:
      - "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
      - "/etc/localtime:/etc/localtime:ro"
      - "/dev/:/dev/"
      - "kolla_logs:/var/log/kolla/"
  with_indexed_items: "{{ osds_cache_bootstrap|default([]) }}"