[CEPH] OSH-INFRA: use loopback devices for ceph osds

- This is to make use of loopback devices for Ceph OSDs, since
  support for directory-backed OSDs is being deprecated.

- Move to bluestore from filestore for ceph-osds.
- Separate the DB and WAL partitions from the data device so that the gates
  validate the scenario where a faster storage disk backs the DB and WAL.

Change-Id: Ief6de17c53d6cb57ef604895fdc66dc6c604fd89
Chinasubbareddy Mallavarapu 2020-05-13 12:32:52 -05:00 committed by chinasubbareddy mallavarapu
parent b1e66fd308
commit 3bde9f5b90
13 changed files with 103 additions and 20 deletions
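Once the gates deploy the charts with these overrides, a quick way to confirm the OSDs really came up as bluestore with their DB/WAL on the separate loop device is to query the OSD metadata. A minimal verification sketch; the namespace, label selector and OSD id are assumptions, not part of this change:

# Verification sketch only; namespace, labels and OSD id are assumed, adjust to the deployment.
MON_POD=$(kubectl -n ceph get pods -l application=ceph,component=mon \
  -o jsonpath='{.items[0].metadata.name}')
# Expect "osd_objectstore": "bluestore" plus bluefs db/wal entries pointing at /dev/loop1.
kubectl -n ceph exec "${MON_POD}" -- ceph osd metadata 0 | grep -E 'osd_objectstore|bluefs'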


@@ -0,0 +1 @@
../common/019-setup-ceph-loopback-device.sh


@@ -0,0 +1,13 @@
#!/bin/bash
set -xe
sudo df -lh
sudo lsblk
sudo mkdir -p /var/lib/openstack-helm/ceph
sudo truncate -s 10G /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo truncate -s 8G /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
sudo losetup /dev/loop0 /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo losetup /dev/loop1 /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
# let's check the devices
sudo df -lh
sudo lsblk
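Not part of the gate change, but handy when re-running this script on a node where the loop devices are already attached: detach them and remove the backing files first. A sketch using the same paths as above:

# Optional cleanup before a re-run (illustrative; not executed by the gates).
sudo losetup -d /dev/loop0 /dev/loop1 || true
sudo rm -f /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img \
           /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img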


@@ -0,0 +1 @@
../multinode/019-setup-ceph-loopback-device.sh


@@ -0,0 +1,13 @@
#!/bin/bash
set -xe
sudo df -lh
sudo lsblk
sudo mkdir -p /var/lib/openstack-helm/ceph
sudo truncate -s 10G /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo truncate -s 8G /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
sudo losetup /dev/loop0 /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo losetup /dev/loop1 /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
# let's check the devices
sudo df -lh
sudo lsblk


@@ -69,11 +69,15 @@ conf:
   storage:
     osd:
       - data:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/osd-one
-        journal:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/journal-one
+          type: bluestore
+          location: /dev/loop0
+        block_db:
+          location: /dev/loop1
+          size: "5GB"
+        block_wal:
+          location: /dev/loop1
+          size: "2GB"
 jobs:
   ceph_defragosds:
     # Execute every 15 minutes for gates
@@ -94,6 +98,8 @@ manifests:
   cronjob_defragosds: true
   deployment_cephfs_provisioner: false
   job_cephfs_client_key: false
+deploy:
+  tool: "ceph-volume"
 EOF
 for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
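For reference, the new deploy.tool: "ceph-volume" value points the ceph-osd chart at its ceph-volume provisioning path, and the storage values above correspond roughly to preparing a bluestore OSD with RocksDB and WAL on the second device. The sketch below is illustrative only; the VG/LV names are made up, and in the gates the chart drives ceph-volume itself, carving the 5GB DB and 2GB WAL out of /dev/loop1 per the size fields:

# Rough manual equivalent of the values above; VG/LV names are hypothetical.
ceph-volume lvm prepare --bluestore \
  --data /dev/loop0 \
  --block.db ceph-db-wal/osd0-db \
  --block.wal ceph-db-wal/osd0-wal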


@@ -0,0 +1 @@
../common/019-setup-ceph-loopback-device.sh


@@ -0,0 +1 @@
../common/019-setup-ceph-loopback-device.sh


@@ -0,0 +1 @@
../common/019-setup-ceph-loopback-device.sh


@@ -159,16 +159,22 @@ conf:
   storage:
     osd:
       - data:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/osd-one
-        journal:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/journal-one
+          type: bluestore
+          location: /dev/loop0
+        block_db:
+          location: /dev/loop1
+          size: "5GB"
+        block_wal:
+          location: /dev/loop1
+          size: "2GB"
 pod:
   replicas:
     mds: 1
     mgr: 1
     rgw: 1
+deploy:
+  tool: "ceph-volume"
 jobs:
   ceph_defragosds:
     # Execute every 15 minutes for gates


@@ -0,0 +1,21 @@
#!/bin/bash
set -xe
sudo df -lh
sudo lsblk
sudo mkdir -p /var/lib/openstack-helm/ceph
sudo truncate -s 10G /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo truncate -s 8G /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
sudo losetup /dev/loop0 /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
sudo losetup /dev/loop1 /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
# second set of loopback devices for tenant-ceph
sudo mkdir -p /var/lib/openstack-helm/tenant-ceph
sudo truncate -s 10G /var/lib/openstack-helm/tenant-ceph/ceph-osd-data-loopbackfile.img
sudo truncate -s 8G /var/lib/openstack-helm/tenant-ceph/ceph-osd-db-wal-loopbackfile.img
sudo losetup /dev/loop2 /var/lib/openstack-helm/tenant-ceph/ceph-osd-data-loopbackfile.img
sudo losetup /dev/loop3 /var/lib/openstack-helm/tenant-ceph/ceph-osd-db-wal-loopbackfile.img
# let's check the devices
sudo df -lh
sudo lsblk


@@ -93,11 +93,14 @@ conf:
   storage:
     osd:
       - data:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/osd-one
-        journal:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/journal-one
+          type: bluestore
+          location: /dev/loop0
+        block_db:
+          location: /dev/loop1
+          size: "5GB"
+        block_wal:
+          location: /dev/loop1
+          size: "2GB"
 storageclass:
   rbd:
     ceph_configmap_name: ceph-etc
@@ -111,6 +114,8 @@ monitoring:
   enabled: true
   ceph_mgr:
     port: 9283
+deploy:
+  tool: "ceph-volume"
 EOF
 for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do


@@ -131,13 +131,18 @@ conf:
   storage:
     osd:
       - data:
-          type: directory
-          location: /var/lib/openstack-helm/tenant-ceph/osd/osd-one
-        journal:
-          type: directory
-          location: /var/lib/openstack-helm/tenant-ceph/osd/journal-one
+          type: bluestore
+          location: /dev/loop2
+        block_db:
+          location: /dev/loop3
+          size: "5GB"
+        block_wal:
+          location: /dev/loop3
+          size: "2GB"
     mon:
       directory: /var/lib/openstack-helm/tenant-ceph/mon
+deploy:
+  tool: "ceph-volume"
 EOF
 for CHART in ceph-mon ceph-osd ceph-client; do


@@ -53,6 +53,7 @@
     post-run: playbooks/osh-infra-collect-logs.yaml
     vars:
       gate_scripts:
+        - ./tools/deployment/multinode/019-setup-ceph-loopback-device.sh
         - ./tools/deployment/multinode/010-deploy-docker-registry.sh
         - ./tools/deployment/multinode/020-ingress.sh
         - ./tools/deployment/multinode/030-ceph.sh
@@ -95,6 +96,7 @@
     post-run: playbooks/osh-infra-collect-logs.yaml
     vars:
      gate_scripts:
+        - ./tools/deployment/tenant-ceph/019-setup-ceph-loopback-device.sh
         - ./tools/deployment/tenant-ceph/010-relabel-nodes.sh
         - ./tools/deployment/tenant-ceph/020-ingress.sh
         - ./tools/deployment/tenant-ceph/030-ceph.sh
@@ -132,6 +134,7 @@
     vars:
       gate_scripts:
         - ./tools/deployment/osh-infra-logging/000-install-packages.sh
+        - ./tools/deployment/osh-infra-logging/019-setup-ceph-loopback-device.sh
         - ./tools/deployment/osh-infra-logging/005-deploy-k8s.sh
         - ./tools/deployment/osh-infra-logging/010-ingress.sh
         - ./tools/deployment/osh-infra-logging/020-ceph.sh
@@ -156,6 +159,7 @@
     vars:
       gate_scripts:
         - ./tools/deployment/osh-infra-kafka/000-install-packages.sh
+        - ./tools/deployment/osh-infra-kafka/019-setup-ceph-loopback-device.sh
         - ./tools/deployment/osh-infra-kafka/005-deploy-k8s.sh
         - ./tools/deployment/osh-infra-kafka/010-ingress.sh
         - ./tools/deployment/osh-infra-kafka/020-ceph.sh
@@ -270,6 +274,7 @@
       feature_gates: apparmor
       gate_scripts:
         - ./tools/deployment/apparmor/000-install-packages.sh
+        - ./tools/deployment/apparmor/019-setup-ceph-loopback-device.sh
         - ./tools/deployment/apparmor/001-setup-apparmor-profiles.sh
         - ./tools/deployment/apparmor/005-deploy-k8s.sh
         - ./tools/deployment/apparmor/015-ingress.sh
@@ -305,6 +310,7 @@
       feature_gates: apparmor
       gate_scripts:
         - ./tools/deployment/osh-infra-logging/000-install-packages.sh
+        - ./tools/deployment/osh-infra-logging/019-setup-ceph-loopback-device.sh
         - ./tools/deployment/osh-infra-logging/005-deploy-k8s.sh
         - ./tools/deployment/osh-infra-logging/010-ingress.sh
         - ./tools/deployment/osh-infra-logging/020-ceph.sh
@@ -334,6 +340,7 @@
       feature_gates: apparmor
       gate_scripts:
         - ./tools/deployment/openstack-support/000-install-packages.sh
+        - ./tools/deployment/openstack-support/019-setup-ceph-loopback-device.sh
         - ./tools/deployment/openstack-support/005-deploy-k8s.sh
         - ./tools/deployment/openstack-support/007-namespace-config.sh
         - ./tools/deployment/openstack-support/010-ingress.sh
@@ -380,6 +387,7 @@
     vars:
       gate_scripts:
         - ./tools/deployment/openstack-support/000-install-packages.sh
+        - ./tools/deployment/openstack-support/019-setup-ceph-loopback-device.sh
         - ./tools/deployment/openstack-support/005-deploy-k8s.sh
         - ./tools/deployment/openstack-support/007-namespace-config.sh
         - ./tools/deployment/openstack-support/010-ingress.sh
@@ -442,6 +450,7 @@
     nodeset: openstack-helm-single-node
     vars:
       gate_scripts:
+        - ./tools/deployment/elastic-beats/019-setup-ceph-loopback-device.sh
         - ./tools/deployment/elastic-beats/005-deploy-k8s.sh
         - ./tools/deployment/elastic-beats/020-ingress.sh
         - ./tools/deployment/elastic-beats/030-ceph.sh