Set osd_pool_default_size based on deployment
The ceph.conf file packaged in the Ceph RPM sets 'osd_pool_default_size = 2'. This is a valid initial value for most deployments. The exception is for the AIO-SX single OSD installation (which is our default minimum AIO-SX configuration). In this deployment configuration, this value will produce a HEALTH_WARN specifying "Degraded data redundancy". This commit will set 'osd_pool_default_size' based on the deployment and specifically set it to '1' for the AIO-SX. This will provide a HEALTH_OK cluster on controller unlock. If/when additional OSDs are added, the 'system storage-backend-modify' command can be used to change the replication factor to provide a higher level of data redundancy. This change removes the long-standing need to run the following command when provisioning the AIO-SX: ceph osd pool ls | xargs -i ceph osd pool set {} size 1 This will also now enable automatic loading of the platform-integ-apps k8s application and subsequent loading of the rbd-provisioner for persistent volume claims on the AIO-SX. Change-Id: I901b339f1c7770aa16a7bbfecf193d0c1e5e9eaa Story: 2005424 Task: 33471 Signed-off-by: Robert Church <robert.church@windriver.com>
This commit is contained in:
parent
0897bdcb98
commit
d2f6c88f90
@ -1,2 +1,2 @@
|
|||||||
SRC_DIR="src"
|
SRC_DIR="src"
|
||||||
TIS_PATCH_VER=88
|
TIS_PATCH_VER=89
|
||||||
|
@ -53,19 +53,24 @@ class platform::ceph
|
|||||||
if $system_mode == 'simplex' {
|
if $system_mode == 'simplex' {
|
||||||
# 1 node configuration, a single monitor is available
|
# 1 node configuration, a single monitor is available
|
||||||
$mon_initial_members = $mon_0_host
|
$mon_initial_members = $mon_0_host
|
||||||
|
$osd_pool_default_size = 1
|
||||||
} else {
|
} else {
|
||||||
# 2 node configuration, we have a floating monitor
|
# 2 node configuration, we have a floating monitor
|
||||||
$mon_initial_members = $floating_mon_host
|
$mon_initial_members = $floating_mon_host
|
||||||
|
$osd_pool_default_size = 2
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
# Multinode & standard, any 2 monitors form a cluster
|
# Multinode & standard, any 2 monitors form a cluster
|
||||||
$mon_initial_members = undef
|
$mon_initial_members = undef
|
||||||
|
$osd_pool_default_size = 2
|
||||||
}
|
}
|
||||||
|
|
||||||
class { '::ceph':
|
class { '::ceph':
|
||||||
fsid => $cluster_uuid,
|
fsid => $cluster_uuid,
|
||||||
authentication_type => $authentication_type,
|
authentication_type => $authentication_type,
|
||||||
mon_initial_members => $mon_initial_members
|
mon_initial_members => $mon_initial_members,
|
||||||
|
osd_pool_default_size => $osd_pool_default_size,
|
||||||
|
osd_pool_default_min_size => 1
|
||||||
}
|
}
|
||||||
-> ceph_config {
|
-> ceph_config {
|
||||||
'mon/mon clock drift allowed': value => '.1';
|
'mon/mon clock drift allowed': value => '.1';
|
||||||
|
Loading…
Reference in New Issue
Block a user