c94e0b4b0e
The approach here is to use the availability zone as an imaginary rack. All hosts that are in the same AZ will be in the same imaginary rack. From Ceph's perspective this doesn't matter as it's just a bucket after all. This will give users the ability to further customize their ceph deployment. Change-Id: Ie25ac1b001db558d6a40fe3eaca014e8f4174241
49 lines
1.1 KiB
Plaintext
[global]
|
|
{% if old_auth %}
|
|
auth supported = {{ auth_supported }}
|
|
{% else %}
|
|
auth cluster required = {{ auth_supported }}
|
|
auth service required = {{ auth_supported }}
|
|
auth client required = {{ auth_supported }}
|
|
{% endif %}
|
|
keyring = /etc/ceph/$cluster.$name.keyring
|
|
mon host = {{ mon_hosts }}
|
|
fsid = {{ fsid }}
|
|
|
|
log to syslog = {{ use_syslog }}
|
|
err to syslog = {{ use_syslog }}
|
|
clog to syslog = {{ use_syslog }}
|
|
debug osd = {{ loglevel }}/5
|
|
|
|
{%- if ceph_public_network is string %}
|
|
public network = {{ ceph_public_network }}
|
|
{%- endif %}
|
|
{%- if ceph_cluster_network is string %}
|
|
cluster network = {{ ceph_cluster_network }}
|
|
{%- endif %}
|
|
|
|
{% if public_addr %}
|
|
public addr = {{ public_addr }}
|
|
{% endif %}
|
|
{% if cluster_addr %}
|
|
cluster addr = {{ cluster_addr }}
|
|
{%- endif %}
|
|
|
|
{% if crush_location %}
|
|
osd crush location = {{crush_location}}
|
|
{% endif %}
|
|
|
|
|
|
[mon]
|
|
keyring = /var/lib/ceph/mon/$cluster-$id/keyring
|
|
|
|
[mds]
|
|
keyring = /var/lib/ceph/mds/$cluster-$id/keyring
|
|
|
|
[osd]
|
|
keyring = /var/lib/ceph/osd/$cluster-$id/keyring
|
|
osd journal size = {{ osd_journal_size }}
|
|
filestore xattr use omap = true
|
|
journal dio = {{ dio }}
|
|
|