Cinder can be configured with either an LVM or a Ceph backend, but enabling both only activates LVM, because the Ceph backend configuration is chained onto the LVM block via an elif branch. Split the backends into independent if blocks so both can be enabled at once. Change-Id: I57e515cf7cdd6377d60bfbc45a889553ce87207a Closes-Bug: #1654996
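A minimal sketch of the pattern being fixed (illustrative, not the exact diff): with an elif chain, the Ceph stanza can never render while the LVM condition is true, whereas independent if blocks emit every enabled backend.

    {# Before (broken): ceph is skipped whenever lvm is enabled #}
    {% if enable_cinder_backend_lvm | bool %}
    [lvm-1]
    {% elif enable_ceph | bool and cinder_backend_ceph | bool %}
    [rbd-1]
    {% endif %}

    {# After: independent blocks, both can render #}
    {% if enable_cinder_backend_lvm | bool %}
    [lvm-1]
    {% endif %}
    {% if enable_ceph | bool and cinder_backend_ceph | bool %}
    [rbd-1]
    {% endif %}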
142 lines
5.1 KiB
Django/Jinja
[DEFAULT]
debug = {{ cinder_logging_debug }}

log_dir = /var/log/kolla/cinder
use_forwarded_for = true

# Set use_stderr to False or the logs will also be sent to stderr
# and collected by Docker
use_stderr = False

enable_v1_api = false
osapi_volume_workers = {{ openstack_service_workers }}
volume_name_template = volume-%s

glance_api_servers = {% for host in groups['glance-api'] %}{{ internal_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}glance-api{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ glance_api_port }}{% if not loop.last %},{% endif %}{% endfor %}
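# Rendered example (hypothetical API hosts 10.0.0.11 and 10.0.0.12, default port 9292):
#   glance_api_servers = http://10.0.0.11:9292,http://10.0.0.12:9292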
glance_num_retries = {{ groups['glance-api'] | length }}
glance_api_version = 2

os_region_name = {{ openstack_region_name }}

{% if cinder_enabled_backends %}
enabled_backends = {{ cinder_enabled_backends|map(attribute='name')|join(',') }}
{% endif %}
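# Rendered example: with a (hypothetical) cinder_enabled_backends of
#   [{'name': 'lvm-1'}, {'name': 'rbd-1'}]
# this yields:
#   enabled_backends = lvm-1,rbd-1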
{% if service_name == "cinder-backup" and enable_ceph | bool and cinder_backend_ceph | bool %}
backup_driver = cinder.backup.drivers.ceph
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool = {{ ceph_cinder_backup_pool_name }}
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
{% elif cinder_backup_driver == "nfs" %}
backup_driver = cinder.backup.drivers.nfs
backup_mount_options = {{ cinder_backup_mount_options_nfs }}
backup_mount_point_base = /var/lib/cinder/backup
backup_share = {{ cinder_backup_share }}
backup_file_size = 327680000
{% elif cinder_backup_driver == "swift" %}
backup_driver = cinder.backup.drivers.swift
backup_swift_url = http://{{ kolla_internal_vip_address }}:{{ swift_proxy_server_port }}/v1/AUTH_
backup_swift_auth = per_user
backup_swift_auth_version = 1
backup_swift_user =
backup_swift_key =
{% endif %}
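# Per the conditions above, cinder-backup uses Ceph automatically when Ceph is
# enabled; otherwise select a driver by setting, for example, in globals.yml:
#   cinder_backup_driver: "nfs"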
osapi_volume_listen = {{ api_interface_address }}
osapi_volume_listen_port = {{ cinder_api_port }}

api_paste_config = /etc/cinder/api-paste.ini
nova_catalog_info = compute:nova:internalURL

auth_strategy = keystone

transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
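# Rendered example (hypothetical two-node RabbitMQ cluster, default port 5672):
#   transport_url = rabbit://openstack:s3cr3t@10.0.0.11:5672,openstack:s3cr3t@10.0.0.12:5672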
[oslo_messaging_notifications]
{% if enable_ceilometer | bool or enable_searchlight | bool %}
driver = messagingv2
topics = notifications
{% else %}
driver = noop
{% endif %}

[database]
connection = mysql+pymysql://{{ cinder_database_user }}:{{ cinder_database_password }}@{{ cinder_database_address }}/{{ cinder_database_name }}
max_retries = -1
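# Rendered example (hypothetical credentials and database VIP):
#   connection = mysql+pymysql://cinder:s3cr3t@10.0.0.10:3306/cinder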
[keystone_authtoken]
{% if orchestration_engine == 'KUBERNETES' %}
auth_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
{% else %}
auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
{% endif %}
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = {{ cinder_keystone_user }}
password = {{ cinder_keystone_password }}

memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
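# Rendered example (hypothetical hosts, default memcached port 11211):
#   memcached_servers = 10.0.0.11:11211,10.0.0.12:11211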
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

{% if enable_cinder_backend_lvm | bool %}
[lvm-1]
volume_group = {{ cinder_volume_group }}
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_backend_name = lvm-1
iscsi_helper = tgtadm
iscsi_protocol = iscsi
{% endif %}
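# To steer volumes to this backend, map a volume type to its backend name, e.g.:
#   openstack volume type create lvm
#   openstack volume type set --property volume_backend_name=lvm-1 lvm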
{% if enable_ceph | bool and cinder_backend_ceph | bool %}
[rbd-1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = {{ ceph_cinder_pool_name }}
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = {{ rbd_secret_uuid }}
report_discard_supported = True
{% endif %}
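# Note: rbd_secret_uuid must match the libvirt secret that holds the cinder
# Ceph keyring on the compute hosts (verify with `virsh secret-list`).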
{% if enable_cinder_backend_nfs | bool %}
[nfs-1]
volume_driver = cinder.volume.drivers.nfs.NfsDriver
volume_backend_name = nfs-1
nfs_shares_config = /etc/cinder/nfs_shares
{% endif %}
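# /etc/cinder/nfs_shares lists one export per line, e.g. (hypothetical server):
#   192.168.1.200:/export/cinder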
{% if enable_cinder_backend_hnas_iscsi | bool %}
[hnas-iscsi]
volume_driver = cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver
volume_backend_name = {{ hnas_volume_backend_name }}
hnas_username = {{ hnas_username }}
hnas_password = {{ hnas_password }}
hnas_mgmt_ip0 = {{ hnas_mgmt_ip0 }}
hnas_chap_enabled = True

hnas_svc0_volume_type = {{ hnas_svc0_volume_type }}
hnas_svc0_hdp = {{ hnas_svc0_hdp }}
hnas_svc0_iscsi_ip = {{ hnas_svc0_iscsi_ip }}
{% endif %}
[privsep_entrypoint]
helper_command = sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper --config-file /etc/cinder/cinder.conf