xionglingfeng 17e6e629f5 Allow cinder-volume to be configured to use NFS
Allow cinder-volume, nova-compute and nova-libvirtd to be configured to
use NFS. In order to mount and work with NFS shares, several containers
need the NFS client packages installed at build time.
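
For illustration, the client packages can be installed in the affected
Dockerfile templates with a per-distro conditional (a rough sketch;
the package names and the base_distro variable follow common Kolla
conventions and may differ per image):

    {% if base_distro in ['centos', 'oraclelinux', 'rhel'] %}
    RUN yum -y install nfs-utils && yum clean all
    {% elif base_distro in ['ubuntu', 'debian'] %}
    RUN apt-get update && apt-get -y install nfs-common && apt-get clean
    {% endif %}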

One notable change is the addition of an explicit bind mount for
nova-compute with shared mount propagation enabled.

According to docker-run(1), the shared mount propagation flag can only
be specified for bind-mounted Docker volumes, not for named volumes.
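
For example, docker-run(1) accepts the propagation mode as a bind mount
option (the path and container name below are illustrative):

    docker run -d --name nova_compute \
        -v /var/lib/nova/mnt:/var/lib/nova/mnt:shared \
        <nova-compute image>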

In an NFS setup, cinder-volume mounts the NFS shares so that it can
create and manage the Cinder volumes. When a new instance is created
with a Cinder volume, or a Cinder volume is attached to an existing
instance, nova-compute mounts the Cinder volume from the NFS share for
nova-libvirtd. In order for nova-libvirtd to then see those Cinder
volumes, shared mount propagation must be enabled on the Docker volume.
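
Whether propagation actually took effect can be verified on the host or
inside a container with findmnt (path illustrative):

    findmnt -o TARGET,PROPAGATION /var/lib/nova/mnt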

Remove the rpcbind container, as it is only necessary for operators
using NFSv3 or lower. There is currently no known need for it; the
container can be added back in the future should an operator require
it.

Co-authored-by: Ryan Hallisey <rhallise@redhat.com>
Co-authored-by: Andrew Widdersheim <amwiddersheim@gmail.com>
Change-Id: Iad77c05bce8876bdcc69b7ec22edd50e3bf48b9f
Closes-Bug: #1530515
Partially implements: blueprint nfs-support-in-cinder
2016-11-07 12:57:54 -05:00


[DEFAULT]
debug = {{ cinder_logging_debug }}
log_dir = /var/log/kolla/cinder
use_forwarded_for = true
# Set use_stderr to False or the logs will also be sent to stderr
# and collected by Docker
use_stderr = False
enable_v1_api = false
osapi_volume_workers = {{ openstack_service_workers }}
volume_name_template = volume-%s
glance_api_servers = {% for host in groups['glance-api'] %}{{ internal_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}glance-api{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ glance_api_port }}{% if not loop.last %},{% endif %}{% endfor %}
glance_num_retries = {{ groups['glance-api'] | length }}
glance_api_version = 2
os_region_name = {{ openstack_region_name }}
{% if cinder_enabled_backends %}
enabled_backends = {{ cinder_enabled_backends|map(attribute='name')|join(',') }}
{% endif %}
{% if service_name == "cinder-backup" and enable_ceph | bool and cinder_backend_ceph | bool %}
backup_driver = cinder.backup.drivers.ceph
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool = {{ ceph_cinder_backup_pool_name }}
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
{% endif %}
osapi_volume_listen = {{ api_interface_address }}
osapi_volume_listen_port = {{ cinder_api_port }}
api_paste_config = /etc/cinder/api-paste.ini
nova_catalog_info = compute:nova:internalURL
auth_strategy = keystone
transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
[oslo_messaging_notifications]
{% if enable_ceilometer | bool %}
driver = messagingv2
{% endif %}
[database]
connection = mysql+pymysql://{{ cinder_database_user }}:{{ cinder_database_password }}@{{ cinder_database_address }}/{{ cinder_database_name }}
max_retries = -1
[keystone_authtoken]
{% if orchestration_engine == 'KUBERNETES' %}
auth_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
{% else %}
auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
{% endif %}
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = {{ cinder_keystone_user }}
password = {{ cinder_keystone_password }}
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
{% if enable_cinder_backend_lvm | bool %}
[lvm-1]
volume_group = {{ cinder_volume_group }}
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_backend_name = lvm-1
iscsi_helper = tgtadm
iscsi_protocol = iscsi
{% elif enable_ceph | bool and cinder_backend_ceph | bool %}
[rbd-1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = {{ ceph_cinder_pool_name }}
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = {{ rbd_secret_uuid }}
report_discard_supported = True
{% endif %}
{% if enable_cinder_backend_nfs | bool %}
[nfs-1]
volume_driver = cinder.volume.drivers.nfs.NfsDriver
volume_backend_name = nfs-1
nfs_shares_config = /etc/cinder/nfs_shares
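# The nfs_shares file referenced above lists one NFS export per line,
# for example (hosts and paths are illustrative):
#   192.168.0.100:/export/cinder
#   nfs.example.com:/srv/cinder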
{% endif %}
[privsep_entrypoint]
helper_command = sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper --config-file /etc/cinder/cinder.conf