kolla-ansible/ansible/roles/nova/templates/nova.conf.j2
Kevin Fox 38912d8806 Cinder endpoint wrong fix
kolla genconfig points nova at the cinder v1 endpoint rather than v2.
Pointing at v1 is now broken, at least when cinder is backed by ceph
and the release is >= mitaka.

Closes-Bug: #1633758

Change-Id: Idf4a26b37587f1dabe2de0c1ffbddb8c08ee3bdb
2016-10-15 12:39:08 -07:00


# nova.conf
[DEFAULT]
debug = {{ nova_logging_debug }}
log_dir = /var/log/kolla/nova
use_forwarded_for = true
api_paste_config = /etc/nova/api-paste.ini
state_path = /var/lib/nova
{% if kolla_enable_tls_external | bool %}
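# TLS is terminated at the external proxy, so trust X-Forwarded-Proto to recover the original request scheme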
secure_proxy_ssl_header = X-Forwarded-Proto
{% endif %}
osapi_compute_listen = {{ api_interface_address }}
osapi_compute_listen_port = {{ nova_api_port }}
osapi_compute_workers = {{ openstack_service_workers }}
metadata_workers = {{ openstack_service_workers }}
metadata_listen = {{ api_interface_address }}
metadata_listen_port = {{ nova_metadata_port }}
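# Neutron provides networking and security groups; the no-op firewall driver stops nova from programming duplicate iptables rules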
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
scheduler_max_attempts = 10
{% if neutron_plugin_agent == "openvswitch" %}
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
{% elif neutron_plugin_agent == "linuxbridge" %}
linuxnet_interface_driver = nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
{% endif %}
allow_resize_to_same_host = true
{% if enable_ironic | bool %}
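# The ironic-aware host manager lets the scheduler place instances on bare metal nodes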
scheduler_host_manager = ironic_host_manager
{% endif %}
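# Compute driver selection: ironic for bare metal, the fake driver for scale testing, libvirt otherwise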
{% if service_name == "nova-compute-ironic" %}
compute_driver = ironic.IronicDriver
vnc_enabled = False
ram_allocation_ratio = 1.0
reserved_host_memory_mb = 0
{% elif enable_nova_fake | bool %}
scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
host = {{ ansible_hostname }}_{{ item }}
compute_driver = fake.FakeDriver
{% else %}
compute_driver = libvirt.LibvirtDriver
{% endif %}
# Though my_ip is not used directly, lots of other variables use $my_ip
my_ip = {{ api_interface_address }}
{% if enable_ceilometer | bool %}
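# Emit hourly usage audit records and state-change notifications for ceilometer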
instance_usage_audit = True
instance_usage_audit_period = hour
notify_on_state_change = vm_and_task_state
{% if enable_watcher | bool %}
compute_monitors = nova.compute.monitors.cpu.virt_driver
{% endif %}
{% endif %}
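# RabbitMQ transport URL with one entry per broker in the cluster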
transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
[conductor]
workers = {{ openstack_service_workers }}
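# Console access is served either by noVNC or by the SPICE HTML5 proxy, depending on nova_console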
{% if nova_console == 'novnc' %}
[vnc]
novncproxy_host = {{ api_interface_address }}
novncproxy_port = {{ nova_novncproxy_port }}
vncserver_listen = {{ api_interface_address }}
vncserver_proxyclient_address = {{ api_interface_address }}
{% if inventory_hostname in groups['compute'] %}
novncproxy_base_url = {{ public_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}kolla_kubernetes_external_vip{% else %}{{ kolla_external_fqdn }}{% endif %}:{{ nova_novncproxy_port }}/vnc_auto.html
{% endif %}
{% elif nova_console == 'spice' %}
[vnc]
# We have to turn off vnc to use spice
enabled = false
[spice]
enabled = true
server_listen = {{ api_interface_address }}
server_proxyclient_address = {{ api_interface_address }}
{% if inventory_hostname in groups['compute'] %}
html5proxy_base_url = {{ public_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}kolla_kubernetes_external_vip{% else %}{{ kolla_external_fqdn }}{% endif %}:{{ nova_spicehtml5proxy_port }}/spice_auto.html
{% endif %}
html5proxy_host = {{ api_interface_address }}
html5proxy_port = {{ nova_spicehtml5proxy_port }}
{% endif %}
{% if service_name == "nova-compute-ironic" %}
[ironic]
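# Credentials and endpoint used by nova-compute-ironic to reach the ironic API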
username = {{ ironic_keystone_user }}
password = {{ ironic_keystone_password }}
auth_url = {{ openstack_auth.auth_url }}/v3
auth_type = password
project_name = service
user_domain_name = default
project_domain_name = default
api_endpoint = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_api_port }}/v1
{% endif %}
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[glance]
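# All glance API endpoints; retries are allowed once per configured server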
api_servers = {% for host in groups['glance-api'] %}{{ internal_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}glance-api{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ glance_api_port }}{% if not loop.last %},{% endif %}{% endfor %}
num_retries = {{ groups['glance-api'] | length }}
[cinder]
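# Look up the cinder v2 endpoint (service_type:service_name:endpoint_type) from the service catalog;
# the v1 API no longer works here, at least with ceph-backed cinder on mitaka and later (bug 1633758)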
catalog_info = volumev2:cinderv2:internalURL
[neutron]
{% if orchestration_engine == 'KUBERNETES' %}
url = {{ internal_protocol }}://neutron-server:{{ neutron_server_port }}
{% else %}
url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ neutron_server_port }}
{% endif %}
metadata_proxy_shared_secret = {{ metadata_secret }}
service_metadata_proxy = true
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_name = default
user_domain_id = default
project_name = service
username = {{ neutron_keystone_user }}
password = {{ neutron_keystone_password }}
[database]
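# Main nova database; the separate API database below holds deployment-wide records such as flavors and instance mappings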
connection = mysql+pymysql://{{ nova_database_user }}:{{ nova_database_password }}@{{ nova_database_address }}/{{ nova_database_name }}
max_pool_size = 50
max_overflow = 1000
max_retries = -1
[api_database]
connection = mysql+pymysql://{{ nova_api_database_user }}:{{ nova_api_database_password }}@{{ nova_api_database_address }}/{{ nova_api_database_name }}
max_retries = -1
[cache]
backend = oslo_cache.memcache_pool
enabled = True
memcache_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
[keystone_authtoken]
{% if orchestration_engine == 'KUBERNETES' %}
auth_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
{% else %}
auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
{% endif %}
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = {{ nova_keystone_user }}
password = {{ nova_keystone_password }}
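# Cache validated keystone tokens in memcached, encrypted with the shared secret key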
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
[libvirt]
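# libvirt runs in its own container, so nova-compute connects over TCP rather than the local socket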
connection_uri = "qemu+tcp://{{ api_interface_address }}/system"
{% if enable_ceph | bool and nova_backend == "rbd" %}
images_type = rbd
images_rbd_pool = {{ ceph_nova_pool_name }}
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = nova
disk_cachemodes="network=writeback"
hw_disk_discard = unmap
{% endif %}
{% if nova_backend == "rbd" %}
rbd_secret_uuid = {{ rbd_secret_uuid }}
{% endif %}
[upgrade_levels]
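# Determine the compute RPC version automatically from the running services so rolling upgrades keep working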
compute = auto
[oslo_messaging_notifications]
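# Send notifications to the message bus only when ceilometer is enabled to consume them; otherwise drop them with the noop driver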
{% if enable_ceilometer | bool %}
driver = messagingv2
topics = notifications
{% else %}
driver = noop
{% endif %}
[privsep_entrypoint]
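# Launch the privsep helper through nova-rootwrap so privileged operations can run with the required capabilities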
helper_command = sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file /etc/nova/nova.conf