[DEFAULT]
debug = {{ tacker_logging_debug }}
log_dir = /var/log/kolla/tacker
transport_url = {{ rpc_transport_url }}
bind_host = {{ api_interface_address }}
bind_port = {{ tacker_server_port }}
api_workers = {{ tacker_api_workers }}
service_plugins = nfvo,vnfm

[nfvo]
vim_drivers = openstack

[openstack_vim]
stack_retries = 60
stack_retry_wait = 10

{% if enable_barbican | bool %}
[vim_keys]
use_barbican = True
{% endif %}

[tacker]
monitor_driver = ping,http_ping
alarm_monitor_driver = ceilometer

[database]
connection = mysql+pymysql://{{ tacker_database_user }}:{{ tacker_database_password }}@{{ tacker_database_address }}/{{ tacker_database_name }}
connection_recycle_time = {{ database_connection_recycle_time }}
max_pool_size = {{ database_max_pool_size }}
max_retries = -1

[keystone_authtoken]
service_type = nfv-orchestration
www_authenticate_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_internal_url }}
auth_type = password
project_domain_name = {{ default_project_domain_id }}
user_domain_name = {{ default_user_domain_id }}
project_name = service
username = {{ tacker_keystone_user }}
password = {{ tacker_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}

[alarm_auth]
username = {{ tacker_keystone_user }}
password = {{ tacker_keystone_password }}
project_name = service
url = {{ keystone_internal_url }}

[ceilometer]
host = {{ api_interface_address }}
port = {{ tacker_server_port }}

[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
{% if tacker_enabled_notification_topics %}
driver = messagingv2
topics = {{ tacker_enabled_notification_topics | map(attribute='name') | join(',') }}
{% else %}
driver = noop
{% endif %}

[oslo_messaging_rabbit]
heartbeat_in_pthread = false
{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
{% if om_enable_rabbitmq_high_availability | bool %}
amqp_durable_queues = true
{% endif %}
{% if om_enable_rabbitmq_quorum_queues | bool %}
rabbit_quorum_queue = true
{% endif %}

{% if tacker_policy_file is defined %}
[oslo_policy]
policy_file = {{ tacker_policy_file }}
{% endif %}

[glance_store]
filesystem_store_datadir = /var/lib/tacker/csar_files