# designate.conf template (Jinja2) — OpenStack Designate configuration.
#
# Change-Id: I2645beb3eed1948f66f76fc7eb45e14923abfa78 (commit bceb008249):
# the notification driver setup previously rendered the driver and the
# connection string on the same line, caused by how Jinja formats the
# template when a case-style statement is present. The driver string is now
# built with a ternary filter (see [oslo_messaging_notifications] below),
# which eliminates the case statement and renders the driver value correctly.
# Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
[DEFAULT]
|
|
# Send logging output to the systemd journal (oslo.log)
use_journal = True
|
|
# Disable stderr logging
|
|
use_stderr = False
|
|
|
|
########################
|
|
## General Configuration
|
|
########################
|
|
# Show debugging output in logs (sets DEBUG log level output)
|
|
debug = {{ debug }}
|
|
|
|
# Top-level directory for maintaining designate's state
|
|
#state_path = /var/lib/designate
|
|
|
|
{% if designate_ceilometer_enabled | bool %}
# Driver used for issuing notifications
# NOTE(review): this legacy [DEFAULT] notification_driver option appears to
# duplicate the "driver" value set via ternary under
# [oslo_messaging_notifications] below — confirm whether this block is still
# required for the targeted Designate release, or can be removed.
notification_driver = messagingv2
{% endif %}
|
|
|
|
# Root helper
|
|
root_helper = sudo designate-rootwrap /etc/designate/rootwrap.conf
|
|
|
|
# Which networking API to use, Defaults to neutron
|
|
#network_api = neutron
|
|
|
|
## RabbitMQ RPC
|
|
transport_url = {{ designate_oslomsg_rpc_transport }}://{% for host in designate_oslomsg_rpc_servers.split(',') %}{{ designate_oslomsg_rpc_userid }}:{{ designate_oslomsg_rpc_password }}@{{ host }}:{{ designate_oslomsg_rpc_port }}{% if not loop.last %},{% else %}/{{ designate_oslomsg_rpc_vhost }}{% if designate_oslomsg_rpc_use_ssl | bool %}?ssl=1{% else %}?ssl=0{% endif %}{% endif %}{% endfor %}
|
|
|
|
# RabbitMQ Config
|
|
[oslo_messaging_rabbit]
|
|
ssl = {{ designate_oslomsg_rpc_use_ssl }}
|
|
rabbit_notification_exchange = designate
|
|
rabbit_notification_topic = notifications
|
|
|
|
[oslo_messaging_notifications]
|
|
driver = {{ (designate_ceilometer_enabled | bool) | ternary('messagingv2', 'noop') }}
|
|
transport_url = {{ designate_oslomsg_notify_transport }}://{% for host in designate_oslomsg_notify_servers.split(',') %}{{ designate_oslomsg_notify_userid }}:{{ designate_oslomsg_notify_password }}@{{ host }}:{{ designate_oslomsg_notify_port }}{% if not loop.last %},{% else %}/{{ designate_oslomsg_notify_vhost }}{% if designate_oslomsg_notify_use_ssl | bool %}?ssl=1{% else %}?ssl=0{% endif %}{% endif %}{% endfor %}
|
|
|
|
########################
|
|
## Service Configuration
|
|
########################
|
|
#-----------------------
|
|
# Central Service
|
|
#-----------------------
|
|
[service:central]
|
|
# Number of central worker processes to spawn
|
|
#workers = None
|
|
|
|
# Number of central greenthreads to spawn
|
|
#threads = 1000
|
|
|
|
# Maximum domain name length
|
|
#max_domain_name_len = 255
|
|
|
|
# Maximum recordset name length
|
|
#max_recordset_name_len = 255
|
|
|
|
# Minimum TTL
|
|
#min_ttl = None
|
|
|
|
# The name of the default pool
|
|
#default_pool_id = '794ccc2c-d751-44fe-b57f-8894c9f5c842'
|
|
|
|
## Managed resources settings
|
|
|
|
# Email to use for managed resources like domains created by the FloatingIP API
|
|
#managed_resource_email = hostmaster@example.com.
|
|
|
|
# Tenant ID to own all managed resources - like auto-created records etc.
|
|
managed_resource_tenant_id = {{ designate_managed_resource_tenant_id }}
|
|
|
|
#-----------------------
|
|
# API Service
|
|
#-----------------------
|
|
[service:api]
|
|
# Number of api worker processes to spawn
|
|
#workers = None
|
|
|
|
# Number of api greenthreads to spawn
|
|
#threads = 1000
|
|
|
|
# Enable host request headers
|
|
enable_host_header = True
|
|
|
|
# The base uri used in responses
|
|
#api_base_uri = 'http://127.0.0.1:9001/'
|
|
|
|
# Address to bind the API server
|
|
#api_host = 0.0.0.0
|
|
|
|
# Port to bind the API server
|
|
#api_port = 9001
|
|
|
|
# Maximum line size of message headers to be accepted. max_header_line may
|
|
# need to be increased when using large tokens (typically those generated by
|
|
# the Keystone v3 API with big service catalogs).
|
|
#max_header_line = 16384
|
|
|
|
# Authentication strategy to use - can be either "noauth" or "keystone"
|
|
auth_strategy = keystone
|
|
|
|
# Enable API Version 2
|
|
enable_api_v2 = True
|
|
|
|
# Enabled API Version 2 extensions
|
|
enabled_extensions_v2 = quotas, reports
|
|
|
|
# Default per-page limit for the V2 API, a value of None means show all results
|
|
# by default
|
|
#default_limit_v2 = 20
|
|
|
|
# Max page size in the V2 API
|
|
#max_limit_v2 = 1000
|
|
|
|
# Enable Admin API (experimental)
|
|
enable_api_admin = False
|
|
|
|
# Enabled Admin API extensions
|
|
# Can be one or more of : reports, quotas, counts, tenants, zones
|
|
# zone export is in zones extension
|
|
#enabled_extensions_admin =
|
|
|
|
# Default per-page limit for the Admin API, a value of None means show all results
|
|
# by default
|
|
#default_limit_admin = 20
|
|
|
|
# Max page size in the Admin API
|
|
#max_limit_admin = 1000
|
|
|
|
# Show the pecan HTML based debug interface (v2 only)
|
|
# This is only useful for development, and WILL break python-designateclient
|
|
# if an error occurs
|
|
#pecan_debug = False
|
|
|
|
#-----------------------
|
|
# Keystone Middleware
|
|
#-----------------------
|
|
[keystone_authtoken]
|
|
|
|
insecure = {{ keystone_service_internaluri_insecure | bool }}
|
|
auth_type = {{ designate_keystone_auth_type }}
|
|
auth_url = {{ keystone_service_adminurl }}
|
|
www_authenticate_uri = {{ keystone_service_internaluri }}
|
|
project_domain_id = {{ designate_service_project_domain_id }}
|
|
user_domain_id = {{ designate_service_user_domain_id }}
|
|
project_name = {{ designate_service_project_name }}
|
|
username = {{ designate_service_user_name }}
|
|
password = {{ designate_service_password }}
|
|
|
|
memcached_servers = {{ memcached_servers }}
|
|
|
|
#-----------------------
|
|
# Sink Service
|
|
#-----------------------
|
|
[service:sink]
|
|
# List of notification handlers to enable, configuration of these needs to
|
|
# correspond to a [handler:my_driver] section below or else in the config
|
|
# Can be one or more of : nova_fixed, neutron_floatingip
|
|
#enabled_notification_handlers =
|
|
|
|
#-----------------------
|
|
# mDNS Service
|
|
#-----------------------
|
|
[service:mdns]
|
|
# Number of mdns worker processes to spawn
|
|
#workers = None
|
|
|
|
# Number of mdns greenthreads to spawn
|
|
#threads = 1000
|
|
|
|
# mDNS Bind Host
|
|
#host = 0.0.0.0
|
|
|
|
# mDNS Port Number
|
|
#port = 5354
|
|
|
|
# mDNS TCP Backlog
|
|
#tcp_backlog = 100
|
|
|
|
# mDNS TCP Receive Timeout
|
|
#tcp_recv_timeout = 0.5
|
|
|
|
# Enforce all incoming queries (including AXFR) are TSIG signed
|
|
#query_enforce_tsig = False
|
|
|
|
# Send all traffic over TCP
|
|
#all_tcp = False
|
|
|
|
# Maximum message size to emit
|
|
#max_message_size = 65535
|
|
|
|
#-----------------------
|
|
# Agent Service
|
|
#-----------------------
|
|
[service:agent]
|
|
#workers = None
|
|
#host = 0.0.0.0
|
|
#port = 5358
|
|
#tcp_backlog = 100
|
|
#allow_notify = 127.0.0.1
|
|
#masters = 127.0.0.1:5354
|
|
#backend_driver = fake
|
|
#transfer_source = None
|
|
#notify_delay = 0
|
|
|
|
#-----------------------
|
|
# Producer Service
|
|
#-----------------------
|
|
[service:producer]
|
|
# Number of Zone Manager worker processes to spawn
|
|
#workers = None
|
|
|
|
# Number of Zone Manager greenthreads to spawn
|
|
#threads = 1000
|
|
|
|
# List of Zone Manager tasks to enable, a value of None will enable all tasks.
|
|
# Can be one or more of: periodic_exists
|
|
#enabled_tasks = None
|
|
|
|
# Whether to allow synchronous zone exports
|
|
#export_synchronous = True
|
|
|
|
#------------------------
|
|
# Deleted domains purging
|
|
#------------------------
|
|
[producer_task:domain_purge]
|
|
# How frequently to purge deleted domains, in seconds
|
|
#interval = 3600 # 1h
|
|
|
|
# How many records to be deleted on each run
|
|
#batch_size = 100
|
|
|
|
# How old deleted records should be (deleted_at) to be purged, in seconds
|
|
#time_threshold = 604800 # 7 days
|
|
|
|
#------------------------
|
|
# Delayed zones NOTIFY
|
|
#------------------------
|
|
[producer_task:delayed_notify]
|
|
# How frequently to scan for zones pending NOTIFY, in seconds
|
|
#interval = 5
|
|
|
|
#------------------------
|
|
# Worker Periodic Recovery
|
|
#------------------------
|
|
[producer_task:worker_periodic_recovery]
|
|
# How frequently to scan for zones in ERROR, in seconds
|
|
#interval = 120
|
|
|
|
#-----------------------
|
|
# Pool Manager Service
|
|
#-----------------------
|
|
[service:pool_manager]
|
|
# Number of Pool Manager worker processes to spawn
|
|
#workers = None
|
|
|
|
# Number of Pool Manager greenthreads to spawn
|
|
#threads = 1000
|
|
|
|
# The ID of the pool managed by this instance of the Pool Manager
|
|
pool_id = {{ designate_pool_uuid }}
|
|
|
|
# The percentage of servers requiring a successful update for a domain change
|
|
# to be considered active
|
|
#threshold_percentage = 100
|
|
|
|
# The time to wait for a response from a server
|
|
#poll_timeout = 30
|
|
|
|
# The time between retrying to send a request and waiting for a response from a
|
|
# server
|
|
#poll_retry_interval = 15
|
|
|
|
# The maximum number of times to retry sending a request and wait for a
|
|
# response from a server
|
|
#poll_max_retries = 10
|
|
|
|
# The time to wait before sending the first request to a server
|
|
#poll_delay = 5
|
|
|
|
# Enable the recovery thread
|
|
#enable_recovery_timer = True
|
|
|
|
# The time between recovering from failures
|
|
#periodic_recovery_interval = 120
|
|
|
|
# Enable the sync thread
|
|
#enable_sync_timer = True
|
|
|
|
# The time between synchronizing the servers with storage
|
|
#periodic_sync_interval = 1800
|
|
|
|
# Zones Updated within last N seconds will be syncd. Use None to sync all zones
|
|
#periodic_sync_seconds = None
|
|
|
|
# Perform multiple update attempts during periodic_sync
|
|
#periodic_sync_max_attempts = 3
|
|
#periodic_sync_retry_interval = 30
|
|
|
|
# The cache driver to use
|
|
#cache_driver = memcache
|
|
|
|
#-----------------------
|
|
# Worker Service
|
|
#-----------------------
|
|
[service:worker]
|
|
# Whether to send events to worker instead of Pool Manager
|
|
enabled = True
|
|
|
|
# Number of Worker processes to spawn
|
|
#workers = None
|
|
|
|
# Number of Worker greenthreads to spawn
|
|
#threads = 1000
|
|
|
|
# The percentage of servers requiring a successful update for a zone change
|
|
# to be considered active
|
|
#threshold_percentage = 100
|
|
|
|
# The time to wait for a response from a server
|
|
#poll_timeout = 30
|
|
|
|
# The time between retrying to send a request and waiting for a response from a
|
|
# server
|
|
#poll_retry_interval = 15
|
|
|
|
# The maximum number of times to retry sending a request and wait for a
|
|
# response from a server
|
|
#poll_max_retries = 10
|
|
|
|
# The time to wait before sending the first request to a server
|
|
#poll_delay = 5
|
|
|
|
# Whether to allow worker to send NOTIFYs. NOTIFY requests to mdns will noop
|
|
notify = True
|
|
|
|
###################################
|
|
## Pool Manager Cache Configuration
|
|
###################################
|
|
#-----------------------
|
|
# SQLAlchemy Pool Manager Cache
|
|
#-----------------------
|
|
[pool_manager_cache:sqlalchemy]
|
|
#connection=sqlite:///$state_path/designate_pool_manager.sqlite
|
|
#connection_debug = 100
|
|
#connection_trace = False
|
|
#sqlite_synchronous = True
|
|
#idle_timeout = 3600
|
|
#max_retries = 10
|
|
#retry_interval = 10
|
|
|
|
#-----------------------
|
|
# Memcache Pool Manager Cache
|
|
#-----------------------
|
|
[pool_manager_cache:memcache]
|
|
#memcached_servers = None
|
|
#expiration = 3600
|
|
|
|
##############
|
|
## Network API
|
|
##############
|
|
[network_api:neutron]
|
|
# Comma separated list of values, formatted "<name>|<neutron_uri>"
|
|
#endpoints = RegionOne|http://localhost:9696
|
|
endpoint_type = internalURL
|
|
#timeout = 30
|
|
#admin_username = designate
|
|
#admin_password = designate
|
|
#admin_tenant_name = designate
|
|
#auth_url = http://localhost:5000/v2.0
|
|
#insecure = False
|
|
#auth_strategy = keystone
|
|
#ca_certificates_file =
|
|
|
|
########################
|
|
## Storage Configuration
|
|
########################
|
|
#-----------------------
|
|
# SQLAlchemy Storage
|
|
#-----------------------
|
|
[storage:sqlalchemy]
|
|
connection = mysql+pymysql://{{ designate_galera_user }}:{{ designate_galera_password }}@{{ designate_galera_address }}/{{ designate_galera_database }}?charset=utf8{% if designate_galera_use_ssl | bool %}&ssl_ca={{ designate_galera_ssl_ca_cert }}{% endif %}
|
|
|
|
#connection_debug = 0
|
|
#connection_trace = False
|
|
#sqlite_synchronous = True
|
|
#idle_timeout = 3600
|
|
#max_retries = 10
|
|
#retry_interval = 10
|
|
|
|
########################
|
|
## Handler Configuration
|
|
########################
|
|
#-----------------------
|
|
# Nova Fixed Handler
|
|
#-----------------------
|
|
[handler:nova_fixed]
|
|
# Domain ID of domain to create records in. Should be pre-created
|
|
#domain_id =
|
|
#notification_topics = notifications
|
|
#control_exchange = 'nova'
|
|
## Following https://docs.openstack.org/releasenotes/designate/pike.html#critical-issues
|
|
#formatv4 = '%(octet0)s-%(octet1)s-%(octet2)s-%(octet3)s.%(zone)s'
|
|
#formatv4 = '%(hostname)s.%(project)s.%(zone)s'
|
|
#formatv4 = '%(hostname)s.%(zone)s'
|
|
#formatv6 = '%(hostname)s.%(project)s.%(zone)s'
|
|
#formatv6 = '%(hostname)s.%(zone)s'
|
|
|
|
#------------------------
|
|
# Neutron Floating Handler
|
|
#------------------------
|
|
[handler:neutron_floatingip]
|
|
# Domain ID of domain to create records in. Should be pre-created
|
|
#domain_id =
|
|
#notification_topics = notifications
|
|
#control_exchange = 'neutron'
|
|
## Following https://docs.openstack.org/releasenotes/designate/pike.html#critical-issues
|
|
#formatv4 = '%(octet0)s-%(octet1)s-%(octet2)s-%(octet3)s.%(zone)s'
|
|
#formatv4 = '%(hostname)s.%(project)s.%(zone)s'
|
|
#formatv4 = '%(hostname)s.%(zone)s'
|
|
#formatv6 = '%(hostname)s.%(project)s.%(zone)s'
|
|
#formatv6 = '%(hostname)s.%(zone)s'
|
|
|
|
#############################
|
|
## Agent Backend Configuration
|
|
#############################
|
|
[backend:agent:bind9]
|
|
#rndc_host = 127.0.0.1
|
|
#rndc_port = 953
|
|
#rndc_config_file = /etc/rndc.conf
|
|
#rndc_key_file = /etc/rndc.key
|
|
#zone_file_path = $state_path/zones
|
|
#query_destination = 127.0.0.1
|
|
#
|
|
[backend:agent:denominator]
|
|
#name = dynect
|
|
#config_file = /etc/denominator.conf
|
|
|
|
########################
|
|
## Library Configuration
|
|
########################
|
|
[oslo_concurrency]
|
|
# Path for Oslo Concurrency to store lock files, defaults to the value
|
|
# of the state_path setting.
|
|
#lock_path = $state_path
|
|
|
|
########################
|
|
## Coordination
|
|
########################
|
|
[coordination]
|
|
# URL for the coordination backend to use.
|
|
#backend_url = kazoo://127.0.0.1/
|
|
|
|
########################
|
|
## Hook Points
|
|
########################
|
|
# Hook Points are enabled when added to the config and there has been
|
|
# a package that provides the corresponding named designate.hook_point
|
|
# entry point.
|
|
|
|
# [hook_point:name_of_hook_point]
|
|
# some_param_for_hook = 42
|
|
# Hooks can be disabled in the config
|
|
# enabled = False
|
|
|
|
# Hook can also be applied to the import path when the hook has not
|
|
# been given an explicit name. The name is created from the hook
|
|
# target function / method:
|
|
#
|
|
# name = '%s.%s' % (func.__module__, func.__name__)
|
|
|
|
# [hook_point:designate.api.v2.controllers.zones.get_one]
|