Add size limits to Fluentd buffers

This change adds size limits to the Fluentd buffers to prevent them from
becoming too large for OpenSearch and Elasticsearch. These values have
been tested and prevent the buffers from becoming too big for OpenSearch.

Closes-Bug: #2079988
Change-Id: I06e34ceb26bf792f24ee4030a267c94f9ee22e3b
This commit is contained in:
Dawud 2024-07-17 20:26:53 +01:00
parent 5a9e268453
commit 6a2369f381
No known key found for this signature in database
5 changed files with 29 additions and 0 deletions

View File

@ -139,6 +139,18 @@ cron_logrotate_schedule: "daily"
# Enable the additional watch timer
fluentd_enable_watch_timer: "false"
# Set limits for queue size and chunk size
# We need to ensure that the bulk_message_request_threshold is set below the
# default maximum content length for the OpenSearch bulk API (100MB). By
# default the bulk_message_request_threshold is unlimited, which can lead to
# large payloads being sent and subsequently rejected by the OpenSearch API.
fluentd_bulk_message_request_threshold: "20M"
# The fluentd buffer chunk limit size is the maximum size of a single chunk in
# the buffer. This should be set to a value that is less than the
# bulk_message_request_threshold.
fluentd_buffer_chunk_limit_size: "8M"
fluentd_input_openstack_services:
- name: aodh
enabled: "{{ enable_aodh | bool }}"

View File

@ -39,10 +39,12 @@
reconnect_on_error true
request_timeout {{ fluentd_elasticsearch_request_timeout }}
suppress_type_name true
bulk_message_request_threshold {{ fluentd_bulk_message_request_threshold }}
<buffer>
@type file
path /var/lib/fluentd/data/elasticsearch.buffer/{{ item.facility }}.*
flush_interval 15s
chunk_limit_size {{ fluentd_buffer_chunk_limit_size }}
</buffer>
</store>
{% elif log_direct_to_opensearch %}
@ -70,10 +72,12 @@
reconnect_on_error true
request_timeout {{ fluentd_opensearch_request_timeout }}
suppress_type_name true
bulk_message_request_threshold {{ fluentd_bulk_message_request_threshold }}
<buffer>
@type file
path /var/lib/fluentd/data/opensearch.buffer/{{ item.facility }}.*
flush_interval 15s
chunk_limit_size {{ fluentd_buffer_chunk_limit_size }}
</buffer>
</store>
{% endif %}

View File

@ -24,10 +24,12 @@
reconnect_on_error true
request_timeout {{ fluentd_elasticsearch_request_timeout }}
suppress_type_name true
bulk_message_request_threshold {{ fluentd_bulk_message_request_threshold }}
<buffer>
@type file
path /var/lib/fluentd/data/elasticsearch.buffer/openstack.*
flush_interval 15s
chunk_limit_size {{ fluentd_buffer_chunk_limit_size }}
</buffer>
</store>
</match>

View File

@ -24,10 +24,12 @@
reconnect_on_error true
request_timeout {{ fluentd_opensearch_request_timeout }}
suppress_type_name true
bulk_message_request_threshold {{ fluentd_bulk_message_request_threshold }}
<buffer>
@type file
path /var/lib/fluentd/data/opensearch.buffer/openstack.*
flush_interval 15s
chunk_limit_size {{ fluentd_buffer_chunk_limit_size }}
</buffer>
</store>
</match>

View File

@ -0,0 +1,9 @@
---
fixes:
- |
Reduce the size of the fluentd buffers to avoid getting HTTP 413 errors
when sending logs to opensearch/elasticsearch. The default values were
chosen as sensible sizes. They can be customised by editing the
``fluentd_bulk_message_request_threshold`` and
``fluentd_buffer_chunk_limit_size`` variables. `LP#2079988
<https://bugs.launchpad.net/kolla-ansible/+bug/2079988>`__