Update beat templates from 7.11 reference

Depends-On: Ida8668f5548a15a8f597839bd9002585aeea5d1a
Depends-On: https://review.opendev.org/c/openstack/openstack-ansible-ops/+/843423

Change-Id: Ib889cc7ac7ad2540031016075cb5baab091bd6e3
Erik Berg 2021-03-02 20:22:50 +01:00 committed by Andrew Bonney
parent f2fe9aa59f
commit f97ebbf990
8 changed files with 4837 additions and 1414 deletions

File diff suppressed because it is too large.


@@ -32,9 +32,15 @@ heartbeat.monitors:
- type: icmp # monitor type `icmp` (requires root) uses ICMP Echo Request to ping
# configured hosts
# Monitor name used for job name and document type.
# ID used to uniquely identify this monitor in elasticsearch even if the config changes
#id: my-monitor
# Human readable display name for this service in Uptime UI and elsewhere
name: icmp
# Name of corresponding APM service, if Elastic APM is in use for the monitored service.
# service.name: my-apm-service-name
# Enable/Disable monitor
enabled: true
@@ -43,6 +49,7 @@ heartbeat.monitors:
# List of hosts to ping
hosts: {{ (icmp_hosts | default([])) | to_json }}
# Configure IP protocol types to ping on if hostnames are configured.
# Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`.
ipv4: true
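For illustration only: with a hypothetical Ansible variable `icmp_hosts: ["10.0.0.5", "10.0.0.6"]`, the `to_json` filter above would render the monitor roughly as:

- type: icmp
  name: icmp
  enabled: true
  hosts: ["10.0.0.5", "10.0.0.6"]
  ipv4: true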
@@ -80,6 +87,17 @@ heartbeat.monitors:
# Interval between file changed checks.
#interval: 5s
# The Ingest Node pipeline ID associated with this input. If this is set, it
# overwrites the pipeline option from the Elasticsearch output.
#pipeline:
# The index name associated with this input. If this is set, it
# overwrites the index option from the Elasticsearch output.
#index:
# Set to true to publish fields with null values in events.
#keep_null: false
# Define a directory to load monitor definitions from. Definitions take the form
# of individual yaml files.
# heartbeat.config.monitors:
@@ -102,29 +120,17 @@ heartbeat.monitors:
{% if hosts | length > 0 %}
- type: tcp # monitor type `tcp`. Connect via TCP and optionally verify endpoint
# by sending/receiving a custom payload
# ID used to uniquely identify this monitor in elasticsearch even if the config changes
#id: my-monitor
# Monitor name used for job name and document type
# Human readable display name for this service in Uptime UI and elsewhere
name: {{ item.name }}
# Enable/Disable monitor
enabled: true
# Configure task schedule
schedule: '@every 30s' # every 30 seconds from start of beat
# configure hosts to ping.
# Entries can be:
# - plain host name or IP like `localhost`:
# Requires ports configs to be checked. If ssl is configured,
# an SSL/TLS based connection will be established. Otherwise a plain TCP
# connection will be established
name: "{{ item.name }}"
# Enable/Disable monitor
enabled: true
# Configure task schedule
schedule: '@every 45s' # every 5 seconds from start of beat
schedule: '@every 45s'
# configure hosts to ping.
# Entries can be:
@@ -178,6 +184,27 @@ heartbeat.monitors:
# Required TLS protocols
#supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"]
# NOTE: THIS FEATURE IS DEPRECATED AND WILL BE REMOVED IN A FUTURE RELEASE
# Configure a JSON file to be watched for changes to the monitor:
#watch.poll_file:
# Path to check for updates.
#path:
# Interval between file changed checks.
#interval: 5s
# The Ingest Node pipeline ID associated with this input. If this is set, it
# overwrites the pipeline option from the Elasticsearch output.
#pipeline:
# The index name associated with this input. If this is set, it
# overwrites the index option from the Elasticsearch output.
#index:
# Set to true to publish fields with null values in events.
#keep_null: false
{% endif %}
{% elif item.type == 'http' %}
{% set hosts = [] %}
@@ -188,25 +215,18 @@ heartbeat.monitors:
{% endfor %}
{% endfor %}
{% if hosts | length > 0 %}
# NOTE: THIS FEATURE IS DEPRECATED AND WILL BE REMOVED IN A FUTURE RELEASE
# Configure a JSON file to be watched for changes to the monitor:
#watch.poll_file:
# Path to check for updates.
#path:
# Interval between file changed checks.
#interval: 5s
- type: http # monitor type `http`. Connect via HTTP and optionally verify response
# ID used to uniquely identify this monitor in elasticsearch even if the config changes
#id: my-http-monitor
# Monitor name used for job name and document type
# Human readable display name for this service in Uptime UI and elsewhere
name: "{{ item.name }}"
# Enable/Disable monitor
enabled: true
# Configure task schedule
schedule: '@every 60s' # every 5 seconds from start of beat
schedule: '@every 60s'
# Configure URLs to ping
urls: {{ (hosts | default([])) | to_json }}
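As a sketch, assuming a monitor item such as `{'name': 'kibana', 'type': 'http'}` whose surrounding Jinja loops collect two endpoints, the rendered block might look like:

- type: http
  name: "kibana"
  enabled: true
  schedule: '@every 60s'
  urls: ["http://172.29.236.10:5601", "http://172.29.236.11:5601"]

(The item name and URLs are hypothetical; the real values come from the loop variables above.)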
@@ -252,13 +272,13 @@ heartbeat.monitors:
# Dictionary of additional HTTP headers to send:
headers:
User-agent: osa-heartbeat-healthcheck
# Optional request body content
#body:
# Expected response settings
{% if item.check_response is defined %}
# Expected response settings
check.response: {{ item.check_response }}
#check.response:
# Expected status code. If not configured or set to 0 any status code not
# being 404 is accepted.
#status: 0
@@ -268,10 +288,6 @@ heartbeat.monitors:
# Required response contents.
#body:
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
# Parses the body as JSON, then checks against the given condition expression
#json:
@@ -280,7 +296,7 @@ heartbeat.monitors:
# equals:
# myField: expectedValue
{% endif %}
# NOTE: THIS FEATURE IS DEPRECATED AND WILL BE REMOVED IN A FUTURE RELEASE
# Configure a JSON file to be watched for changes to the monitor:
#watch.poll_file:
@@ -290,16 +306,30 @@ heartbeat.monitors:
# Interval between file changed checks.
#interval: 5s
# The Ingest Node pipeline ID associated with this input. If this is set, it
# overwrites the pipeline option from the Elasticsearch output.
#pipeline:
# The index name associated with this input. If this is set, it
# overwrites the index option from the Elasticsearch output.
#index:
# Set to true to publish fields with null values in events.
#keep_null: false
{% endif %}
{% endif %}
{% endfor %}
heartbeat.scheduler:
# Limit number of concurrent tasks executed by heartbeat. The task limit is
# disabled if set to 0. The default is 0.
limit: {{ icmp_hosts | length // 4 }}
# Set the scheduler's timezone
# Set the scheduler's time zone
#location: ''
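A worked example of the `limit` expression above: `//` is Jinja2 integer division, so a hypothetical inventory with 10 `icmp_hosts` renders as:

heartbeat.scheduler:
  limit: 2

With fewer than four hosts the expression renders 0, which, per the comment above, disables the task limit.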
#================================ General ======================================
# ================================== General ===================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
@@ -340,9 +370,44 @@ heartbeat.scheduler:
#flush.min_events: 2048
# Maximum duration after which events are available to the outputs,
# if the number of events stored in the queue is < min_flush_events.
# if the number of events stored in the queue is < `flush.min_events`.
#flush.timeout: 1s
# The disk queue stores incoming events on disk until the output is
# ready for them. This allows a higher event limit than the memory-only
# queue and lets pending events persist through a restart.
#disk:
# The directory path to store the queue's data.
#path: "${path.data}/diskqueue"
# The maximum space the queue should occupy on disk. Depending on
# input settings, events that exceed this limit are delayed or discarded.
#max_size: 10GB
# The maximum size of a single queue data file. Data in the queue is
# stored in smaller segments that are deleted after all their events
# have been processed.
#segment_size: 1GB
# The number of events to read from disk to memory while waiting for
# the output to request them.
#read_ahead: 512
# The number of events to accept from inputs while waiting for them
# to be written to disk. If event data arrives faster than it
# can be written to disk, this setting prevents it from overflowing
# main memory.
#write_ahead: 2048
# The duration to wait before retrying when the queue encounters a disk
# write error.
#retry_interval: 1s
# The maximum length of time to wait before retrying on a disk write
# error. If the queue encounters repeated errors, it will double the
# length of its retry interval each time, up to this maximum.
#max_retry_interval: 30s
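A minimal sketch of what enabling the disk queue could look like; the values shown are simply the documented defaults, not tuned recommendations:

queue:
  disk:
    path: "${path.data}/diskqueue"
    max_size: 10GB
    segment_size: 1GB
    read_ahead: 512
    write_ahead: 2048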
# The spool queue will store events in a local spool file, before
# forwarding the events to the outputs.
#
@@ -407,7 +472,7 @@ heartbeat.scheduler:
# default is the number of logical CPUs available in the system.
#max_procs:
#================================ Processors ===================================
# ================================= Processors =================================
{{ elk_macros.beat_processors(processors) }}
# Processors are used to reduce the number of fields in the exported event or to
@@ -425,103 +490,155 @@ heartbeat.scheduler:
# values:
#
#processors:
#- include_fields:
# fields: ["cpu"]
#- drop_fields:
# fields: ["cpu.user", "cpu.system"]
# - include_fields:
# fields: ["cpu"]
# - drop_fields:
# fields: ["cpu.user", "cpu.system"]
#
# The following example drops the events that have the HTTP response code 200:
#
#processors:
#- drop_event:
# when:
# equals:
# http.code: 200
# - drop_event:
# when:
# equals:
# http.code: 200
#
# The following example renames the field a to b:
#
#processors:
#- rename:
# fields:
# - from: "a"
# to: "b"
# - rename:
# fields:
# - from: "a"
# to: "b"
#
# The following example tokenizes the string into fields:
#
#processors:
#- dissect:
# tokenizer: "%{key1} - %{key2}"
# field: "message"
# target_prefix: "dissect"
# - dissect:
# tokenizer: "%{key1} - %{key2}"
# field: "message"
# target_prefix: "dissect"
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, DigitalOcean,
# Tencent Cloud, and Alibaba Cloud.
#
#processors:
#- add_cloud_metadata: ~
# - add_cloud_metadata: ~
#
# The following example enriches each event with the machine's local time zone
# offset from UTC.
#
#processors:
#- add_locale:
# format: offset
# - add_locale:
# format: offset
#
# The following example enriches each event with docker metadata; it matches
# given fields to an existing container id and adds info from that container:
#
#processors:
#- add_docker_metadata:
# host: "unix:///var/run/docker.sock"
# match_fields: ["system.process.cgroup.id"]
# match_pids: ["process.pid", "process.ppid"]
# match_source: true
# match_source_index: 4
# match_short_id: false
# cleanup_timeout: 60
# labels.dedot: false
# # To connect to Docker over TLS you must specify a client and CA certificate.
# #ssl:
# # certificate_authority: "/etc/pki/root/ca.pem"
# # certificate: "/etc/pki/client/cert.pem"
# # key: "/etc/pki/client/cert.key"
# - add_docker_metadata:
# host: "unix:///var/run/docker.sock"
# match_fields: ["system.process.cgroup.id"]
# match_pids: ["process.pid", "process.ppid"]
# match_source: true
# match_source_index: 4
# match_short_id: false
# cleanup_timeout: 60
# labels.dedot: false
# # To connect to Docker over TLS you must specify a client and CA certificate.
# #ssl:
# # certificate_authority: "/etc/pki/root/ca.pem"
# # certificate: "/etc/pki/client/cert.pem"
# # key: "/etc/pki/client/cert.key"
#
# The following example enriches each event with docker metadata; it matches
# container id from log path available in `source` field (by default it expects
# it to be /var/lib/docker/containers/*/*.log).
#
#processors:
#- add_docker_metadata: ~
# - add_docker_metadata: ~
#
# The following example enriches each event with host metadata.
#
#processors:
#- add_host_metadata:
# netinfo.enabled: false
# - add_host_metadata: ~
#
# The following example enriches each event with process metadata using
# process IDs included in the event.
#
#processors:
#- add_process_metadata:
# match_pids: ["system.process.ppid"]
# target: system.process.parent
# - add_process_metadata:
# match_pids: ["system.process.ppid"]
# target: system.process.parent
#
# The following example decodes fields containing JSON strings
# and replaces the strings with valid JSON objects.
#
#processors:
#- decode_json_fields:
# fields: ["field1", "field2", ...]
# process_array: false
# max_depth: 1
# target: ""
# overwrite_keys: false
# - decode_json_fields:
# fields: ["field1", "field2", ...]
# process_array: false
# max_depth: 1
# target: ""
# overwrite_keys: false
#
#processors:
# - decompress_gzip_field:
# from: "field1"
# to: "field2"
# ignore_missing: false
# fail_on_error: true
#
# The following example copies the value of message to message_copied
#
#processors:
# - copy_fields:
# fields:
# - from: message
# to: message_copied
# fail_on_error: true
# ignore_missing: false
#
# The following example truncates the value of message to 1024 bytes
#
#processors:
# - truncate_fields:
# fields:
# - message
# max_bytes: 1024
# fail_on_error: false
# ignore_missing: true
#
# The following example preserves the raw message under event.original
#
#processors:
# - copy_fields:
# fields:
# - from: message
# to: event.original
# fail_on_error: false
# ignore_missing: true
# - truncate_fields:
# fields:
# - event.original
# max_bytes: 1024
# fail_on_error: false
# ignore_missing: true
#
# The following example URL-decodes the value of field1 to field2
#
#processors:
# - urldecode:
# fields:
# - from: "field1"
# to: "field2"
# ignore_missing: false
# fail_on_error: true
#============================= Elastic Cloud ==================================
# =============================== Elastic Cloud ================================
# These settings simplify using heartbeat with the Elastic Cloud (https://cloud.elastic.co/).
# These settings simplify using Heartbeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
@@ -532,11 +649,11 @@ heartbeat.scheduler:
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
#================================ Outputs ======================================
# ================================== Outputs ===================================
# Configure what output to use when sending the data collected by the beat.
#-------------------------- Elasticsearch output -------------------------------
# ---------------------------- Elasticsearch Output ----------------------------
#output.elasticsearch:
# Boolean flag to enable or disable the output module.
#enabled: true
@@ -553,8 +670,11 @@ heartbeat.scheduler:
# Configure escaping HTML symbols in strings.
#escape_html: false
# Optional protocol and basic auth credentials.
# Protocol - either `http` (default) or `https`.
#protocol: "https"
# Authentication credentials - either API key or username/password.
#api_key: "id:api_key"
#username: "elastic"
#password: "changeme"
@@ -584,6 +704,11 @@ heartbeat.scheduler:
# Proxy server URL
#proxy_url: http://proxy:3128
# Whether to disable proxy settings for outgoing connections. If true, this
# takes precedence over both the proxy_url field and any environment settings
# (HTTP_PROXY, HTTPS_PROXY). The default is false.
#proxy_disable: false
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
@@ -610,15 +735,23 @@ heartbeat.scheduler:
# Use SSL settings for HTTPS.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL-based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
# Controls the verification of certificates. Valid values are:
# * full, which verifies that the provided certificate is signed by a trusted
# authority (CA) and also verifies that the server's hostname (or IP address)
# matches the names identified within the certificate.
# * certificate, which verifies that the provided certificate is signed by a
# trusted authority (CA), but does not perform any hostname verification.
# * none, which performs no verification of the server's certificate. This
# mode disables many of the security benefits of SSL/TLS and should only be used
# after very careful consideration. It is primarily intended as a temporary
# diagnostic mechanism when attempting to resolve TLS errors; its use in
# production environments is strongly discouraged.
# The default value is full.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of supported/valid TLS versions. By default all TLS versions from 1.1
# up to 1.3 are enabled.
#ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
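A minimal sketch combining the verification and certificate options above for a mutually authenticated HTTPS output (all paths are placeholders):

output.elasticsearch:
  protocol: "https"
  ssl.verification_mode: full
  ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  ssl.certificate: "/etc/pki/client/cert.pem"
  ssl.key: "/etc/pki/client/cert.key"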
@@ -636,17 +769,43 @@ heartbeat.scheduler:
#ssl.cipher_suites: []
# Configure curve types for ECDHE-based cipher suites
# #ssl.curve_types: []
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
# Configure a pin that can be used to do extra validation of the verified certificate chain,
# this allows you to ensure that a specific certificate is used to validate the chain of trust.
#
# The pin is a base64 encoded string of the SHA-256 fingerprint.
#ssl.ca_sha256: ""
#----------------------------- Logstash output ---------------------------------
# Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
#kerberos.enabled: true
# Authentication type to use with Kerberos. Available options: keytab, password.
#kerberos.auth_type: password
# Path to the keytab file. It is used when auth_type is set to keytab.
#kerberos.keytab: /etc/elastic.keytab
# Path to the Kerberos configuration.
#kerberos.config_path: /etc/krb5.conf
# Name of the Kerberos user.
#kerberos.username: elastic
# Password of the Kerberos user. It is used when auth_type is set to password.
#kerberos.password: changeme
# Kerberos realm.
#kerberos.realm: ELASTIC
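A keytab-based sketch of the Kerberos options above, assuming a principal and keytab already exist (all values are placeholders):

output.elasticsearch:
  kerberos.enabled: true
  kerberos.auth_type: keytab
  kerberos.keytab: /etc/elastic.keytab
  kerberos.config_path: /etc/krb5.conf
  kerberos.username: elastic
  kerberos.realm: ELASTIC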
# ------------------------------ Logstash Output -------------------------------
{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }}
#------------------------------- Kafka output ----------------------------------
# -------------------------------- Kafka Output --------------------------------
#output.kafka:
# Boolean flag to enable or disable the output module.
#enabled: true
@@ -681,7 +840,11 @@ heartbeat.scheduler:
#username: ''
#password: ''
# Kafka version heartbeat is assumed to run against. Defaults to the "1.0.0".
# SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512.
# Defaults to PLAIN when `username` and `password` are configured.
#sasl.mechanism: ''
# Kafka version Heartbeat is assumed to run against. Defaults to "1.0.0".
#version: '1.0.0'
# Configure JSON encoding
@@ -705,8 +868,8 @@ heartbeat.scheduler:
# Refresh metadata interval. Defaults to every 10 minutes.
#refresh_frequency: 10m
# Strategy for fetching the topics metadata from the broker. Default is true.
#full: true
# Strategy for fetching the topics metadata from the broker. Default is false.
#full: false
# The number of concurrent load-balanced Kafka output workers.
#worker: 1
@@ -718,10 +881,25 @@ heartbeat.scheduler:
# until all events are published. The default is 3.
#max_retries: 3
# The number of seconds to wait before trying to republish to Kafka
# after a network error. After waiting backoff.init seconds, the Beat
# tries to republish. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful publish, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s
# The maximum number of seconds to wait before attempting to republish to
# Kafka after a network error. The default is 60s.
#backoff.max: 60s
# The maximum number of events to bulk in a single Kafka request. The default
# is 2048.
#bulk_max_size: 2048
# Duration to wait before sending bulk Kafka request. 0 is no delay. The default
# is 0.
#bulk_flush_frequency: 0s
# The number of seconds to wait for responses from the Kafka brokers before
# timing out. The default is 30s.
#timeout: 30s
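To make the retry and batching settings above concrete, a hedged tuning sketch (values are the documented defaults):

output.kafka:
  backoff.init: 1s
  backoff.max: 60s
  bulk_max_size: 2048
  timeout: 30s

With these values, republish attempts after a network error back off roughly 1s, 2s, 4s, and so on, capped at 60s, and the timer resets after a successful publish.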
@@ -760,30 +938,37 @@ heartbeat.scheduler:
# purposes. The default is "beats".
#client_id: beats
# Enable SSL support. SSL is automatically enabled if any SSL setting is set.
# Use SSL settings for HTTPS.
#ssl.enabled: true
# Optional SSL configuration options. SSL is off by default.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
# Controls the verification of certificates. Valid values are:
# * full, which verifies that the provided certificate is signed by a trusted
# authority (CA) and also verifies that the server's hostname (or IP address)
# matches the names identified within the certificate.
# * certificate, which verifies that the provided certificate is signed by a
# trusted authority (CA), but does not perform any hostname verification.
# * none, which performs no verification of the server's certificate. This
# mode disables many of the security benefits of SSL/TLS and should only be used
# after very careful consideration. It is primarily intended as a temporary
# diagnostic mechanism when attempting to resolve TLS errors; its use in
# production environments is strongly discouraged.
# The default value is full.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of supported/valid TLS versions. By default all TLS versions from 1.1
# up to 1.3 are enabled.
#ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
# Client certificate key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
# Optional passphrase for decrypting the certificate key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
@@ -796,7 +981,38 @@ heartbeat.scheduler:
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#------------------------------- Redis output ----------------------------------
# Configure a pin that can be used to do extra validation of the verified certificate chain,
# this allows you to ensure that a specific certificate is used to validate the chain of trust.
#
# The pin is a base64 encoded string of the SHA-256 fingerprint.
#ssl.ca_sha256: ""
# Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
#kerberos.enabled: true
# Authentication type to use with Kerberos. Available options: keytab, password.
#kerberos.auth_type: password
# Path to the keytab file. It is used when auth_type is set to keytab.
#kerberos.keytab: /etc/security/keytabs/kafka.keytab
# Path to the Kerberos configuration.
#kerberos.config_path: /etc/krb5.conf
# The service name. Service principal name is constructed from
# service_name/hostname@realm.
#kerberos.service_name: kafka
# Name of the Kerberos user.
#kerberos.username: elastic
# Password of the Kerberos user. It is used when auth_type is set to password.
#kerberos.password: changeme
# Kerberos realm.
#kerberos.realm: ELASTIC
# -------------------------------- Redis Output --------------------------------
#output.redis:
# Boolean flag to enable or disable the output module.
#enabled: true
@@ -812,6 +1028,8 @@ heartbeat.scheduler:
# The list of Redis servers to connect to. If load-balancing is enabled, the
# events are distributed to the servers in the list. If one server becomes
# unreachable, the events are distributed to the reachable servers only.
# The hosts setting supports redis and rediss URLs with a custom password, e.g.
# redis://:password@localhost:6379.
#hosts: ["localhost:6379"]
# The name of the Redis list or channel the events are published to. The
@@ -876,43 +1094,57 @@ heartbeat.scheduler:
# occurs on the proxy server.
#proxy_use_local_resolver: false
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
# Use SSL settings for HTTPS.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
# Controls the verification of certificates. Valid values are:
# * full, which verifies that the provided certificate is signed by a trusted
# authority (CA) and also verifies that the server's hostname (or IP address)
# matches the names identified within the certificate.
# * certificate, which verifies that the provided certificate is signed by a
# trusted authority (CA), but does not perform any hostname verification.
# * none, which performs no verification of the server's certificate. This
# mode disables many of the security benefits of SSL/TLS and should only be used
# after very careful consideration. It is primarily intended as a temporary
# diagnostic mechanism when attempting to resolve TLS errors; its use in
# production environments is strongly discouraged.
# The default value is full.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of supported/valid TLS versions. By default all TLS versions from 1.1
# up to 1.3 are enabled.
#ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
# Optional SSL configuration options. SSL is off by default.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
# Client certificate key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
# Optional passphrase for decrypting the certificate key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites
# Configure curve types for ECDHE-based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#------------------------------- File output -----------------------------------
# Configure a pin that can be used to do extra validation of the verified certificate chain,
# this allows you to ensure that a specific certificate is used to validate the chain of trust.
#
# The pin is a base64 encoded string of the SHA-256 fingerprint.
#ssl.ca_sha256: ""
# -------------------------------- File Output ---------------------------------
#output.file:
# Boolean flag to enable or disable the output module.
#enabled: true
@@ -934,7 +1166,7 @@ heartbeat.scheduler:
#filename: heartbeat
# Maximum size in kilobytes of each file. When this size is reached, and on
# every heartbeat restart, the files are rotated. The default value is 10240
# every Heartbeat restart, the files are rotated. The default value is 10240
# kB.
#rotate_every_kb: 10000
@@ -946,8 +1178,7 @@ heartbeat.scheduler:
# Permissions to use for file creation. The default is 0600.
#permissions: 0600
#----------------------------- Console output ---------------------------------
# ------------------------------- Console Output -------------------------------
#output.console:
# Boolean flag to enable or disable the output module.
#enabled: true
@@ -960,77 +1191,99 @@ heartbeat.scheduler:
# Configure escaping HTML symbols in strings.
#escape_html: false
#================================= Paths ======================================
# =================================== Paths ====================================
# The home path for the heartbeat installation. This is the default base path
# The home path for the Heartbeat installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution (for example, the sample dashboards).
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:
# The configuration path for the heartbeat installation. This is the default
# The configuration path for the Heartbeat installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}
# The data path for the heartbeat installation. This is the default base path
# for all the files in which heartbeat needs to store its data. If not set by a
# The data path for the Heartbeat installation. This is the default base path
# for all the files in which Heartbeat needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data
# The logs path for a heartbeat installation. This is the default location for
# The logs path for a Heartbeat installation. This is the default location for
# the Beat's log files. If not set by a CLI flag or in the configuration file,
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs
#================================ Keystore ==========================================
# ================================== Keystore ==================================
# Location of the Keystore containing the keys and their sensitive values.
#keystore.path: "${path.config}/beats.keystore"
#============================== Dashboards =====================================
# ================================= Dashboards =================================
{{ elk_macros.setup_dashboards('heartbeat') }}
#============================== Template =====================================
# ================================== Template ==================================
{{ elk_macros.setup_template('heartbeat', inventory_hostname, data_nodes, elasticsearch_beat_settings) }}
#============================== Setup ILM =====================================
# ====================== Index Lifecycle Management (ILM) ======================
# Configure Index Lifecycle Management. Index Lifecycle Management creates a
# write alias and adds additional settings to the template.
# The elasticsearch.output.index setting will be replaced with the write alias
# if ILM is enabled.
# Configure index lifecycle management (ILM). These settings create a write
# alias and add additional settings to the index template. When ILM is enabled,
# output.elasticsearch.index is ignored, and the write alias is used to set the
# index name.
# Enable ILM support. Valid values are true, false, and auto. The beat will
# detect availability of Index Lifecycle Management in Elasticsearch and enable
# or disable ILM support.
# Enable ILM support. Valid values are true, false, and auto. When set to auto
# (the default), the Beat uses index lifecycle management when it connects to a
# cluster that supports ILM; otherwise, it creates daily indices.
#setup.ilm.enabled: auto
# Configure the ILM write alias name.
#setup.ilm.rollover_alias: "heartbeat"
# Set the prefix used in the index lifecycle write alias name. The default alias
# name is 'heartbeat-%{[agent.version]}'.
#setup.ilm.rollover_alias: 'heartbeat'
# Configure rollover index pattern.
# Set the rollover index pattern. The default is "%{now/d}-000001".
#setup.ilm.pattern: "{now/d}-000001"
{% if ilm_policy_name is defined %}
# Set the lifecycle policy name. The default policy name is
# 'beatname'.
setup.ilm.policy_name: "{{ ilm_policy_name }}"
{% endif %}
{% if ilm_policy_file_location is defined %}
# The path to a JSON file that contains a lifecycle policy configuration. Used
# to load your own lifecycle policy.
setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}"
{% endif %}
#============================== Kibana =====================================
# Disable the check for an existing lifecycle policy. The default is true. If
# you disable this check, set setup.ilm.overwrite: true so the lifecycle policy
# can be installed.
#setup.ilm.check_exists: true
# Overwrite the lifecycle policy at startup. The default is false.
#setup.ilm.overwrite: false
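For illustration, if a deployment set only `ilm_policy_name: osa-heartbeat` (a hypothetical value), the conditional blocks above would render a single extra line:

setup.ilm.policy_name: "osa-heartbeat"

When neither variable is defined, no policy lines are emitted and the Beat's default ILM policy applies.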
# =================================== Kibana ===================================
{% if (groups['kibana'] | length) > 0 %}
{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }}
{% endif %}
#================================ Logging ======================================
# ================================== Logging ===================================
{{ elk_macros.beat_logging('heartbeat', heartbeat_log_level) }}
#============================== Xpack Monitoring =====================================
{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}
# ============================= X-Pack Monitoring ==============================
{{ elk_macros.xpack_monitoring_elasticsearch('heartbeat', inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}
# =============================== HTTP Endpoint ================================
#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through an HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
@@ -1039,18 +1292,62 @@ setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}
# Defines if the HTTP endpoint is enabled.
#http.enabled: false
# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
# When using IP addresses, it is recommended to only use localhost.
#http.host: localhost
# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066
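A minimal sketch of enabling the endpoint with the settings above:

http.enabled: true
http.host: localhost
http.port: 5066

Once the Beat is running, fetching http://localhost:5066/stats returns the metrics; appending ?pretty to the URL formats the JSON.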
#============================= Process Security ================================
# Define which user should own the named pipe.
#http.named_pipe.user:
# Define the permissions that should be applied to the named pipe, using the Security
# Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with
# `http.user`.
#http.named_pipe.security_descriptor:
# ============================== Process Security ==============================
# Enable or disable seccomp system call filtering on Linux. Default is enabled.
#seccomp.enabled: true
#================================= Migration ==================================
# ============================== Instrumentation ===============================
# Instrumentation support for the heartbeat.
#instrumentation:
# Set to true to enable instrumentation of heartbeat.
#enabled: false
# Environment in which heartbeat is running (e.g. staging, production, etc.)
#environment: ""
# APM Server hosts to report instrumentation results to.
#hosts:
# - http://localhost:8200
# API Key for the APM Server(s).
# If api_key is set then secret_token will be ignored.
#api_key:
# Secret token for the APM Server(s).
#secret_token:
# Enable profiling of the server, recording profile samples as events.
#
# This feature is experimental.
#profiling:
#cpu:
# Set to true to enable CPU profiling.
#enabled: false
#interval: 60s
#duration: 10s
#heap:
# Set to true to enable heap profiling.
#enabled: false
#interval: 60s
# ================================= Migration ==================================
# This allows enabling 6.7 migration aliases
#migration.6_to_7.enabled: false


@@ -11,13 +11,21 @@
# For more available modules and options, please see the journalbeat.reference.yml sample
# configuration file.
#=========================== Journalbeat inputs =============================
# ============================= Journalbeat inputs =============================
journalbeat.inputs:
# Paths that should be crawled and fetched. Possible values are files and directories.
# When setting a directory, all journals under it are merged.
# When empty, it starts to read from the local journal.
- paths: {{ journal_paths | to_json }}
- paths:
{% for jp in journal_paths %}
- {{ jp }}
{% endfor %}
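For illustration, a hypothetical `journal_paths: ['/var/log/journal']` would make the loop above render:

journalbeat.inputs:
- paths:
    - /var/log/journal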
# An optional unique identifier for the input. By providing a unique `id` you
# can operate multiple inputs on the same journal. This allows each input's
# cursor to be persisted independently in the registry file.
#id: ""
# The number of seconds to wait before trying to read again from journals.
backoff: 10s
@@ -33,6 +41,11 @@ journalbeat.inputs:
# Matching for nginx entries: "systemd.unit=nginx"
#include_matches: []
# Set the option to preserve the remote hostname in entries from a remote journal.
# It is only needed when used with add_host_metadata, so the original host name
# does not get overwritten by the processor.
#save_remote_hostname: false
# Optional fields that you can specify to add additional information to the
# output. Fields can be scalar values, arrays, dictionaries, or any nested
# combination of these.
@@ -40,19 +53,19 @@ journalbeat.inputs:
# env: staging
#========================= Journalbeat global options ============================
# ========================= Journalbeat global options =========================
journalbeat:
# Name of the registry file. If a relative path is used, it is considered relative to the
# data path.
registry_file: registry
#==================== Elasticsearch template setting ==========================
# ======================= Elasticsearch template setting =======================
setup.template.settings:
index.number_of_shards: 1
#index.codec: best_compression
#_source.enabled: false
#================================ General ======================================
# ================================== General ===================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
@@ -94,9 +107,44 @@ queue:
flush.min_events: {{ journalbeat_queue_flush_min_events }}
# Maximum duration after which events are available to the outputs,
# if the number of events stored in the queue is < min_flush_events.
# if the number of events stored in the queue is < `flush.min_events`.
flush.timeout: {{ journalbeat_queue_flush_timeout }}
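As a sketch (assuming these settings sit under `queue.mem` as in the Beats reference layout), hypothetical values of `journalbeat_queue_flush_min_events: 512` and `journalbeat_queue_flush_timeout: 5s` would render roughly:

queue:
  mem:
    flush.min_events: 512
    flush.timeout: 5s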
# The disk queue stores incoming events on disk until the output is
# ready for them. This allows a higher event limit than the memory-only
# queue and lets pending events persist through a restart.
#disk:
# The directory path to store the queue's data.
#path: "${path.data}/diskqueue"
# The maximum space the queue should occupy on disk. Depending on
# input settings, events that exceed this limit are delayed or discarded.
#max_size: 10GB
# The maximum size of a single queue data file. Data in the queue is
# stored in smaller segments that are deleted after all their events
# have been processed.
#segment_size: 1GB
# The number of events to read from disk to memory while waiting for
# the output to request them.
#read_ahead: 512
# The number of events to accept from inputs while waiting for them
# to be written to disk. If event data arrives faster than it
# can be written to disk, this setting prevents it from overflowing
# main memory.
#write_ahead: 2048
# The duration to wait before retrying when the queue encounters a disk
# write error.
#retry_interval: 1s
# The maximum length of time to wait before retrying on a disk write
# error. If the queue encounters repeated errors, it will double the
# length of its retry interval each time, up to this maximum.
#max_retry_interval: 30s
# The spool queue will store events in a local spool file, before
# forwarding the events to the outputs.
#
@@ -161,7 +209,7 @@ queue:
# default is the number of logical CPUs available in the system.
#max_procs:
#================================ Processors ===================================
# ================================= Processors =================================
{{ elk_macros.beat_processors(processors) }}
# Processors are used to reduce the number of fields in the exported event or to
@@ -179,103 +227,155 @@ queue:
# values:
#
#processors:
#- include_fields:
# fields: ["cpu"]
#- drop_fields:
# fields: ["cpu.user", "cpu.system"]
# - include_fields:
# fields: ["cpu"]
# - drop_fields:
# fields: ["cpu.user", "cpu.system"]
#
# The following example drops the events that have the HTTP response code 200:
#
#processors:
#- drop_event:
# when:
# equals:
# http.code: 200
# - drop_event:
# when:
# equals:
# http.code: 200
#
# The following example renames the field a to b:
#
#processors:
#- rename:
# fields:
# - from: "a"
# to: "b"
# - rename:
# fields:
# - from: "a"
# to: "b"
#
# The following example tokenizes the string into fields:
#
#processors:
#- dissect:
# tokenizer: "%{key1} - %{key2}"
# field: "message"
# target_prefix: "dissect"
# - dissect:
# tokenizer: "%{key1} - %{key2}"
# field: "message"
# target_prefix: "dissect"
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, DigitalOcean,
# Tencent Cloud, and Alibaba Cloud.
#
#processors:
#- add_cloud_metadata: ~
# - add_cloud_metadata: ~
#
# The following example enriches each event with the machine's local time zone
# offset from UTC.
#
#processors:
#- add_locale:
# format: offset
# - add_locale:
# format: offset
#
# The following example enriches each event with docker metadata; it matches
# given fields to an existing container id and adds info from that container:
#
#processors:
#- add_docker_metadata:
# host: "unix:///var/run/docker.sock"
# match_fields: ["system.process.cgroup.id"]
# match_pids: ["process.pid", "process.ppid"]
# match_source: true
# match_source_index: 4
# match_short_id: false
# cleanup_timeout: 60
# labels.dedot: false
# # To connect to Docker over TLS you must specify a client and CA certificate.
# #ssl:
# # certificate_authority: "/etc/pki/root/ca.pem"
# # certificate: "/etc/pki/client/cert.pem"
# # key: "/etc/pki/client/cert.key"
# - add_docker_metadata:
# host: "unix:///var/run/docker.sock"
# match_fields: ["system.process.cgroup.id"]
# match_pids: ["process.pid", "process.ppid"]
# match_source: true
# match_source_index: 4
# match_short_id: false
# cleanup_timeout: 60
# labels.dedot: false
# # To connect to Docker over TLS you must specify a client and CA certificate.
# #ssl:
# # certificate_authority: "/etc/pki/root/ca.pem"
# # certificate: "/etc/pki/client/cert.pem"
# # key: "/etc/pki/client/cert.key"
#
# The following example enriches each event with docker metadata; it matches
# container id from log path available in `source` field (by default it expects
# it to be /var/lib/docker/containers/*/*.log).
#
#processors:
#- add_docker_metadata: ~
# - add_docker_metadata: ~
#
# The following example enriches each event with host metadata.
#
#processors:
#- add_host_metadata:
# netinfo.enabled: false
# - add_host_metadata: ~
#
# The following example enriches each event with process metadata using
# process IDs included in the event.
#
#processors:
#- add_process_metadata:
# match_pids: ["system.process.ppid"]
# target: system.process.parent
# - add_process_metadata:
# match_pids: ["system.process.ppid"]
# target: system.process.parent
#
# The following example decodes fields containing JSON strings
# and replaces the strings with valid JSON objects.
#
#processors:
#- decode_json_fields:
# fields: ["field1", "field2", ...]
# process_array: false
# max_depth: 1
# target: ""
# overwrite_keys: false
# - decode_json_fields:
# fields: ["field1", "field2", ...]
# process_array: false
# max_depth: 1
# target: ""
# overwrite_keys: false
#
#processors:
# - decompress_gzip_field:
# from: "field1"
# to: "field2"
# ignore_missing: false
# fail_on_error: true
#
# The following example copies the value of message to message_copied
#
#processors:
# - copy_fields:
# fields:
# - from: message
# to: message_copied
# fail_on_error: true
# ignore_missing: false
#
# The following example truncates the value of message to 1024 bytes
#
#processors:
# - truncate_fields:
# fields:
# - message
# max_bytes: 1024
# fail_on_error: false
# ignore_missing: true
#
# The following example preserves the raw message under event.original
#
#processors:
# - copy_fields:
# fields:
# - from: message
# to: event.original
# fail_on_error: false
# ignore_missing: true
# - truncate_fields:
# fields:
# - event.original
# max_bytes: 1024
# fail_on_error: false
# ignore_missing: true
#
# The following example URL-decodes the value of field1 to field2
#
#processors:
# - urldecode:
# fields:
# - from: "field1"
# to: "field2"
# ignore_missing: false
# fail_on_error: true
#============================= Elastic Cloud ==================================
# =============================== Elastic Cloud ================================
# These settings simplify using journalbeat with the Elastic Cloud (https://cloud.elastic.co/).
# These settings simplify using Journalbeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
@@ -286,11 +386,11 @@ queue:
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
#================================ Outputs ======================================
# ================================== Outputs ===================================
# Configure what output to use when sending the data collected by the beat.
#-------------------------- Elasticsearch output -------------------------------
# ---------------------------- Elasticsearch Output ----------------------------
#output.elasticsearch:
# Boolean flag to enable or disable the output module.
#enabled: true
@@ -307,8 +407,11 @@ queue:
# Configure escaping HTML symbols in strings.
#escape_html: false
# Optional protocol and basic auth credentials.
# Protocol - either `http` (default) or `https`.
#protocol: "https"
# Authentication credentials - either API key or username/password.
#api_key: "id:api_key"
#username: "elastic"
#password: "changeme"
@@ -338,6 +441,11 @@ queue:
# Proxy server URL
#proxy_url: http://proxy:3128
# Whether to disable proxy settings for outgoing connections. If true, this
# takes precedence over both the proxy_url field and any environment settings
# (HTTP_PROXY, HTTPS_PROXY). The default is false.
#proxy_disable: false
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
@@ -364,15 +472,23 @@ queue:
# Use SSL settings for HTTPS.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL-based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
# Controls the verification of certificates. Valid values are:
# * full, which verifies that the provided certificate is signed by a trusted
# authority (CA) and also verifies that the server's hostname (or IP address)
# matches the names identified within the certificate.
# * certificate, which verifies that the provided certificate is signed by a
# trusted authority (CA), but does not perform any hostname verification.
# * none, which performs no verification of the server's certificate. This
# mode disables many of the security benefits of SSL/TLS and should only be used
# after very careful consideration. It is primarily intended as a temporary
# diagnostic mechanism when attempting to resolve TLS errors; its use in
# production environments is strongly discouraged.
# The default value is full.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of supported/valid TLS versions. By default all TLS versions from 1.1
# up to 1.3 are enabled.
#ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
@@ -396,11 +512,37 @@ queue:
# never, once, and freely. Default is never.
#ssl.renegotiation: never
# Configure a pin that can be used to do extra validation of the verified certificate chain,
# this allows you to ensure that a specific certificate is used to validate the chain of trust.
#
# The pin is a base64 encoded string of the SHA-256 fingerprint.
#ssl.ca_sha256: ""
#----------------------------- Logstash output ---------------------------------
# Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
#kerberos.enabled: true
# Authentication type to use with Kerberos. Available options: keytab, password.
#kerberos.auth_type: password
# Path to the keytab file. It is used when auth_type is set to keytab.
#kerberos.keytab: /etc/elastic.keytab
# Path to the Kerberos configuration.
#kerberos.config_path: /etc/krb5.conf
# Name of the Kerberos user.
#kerberos.username: elastic
# Password of the Kerberos user. It is used when auth_type is set to password.
#kerberos.password: changeme
# Kerberos realm.
#kerberos.realm: ELASTIC
# ------------------------------ Logstash Output -------------------------------
{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count, 'journalbeat') }}
#------------------------------- Kafka output ----------------------------------
# -------------------------------- Kafka Output --------------------------------
#output.kafka:
# Boolean flag to enable or disable the output module.
#enabled: true
@@ -435,7 +577,11 @@ queue:
#username: ''
#password: ''
# Kafka version journalbeat is assumed to run against. Defaults to the "1.0.0".
# SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512.
# Defaults to PLAIN when `username` and `password` are configured.
#sasl.mechanism: ''
# Kafka version Journalbeat is assumed to run against. Defaults to "1.0.0".
#version: '1.0.0'
# Configure JSON encoding
@@ -459,8 +605,8 @@ queue:
# Refresh metadata interval. Defaults to every 10 minutes.
#refresh_frequency: 10m
# Strategy for fetching the topics metadata from the broker. Default is true.
#full: true
# Strategy for fetching the topics metadata from the broker. Default is false.
#full: false
# The number of concurrent load-balanced Kafka output workers.
#worker: 1
@@ -472,10 +618,25 @@ queue:
# until all events are published. The default is 3.
#max_retries: 3
# The number of seconds to wait before trying to republish to Kafka
# after a network error. After waiting backoff.init seconds, the Beat
# tries to republish. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful publish, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s
# The maximum number of seconds to wait before attempting to republish to
# Kafka after a network error. The default is 60s.
#backoff.max: 60s
# The maximum number of events to bulk in a single Kafka request. The default
# is 2048.
#bulk_max_size: 2048
# Duration to wait before sending bulk Kafka request. 0 is no delay. The default
# is 0.
#bulk_flush_frequency: 0s
# The number of seconds to wait for responses from the Kafka brokers before
# timing out. The default is 30s.
#timeout: 30s
@@ -514,30 +675,37 @@ queue:
# purposes. The default is "beats".
#client_id: beats
# Enable SSL support. SSL is automatically enabled if any SSL setting is set.
# Use SSL settings for HTTPS.
#ssl.enabled: true
# Optional SSL configuration options. SSL is off by default.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
# Controls the verification of certificates. Valid values are:
# * full, which verifies that the provided certificate is signed by a trusted
# authority (CA) and also verifies that the server's hostname (or IP address)
# matches the names identified within the certificate.
# * certificate, which verifies that the provided certificate is signed by a
# trusted authority (CA), but does not perform any hostname verification.
# * none, which performs no verification of the server's certificate. This
# mode disables many of the security benefits of SSL/TLS and should only be used
# after very careful consideration. It is primarily intended as a temporary
# diagnostic mechanism when attempting to resolve TLS errors; its use in
# production environments is strongly discouraged.
# The default value is full.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of supported/valid TLS versions. By default all TLS versions from 1.1
# up to 1.3 are enabled.
#ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
# Client certificate key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
# Optional passphrase for decrypting the certificate key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
@@ -550,7 +718,38 @@ queue:
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#------------------------------- Redis output ----------------------------------
# Configure a pin that can be used to do extra validation of the verified certificate chain,
# this allows you to ensure that a specific certificate is used to validate the chain of trust.
#
# The pin is a base64 encoded string of the SHA-256 fingerprint.
#ssl.ca_sha256: ""
# Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
#kerberos.enabled: true
# Authentication type to use with Kerberos. Available options: keytab, password.
#kerberos.auth_type: password
# Path to the keytab file. It is used when auth_type is set to keytab.
#kerberos.keytab: /etc/security/keytabs/kafka.keytab
# Path to the Kerberos configuration.
#kerberos.config_path: /etc/krb5.conf
# The service name. Service principal name is constructed from
# service_name/hostname@realm.
#kerberos.service_name: kafka
# Name of the Kerberos user.
#kerberos.username: elastic
# Password of the Kerberos user. It is used when auth_type is set to password.
#kerberos.password: changeme
# Kerberos realm.
#kerberos.realm: ELASTIC
# -------------------------------- Redis Output --------------------------------
#output.redis:
# Boolean flag to enable or disable the output module.
#enabled: true
@@ -566,6 +765,8 @@ queue:
# The list of Redis servers to connect to. If load-balancing is enabled, the
# events are distributed to the servers in the list. If one server becomes
# unreachable, the events are distributed to the reachable servers only.
# The hosts setting supports redis and rediss URLs with a custom password, e.g.
# redis://:password@localhost:6379.
#hosts: ["localhost:6379"]
# The name of the Redis list or channel the events are published to. The
@@ -630,43 +831,57 @@ queue:
# occurs on the proxy server.
#proxy_use_local_resolver: false
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
# Use SSL settings for HTTPS.
#ssl.enabled: true
# Controls the verification of certificates. Valid values are:
# * full, which verifies that the provided certificate is signed by a trusted
# authority (CA) and also verifies that the server's hostname (or IP address)
# matches the names identified within the certificate.
# * certificate, which verifies that the provided certificate is signed by a
# trusted authority (CA), but does not perform any hostname verification.
# * none, which performs no verification of the server's certificate. This
# mode disables many of the security benefits of SSL/TLS and should only be used
# after very careful consideration. It is primarily intended as a temporary
# diagnostic mechanism when attempting to resolve TLS errors; its use in
# production environments is strongly discouraged.
# The default value is full.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions from 1.1
# up to 1.3 are enabled.
#ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
# Optional SSL configuration options. SSL is off by default.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client certificate key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the certificate key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE-based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
# Configure a pin that can be used to do extra validation of the verified certificate chain,
# this allows you to ensure that a specific certificate is used to validate the chain of trust.
#
# The pin is a base64 encoded string of the SHA-256 fingerprint.
#ssl.ca_sha256: ""
# -------------------------------- File Output ---------------------------------
#output.file:
# Boolean flag to enable or disable the output module.
#enabled: true
#filename: journalbeat
# Maximum size in kilobytes of each file. When this size is reached, and on
# every Journalbeat restart, the files are rotated. The default value is 10240
# kB.
#rotate_every_kb: 10000
# Permissions to use for file creation. The default is 0600.
#permissions: 0600
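# A minimal file output sketch (illustrative values; the path shown is an
# assumption, not a verified default):
#output.file:
#  enabled: true
#  path: "/tmp/journalbeat"
#  filename: journalbeat
#  rotate_every_kb: 10240
#  number_of_files: 7
#  permissions: 0600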
# ------------------------------- Console Output -------------------------------
#output.console:
# Boolean flag to enable or disable the output module.
#enabled: true
# Configure escaping HTML symbols in strings.
#escape_html: false
# =================================== Paths ====================================
# The home path for the Journalbeat installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution (for example, the sample dashboards).
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:
# The configuration path for the Journalbeat installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}
# The data path for the Journalbeat installation. This is the default base path
# for all the files in which Journalbeat needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data
# The logs path for a Journalbeat installation. This is the default location for
# the Beat's log files. If not set by a CLI flag or in the configuration file,
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs
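# For example (illustrative paths), to keep state and logs on a dedicated
# volume while leaving the home path at the binary location:
#path.data: /var/lib/journalbeat
#path.logs: /var/log/journalbeat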
# ================================== Keystore ==================================
# Location of the Keystore containing the keys and their sensitive values.
#keystore.path: "${path.config}/beats.keystore"
# ================================= Dashboards =================================
{{ elk_macros.setup_dashboards('journalbeat') }}
# ================================== Template ==================================
{{ elk_macros.setup_template('journalbeat', inventory_hostname, data_nodes, elasticsearch_beat_settings) }}
# ====================== Index Lifecycle Management (ILM) ======================
# Configure index lifecycle management (ILM). These settings create a write
# alias and add additional settings to the index template. When ILM is enabled,
# output.elasticsearch.index is ignored, and the write alias is used to set the
# index name.
# Enable ILM support. Valid values are true, false, and auto. When set to auto
# (the default), the Beat uses index lifecycle management when it connects to a
# cluster that supports ILM; otherwise, it creates daily indices.
#setup.ilm.enabled: auto
# Set the prefix used in the index lifecycle write alias name. The default alias
# name is 'journalbeat-%{[agent.version]}'.
#setup.ilm.rollover_alias: 'journalbeat'
# Set the rollover index pattern. The default is "%{now/d}-000001".
#setup.ilm.pattern: "{now/d}-000001"
{% if ilm_policy_name is defined %}
# Set the lifecycle policy name. The default policy name is
# 'journalbeat'.
setup.ilm.policy_name: "{{ ilm_policy_name }}"
{% endif %}
{% if ilm_policy_file_location is defined %}
# The path to a JSON file that contains a lifecycle policy configuration. Used
# to load your own lifecycle policy.
setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}"
{% endif %}
# Disable the check for an existing lifecycle policy. The default is true. If
# you disable this check, set setup.ilm.overwrite: true so the lifecycle policy
# can be installed.
#setup.ilm.check_exists: true
# Overwrite the lifecycle policy at startup. The default is false.
#setup.ilm.overwrite: false
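# Putting it together (an illustrative sketch, not this role's defaults):
#setup.ilm.enabled: auto
#setup.ilm.rollover_alias: "journalbeat"
#setup.ilm.pattern: "{now/d}-000001"
# would create indices such as journalbeat-2021.03.02-000001 behind the
# write alias "journalbeat".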
# =================================== Kibana ===================================
{% if (groups['kibana'] | length) > 0 %}
{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }}
{% endif %}
# ================================== Logging ===================================
{{ elk_macros.beat_logging('journalbeat', journalbeat_log_level) }}
# ============================= X-Pack Monitoring ==============================
{{ elk_macros.xpack_monitoring_elasticsearch('journalbeat', inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}
# =============================== HTTP Endpoint ================================
# Each beat can expose internal metrics through an HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
# append ?pretty to the URL.
# Defines if the HTTP endpoint is enabled.
#http.enabled: false
# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
# When using IP addresses, it is recommended to only use localhost.
#http.host: localhost
# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066
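# For example (an illustrative, localhost-only configuration):
#http.enabled: true
#http.host: localhost
#http.port: 5066
# Metrics would then be served at http://localhost:5066/stats.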
# Define which user should be owning the named pipe.
#http.named_pipe.user:
# Define the permissions that should be applied to the named pipe, using the
# Security Descriptor Definition Language (SDDL). This option cannot be used
# with `http.user`.
#http.named_pipe.security_descriptor:
# ============================== Process Security ==============================
# Enable or disable seccomp system call filtering on Linux. Default is enabled.
#seccomp.enabled: true
# ============================== Instrumentation ===============================
# Instrumentation support for journalbeat.
#instrumentation:
# Set to true to enable instrumentation of journalbeat.
#enabled: false
# Environment in which journalbeat is running (e.g. staging, production, etc.)
#environment: ""
# APM Server hosts to report instrumentation results to.
#hosts:
# - http://localhost:8200
# API Key for the APM Server(s).
# If api_key is set then secret_token will be ignored.
#api_key:
# Secret token for the APM Server(s).
#secret_token:
# Enable profiling of the server, recording profile samples as events.
#
# This feature is experimental.
#profiling:
#cpu:
# Set to true to enable CPU profiling.
#enabled: false
#interval: 60s
#duration: 10s
#heap:
# Set to true to enable heap profiling.
#enabled: false
#interval: 60s
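# A minimal sketch (illustrative values; the APM server URL and token are
# placeholders, not defaults):
#instrumentation:
#  enabled: true
#  environment: production
#  hosts:
#    - http://apm-server.example.com:8200
#  secret_token: example-token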
# ================================= Migration ==================================
# This allows enabling 6.7 migration aliases
#migration.6_to_7.enabled: false

View File

{% macro output_elasticsearch(beat_name, host, data_hosts) -%}
output.elasticsearch:
# Boolean flag to enable or disable the output module.
enabled: true
# Set gzip compression level.
compression_level: 3
# Configure escaping HTML symbols in strings.
#escape_html: false
# Protocol - either `http` (default) or `https`.
#protocol: "https"
# Authentication credentials - either API key or username/password.
#api_key: "id:api_key"
#username: "elastic"
#password: "changeme"
# Dictionary of HTTP parameters to pass within the URL with index operations.
#parameters:
#param1: value1
#param2: value2
# Number of workers per Elasticsearch host.
worker: 1
# Optional index name. The default is "apm" plus date
# and generates [apm-]YYYY.MM.DD keys.
# Optional index name. The default is "{{ beat_name }}" plus date
# and generates [{{ beat_name }}-]YYYY.MM.DD keys.
# In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
#index: "apm-%{[beat.version]}-%{+yyyy.MM.dd}"
#index: "{{ beat_name }}-%{[agent.version]}-%{+yyyy.MM.dd}"
# Optional ingest node pipeline. By default no pipeline will be used.
#pipeline: ""
# Optional HTTP path
#path: "/elasticsearch"
# Custom HTTP headers to add to each request
#headers:
# X-My-Header: Contents of the header
# Proxy server URL
#proxy_url: http://proxy:3128
# Whether to disable proxy settings for outgoing connections. If true, this
# takes precedence over both the proxy_url field and any environment settings
# (HTTP_PROXY, HTTPS_PROXY). The default is false.
#proxy_disable: false
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
# The default is 50.
#bulk_max_size: 50
# The number of seconds to wait before trying to reconnect to Elasticsearch
# after a network error. After waiting backoff.init seconds, the Beat
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s
# The maximum number of seconds to wait before attempting to connect to
# Elasticsearch after a network error. The default is 60s.
#backoff.max: 60s
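# With the defaults above, successive reconnect waits double each time: 1s,
# 2s, 4s, 8s, 16s, 32s, then stay capped at 60s until a connection succeeds.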
# Configure HTTP request timeout before failing a request to Elasticsearch.
#timeout: 90
# Use SSL settings for HTTPS.
#ssl.enabled: true
# Controls the verification of certificates. Valid values are:
# * full, which verifies that the provided certificate is signed by a trusted
# authority (CA) and also verifies that the server's hostname (or IP address)
# matches the names identified within the certificate.
# * certificate, which verifies that the provided certificate is signed by a
# trusted authority (CA), but does not perform any hostname verification.
# * none, which performs no verification of the server's certificate. This
# mode disables many of the security benefits of SSL/TLS and should only be used
# after very careful consideration. It is primarily intended as a temporary
# diagnostic mechanism when attempting to resolve TLS errors; its use in
# production environments is strongly discouraged.
# The default value is full.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions from 1.1
# up to 1.3 are enabled.
#ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
# SSL configuration. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client certificate key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the certificate key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE-based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
# Configure a pin that can be used to do extra validation of the verified certificate chain,
# this allows you to ensure that a specific certificate is used to validate the chain of trust.
#
# The pin is a base64 encoded string of the SHA-256 fingerprint.
#ssl.ca_sha256: ""
# Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
#kerberos.enabled: true
# Authentication type to use with Kerberos. Available options: keytab, password.
#kerberos.auth_type: password
# Path to the keytab file. It is used when auth_type is set to keytab.
#kerberos.keytab: /etc/elastic.keytab
# Path to the Kerberos configuration.
#kerberos.config_path: /etc/krb5.conf
# Name of the Kerberos user.
#kerberos.username: elastic
# Password of the Kerberos user. It is used when auth_type is set to password.
#kerberos.password: changeme
# Kerberos realm.
#kerberos.realm: ELASTIC
{%- endmacro %}
{% macro output_logstash(host, data_hosts, processors, named_index) -%}
# Logstash after a network error. The default is 60s.
#backoff.max: 60s
{% if named_index is defined %}
# Optional index name. The default index name is set to {{ named_index }}
# in all lowercase.
index: '{{ named_index }}'
{% endif %}
# Resolve names locally when using a proxy server. Defaults to false.
#proxy_use_local_resolver: false
# Use SSL settings for HTTPS.
#ssl.enabled: true
# Controls the verification of certificates. Valid values are:
# * full, which verifies that the provided certificate is signed by a trusted
# authority (CA) and also verifies that the server's hostname (or IP address)
# matches the names identified within the certificate.
# * certificate, which verifies that the provided certificate is signed by a
# trusted authority (CA), but does not perform any hostname verification.
# * none, which performs no verification of the server's certificate. This
# mode disables many of the security benefits of SSL/TLS and should only be used
# after very careful consideration. It is primarily intended as a temporary
# diagnostic mechanism when attempting to resolve TLS errors; its use in
# production environments is strongly discouraged.
# The default value is full.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions from 1.1
# up to 1.3 are enabled.
#ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
# Optional SSL configuration options. SSL is off by default.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Client certificate key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the certificate key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
# never, once, and freely. Default is never.
#ssl.renegotiation: never
# Configure a pin that can be used to do extra validation of the verified certificate chain,
# this allows you to ensure that a specific certificate is used to validate the chain of trust.
#
# The pin is a base64 encoded string of the SHA-256 fingerprint.
#ssl.ca_sha256: ""
# The number of times to retry publishing an event after a publishing failure.
# After the specified number of retries, the events are typically dropped.
# Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting
# These settings can be adjusted to load your own template or overwrite existing ones.
# Set to false to disable template loading.
setup.template.enabled: {{ (host == data_nodes[0]) | default(false) | lower }}
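# The Jinja expression above renders to "true" only on the first data node,
# so a single host is responsible for loading the template.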
# Select the kind of index template. From Elasticsearch 7.8, it is possible to
# use component templates. Available options: legacy, component, index.
# By default {{ beat_name }} uses the legacy index templates.
#setup.template.type: legacy
# Template name. By default the template name is "{{ beat_name }}-%{[agent.version]}"
# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
setup.template.name: "{{ beat_name }}-%{[agent.version]}"
# Template pattern. By default the template pattern is "-%{[agent.version]}-*" to apply to the default index settings.
# The first part is the version of the beat and then -* is used to match all daily indices.
# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
setup.template.pattern: "{{ beat_name }}-%{[agent.version]}-*"
# Path to fields.yml file to generate the template
setup.template.fields: "${path.config}/fields.yml"
# A list of fields to be added to the template and Kibana index pattern. Also
# specify setup.template.overwrite: true to overwrite the existing template.
#setup.template.append_fields:
#- name: field_name
# type: field_type
# Enable JSON template loading. If this is enabled, the fields.yml is ignored.
#setup.template.json.enabled: false
#setup.template.json.name: ""
# Overwrite existing template
# Do not enable this option for more than one instance of {{ beat_name }} as it might
# overload your Elasticsearch with too many update requests.
setup.template.overwrite: {{ (host == data_nodes[0]) | default(false) | lower }}
{% set shards = elasticsearch_beat_settings.shard_count | int %}
# Elasticsearch template settings
setup.template.settings:
{% if 'max_docvalue_fields_search' in elasticsearch_beat_settings %}
max_docvalue_fields_search: {{ elasticsearch_beat_settings.max_docvalue_fields_search | int }}
{% endif %}
# A dictionary of settings for the _source field. For more details, please check
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
_source:
#username: "elastic"
#password: "changeme"
# Optional HTTP path
#path: ""
# Optional Kibana space ID.
#space.id: ""
# Use SSL settings for HTTPS.
#ssl.enabled: true
# Controls the verification of certificates. Valid values are:
# * full, which verifies that the provided certificate is signed by a trusted
# authority (CA) and also verifies that the server's hostname (or IP address)
# matches the names identified within the certificate.
# * certificate, which verifies that the provided certificate is signed by a
# trusted authority (CA), but does not perform any hostname verification.
# * none, which performs no verification of the server's certificate. This
# mode disables many of the security benefits of SSL/TLS and should only be used
# after very careful consideration. It is primarily intended as a temporary
# diagnostic mechanism when attempting to resolve TLS errors; its use in
# production environments is strongly discouraged.
# The default value is full.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions from 1.1
# up to 1.3 are enabled.
#ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
# SSL configuration. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client certificate key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the certificate key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE-based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
# Configure a pin that can be used to do extra validation of the verified certificate chain,
# this allows you to ensure that a specific certificate is used to validate the chain of trust.
#
# The pin is a base64 encoded string of the SHA-256 fingerprint.
#ssl.ca_sha256: ""
{%- endmacro %}
{% macro beat_logging(beat_name, log_level='info') -%}
logging.level: {{ log_level }}
# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are "beat", "publish", "service"
# Other available selectors are "beat", "publisher", "service"
# Multiple selectors can be chained.
#logging.selectors: [ ]
# Send all logging output to stderr. The default is false.
#logging.to_stderr: false
# Send all logging output to syslog. The default is false.
#logging.to_syslog: false
# Send all logging output to Windows Event Logs. The default is false.
#logging.to_eventlog: false
# If enabled, {{ (beat_name == 'apm-server') | ternary( beat_name, beat_name | capitalize) }} periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
# Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
# are boundary-aligned with minutes, hours, days, weeks, months, and years as
# reported by the local system clock. All other intervals are calculated from the
# Unix epoch. Defaults to disabled.
#interval: 0
# Rotate existing logs on startup rather than appending to the existing
# file. Defaults to true.
# rotateonstartup: true
# Set to true to log messages in JSON format.
#logging.json: false
# Set to true to log messages with the minimal required Elastic Common Schema (ECS)
# information. Recommended for use in combination with `logging.json=true`.
# Defaults to false.
#logging.ecs: false
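# For example (an illustrative combination), machine-readable logs for
# ingestion by another pipeline:
#logging.json: true
#logging.ecs: true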
{%- endmacro %}
{% macro xpack_monitoring_elasticsearch(beat_name, host, data_hosts, processors) -%}
# {{ beat_name | capitalize }} can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
monitoring.enabled: true
# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# {{ beat_name | capitalize }} instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
monitoring.elasticsearch:
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200)
# Set gzip compression level.
compression_level: 9
# Protocol - either `http` (default) or `https`.
#protocol: "https"
# Authentication credentials - either API key or username/password.
#api_key: "id:api_key"
#username: "beats_system"
#password: "changeme"
# Dictionary of HTTP parameters to pass within the URL with index operations.
#parameters:
#param1: value1
#param2: value2
# Elasticsearch after a network error. The default is 60s.
#backoff.max: 60s
# Configure HTTP request timeout before failing a request to Elasticsearch.
timeout: 120
# Use SSL settings for HTTPS.
#ssl.enabled: true
# Controls the verification of certificates. Valid values are:
# * full, which verifies that the provided certificate is signed by a trusted
# authority (CA) and also verifies that the server's hostname (or IP address)
# matches the names identified within the certificate.
# * certificate, which verifies that the provided certificate is signed by a
# trusted authority (CA), but does not perform any hostname verification.
# * none, which performs no verification of the server's certificate. This
# mode disables many of the security benefits of SSL/TLS and should only be used
# after very careful consideration. It is primarily intended as a temporary
# diagnostic mechanism when attempting to resolve TLS errors; its use in
# production environments is strongly discouraged.
# The default value is full.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions from 1.1
# up to 1.3 are enabled.
#ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
# SSL configuration. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client certificate key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the certificate key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE-based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
# Configure a pin that can be used to do extra validation of the verified certificate chain,
# this allows you to ensure that a specific certificate is used to validate the chain of trust.
#
# The pin is a base64 encoded string of the SHA-256 fingerprint.
#ssl.ca_sha256: ""
# Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
#kerberos.enabled: true
# Authentication type to use with Kerberos. Available options: keytab, password.
#kerberos.auth_type: password
# Path to the keytab file. It is used when auth_type is set to keytab.
#kerberos.keytab: /etc/elastic.keytab
# Path to the Kerberos configuration.
#kerberos.config_path: /etc/krb5.conf
# Name of the Kerberos user.
#kerberos.username: elastic
# Password of the Kerberos user. It is used when auth_type is set to password.
#kerberos.password: changeme
# Kerberos realm.
#kerberos.realm: ELASTIC
#metrics.period: 10s
#state.period: 1m
# The `monitoring.cloud.id` setting overwrites the `monitoring.elasticsearch.hosts`
# setting. You can find the value for this setting in the Elastic Cloud web UI.
#monitoring.cloud.id:
# The `monitoring.cloud.auth` setting overwrites the `monitoring.elasticsearch.username`
# and `monitoring.elasticsearch.password` settings. The format is `<user>:<pass>`.
#monitoring.cloud.auth:
{%- endmacro %}
{% macro beat_processors(processors) -%}
# Processors are used to reduce the number of fields in the exported event or to
# enhance the event with external metadata.
processors:
{% if processors is defined and processors is iterable and processors | length > 0 %}
{{ processors | to_yaml }}