########################## Auditbeat Configuration #############################

# This is a reference configuration file documenting all non-deprecated options
# in comments. For a shorter configuration example that contains only the most
# common options, please see auditbeat.yml in the same directory.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/auditbeat/index.html

#============================ Config Reloading ================================

# Config reloading allows you to dynamically load modules. Each monitored file
# must contain one or more modules as a list.
auditbeat.config.modules:

  # Glob pattern for configuration reloading
  path: ${path.config}/conf.d/*.yml

  # Period on which files under path should be checked for changes
  reload.period: 60s

  # Set to true to enable config reloading
  reload.enabled: true
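
  # For example, a drop-in file such as ${path.config}/conf.d/extra.yml
  # (hypothetical name and path) must itself contain a list of modules:
  #
  #- module: file_integrity
  #  paths:
  #  - /opt/custom-app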

# Maximum amount of time to randomly delay the start of a metricset. Use 0 to
# disable startup delay.
auditbeat.max_start_delay: 10s

#========================== Modules configuration =============================
auditbeat.modules:

# The auditd module collects events from the audit framework in the Linux
# kernel. You need to specify audit rules for the events that you want to audit.
- module: auditd
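  # With security hardening applied, the auditd service is expected to be
  # running, so 'multicast' receives a read-only copy of the audit events
  # alongside auditd (requires Linux 3.16+); otherwise 'unicast' makes
  # Auditbeat the sole consumer of the audit socket.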
  socket_type: {{ (apply_security_hardening | default(true) | bool) | ternary('multicast', 'unicast') }}
  resolve_ids: true
  failure_mode: silent
  backlog_limit: 8196
  rate_limit: 0
  include_raw_message: false
  include_warnings: true

{% if not apply_security_hardening | default(true) | bool %}
  audit_rules: |
    ## Define audit rules here.
    ## Create file watches (-w) or syscall audits (-a or -A). Uncomment these
    ## examples or add your own rules.

    ## If you are on a 64 bit platform, everything should be running
    ## in 64 bit mode. This rule will detect any use of the 32 bit syscalls
    ## because this might be a sign of someone exploiting a hole in the 32
    ## bit API.
    -a always,exit -F arch=b32 -S all -F key=32bit-abi

    ## Executions.
    -a always,exit -F arch=b64 -S execve,execveat -k exec

    ## Things that affect identity.
    -w /etc/group -p wa -k identity
    -w /etc/passwd -p wa -k identity
    -w /etc/gshadow -p wa -k identity
    -w /etc/shadow -p wa -k identity

    ## Unauthorized access attempts to files (unsuccessful).
    -a always,exit -F arch=b32 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -F key=access
    -a always,exit -F arch=b32 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -F key=access
    -a always,exit -F arch=b64 -S open,truncate,ftruncate,creat,openat,open_by_handle_at -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -F key=access
    -a always,exit -F arch=b64 -S open,truncate,ftruncate,creat,openat,open_by_handle_at -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -F key=access

{% endif %}

# The file integrity module sends events when files are changed (created,
# updated, deleted). The events contain file metadata and hashes.
- module: file_integrity
  paths:
  - /bin
  - /etc/ansible/roles
  - /etc/apt
  - /etc/apache2
  - /etc/httpd
  - /etc/network
  - /etc/nginx
  - /etc/mysql
  - /etc/openstack_deploy
  - /etc/sysconfig
  - /etc/systemd
  - /etc/uwsgi
  - /etc/yum
  - /etc/zypp
  - /openstack/venvs
  - /opt/openstack-ansible
  - /sbin
  - /usr/bin
  - /usr/local/bin
  - /usr/sbin
  - /var/lib/lxc

  # List of regular expressions to filter out notifications for unwanted files.
  # Wrap in single quotes to work around YAML escaping rules. By default no files
  # are ignored.
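  # The patterns below skip vim swap files, editor backup files ending in
  # '~', and .git directories.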
  exclude_files:
  - '(?i)\.sw[nop]$'
  - '~$'
  - '/\.git($|/)'

  # Scan over the configured file paths at startup and send events for new or
  # modified files since the last time Auditbeat was running.
  scan_at_start: true

  # Average scan rate. This throttles the amount of CPU and I/O that Auditbeat
  # consumes at startup while scanning. Default is "50 MiB".
  scan_rate_per_sec: 64 MiB

  # Limit on the size of files that will be hashed. Default is "100 MiB".
  max_file_size: 128 MiB

  # Hash types to compute when the file changes. Supported types are
  # blake2b_256, blake2b_384, blake2b_512, md5, sha1, sha224, sha256, sha384,
  # sha512, sha512_224, sha512_256, sha3_224, sha3_256, sha3_384 and sha3_512.
  # Default is sha1.
  hash_types: [sha1]

  # Detect changes to files included in subdirectories. Disabled by default.
  recursive: true


#================================ General ======================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
# If this option is not defined, the hostname is used.
#name:

# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output. Fields can be scalar values, arrays, dictionaries, or any nested
# combination of these.
#fields:
#  env: staging

# If this option is set to true, the custom fields are stored as top-level
# fields in the output document instead of being grouped under a fields
# sub-dictionary. Default is false.
#fields_under_root: false
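
# For example (hypothetical values), with
#
#fields_under_root: true
#fields:
#  env: staging
#
# each event carries a top-level `env: staging` instead of `fields.env`.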

# Internal queue configuration for buffering events to be published.
#queue:
  # Queue type by name (default 'mem')
  # The memory queue will present all available events (up to the outputs
  # bulk_max_size) to the output, the moment the output is ready to serve
  # another batch of events.
  #mem:
    # Max number of events the queue can buffer.
    #events: 4096

    # Hints the minimum number of events stored in the queue
    # before providing a batch of events to the outputs.
    # The default value is 2048.
    # A value of 0 ensures events are immediately available
    # to be sent to the outputs.
    #flush.min_events: 2048

    # Maximum duration after which events are available to the outputs,
    # if the number of events stored in the queue is < flush.min_events.
    #flush.timeout: 1s
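
# A minimal sketch combining the options above (illustrative values): buffer
# up to 4096 events, flushing once 512 are queued or every 5s:
#
#queue:
#  mem:
#    events: 4096
#    flush.min_events: 512
#    flush.timeout: 5s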

# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:

#================================ Processors ===================================

# Processors are used to reduce the number of fields in the exported event or to
# enhance the event with external metadata. This section defines a list of
# processors that are applied one by one and the first one receives the initial
# event:
#
#   event -> filter1 -> event1 -> filter2 -> event2 ...
#
# The supported processors are drop_fields, drop_event, include_fields, and
# add_cloud_metadata.
#
# For example, you can use the following processors to keep the fields that
# contain CPU load percentages, but remove the fields that contain CPU ticks
# values:
#
#processors:
#- include_fields:
#    fields: ["cpu"]
#- drop_fields:
#    fields: ["cpu.user", "cpu.system"]
#
# The following example drops the events that have the HTTP response code 200:
#
#processors:
#- drop_event:
#    when:
#      equals:
#        http.code: 200
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, DigitalOcean,
# Tencent Cloud, and Alibaba Cloud.
#
#processors:
#- add_cloud_metadata: ~
#
# The following example enriches each event with the machine's local time zone
# offset from UTC.
#
#processors:
#- add_locale:
#    format: offset
#
# The following example enriches each event with Docker metadata. It matches
# the given fields to an existing container id and adds info from that container:
#
#processors:
#- add_docker_metadata:
#    host: "unix:///var/run/docker.sock"
#    match_fields: ["system.process.cgroup.id"]
#    match_pids: ["process.pid", "process.ppid"]
#    match_source: true
#    match_source_index: 4
#    match_short_id: false
#    cleanup_timeout: 60
#    # To connect to Docker over TLS you must specify a client and CA certificate.
#    #ssl:
#    #  certificate_authority: "/etc/pki/root/ca.pem"
#    #  certificate: "/etc/pki/client/cert.pem"
#    #  key: "/etc/pki/client/cert.key"
#
# The following example enriches each event with Docker metadata. It matches
# the container id from the log path available in the `source` field (by default
# it expects it to be /var/lib/docker/containers/*/*.log).
#
#processors:
#- add_docker_metadata: ~
#- add_host_metadata: ~

#============================= Elastic Cloud ==================================

# These settings simplify using auditbeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

#================================ Outputs ======================================

# Configure what output to use when sending the data collected by the beat.

#-------------------------- Elasticsearch output -------------------------------
#output.elasticsearch:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Array of hosts to connect to.
  # Scheme and port can be left out and will be set to the default (http and 9200)
  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  #hosts: ["localhost:9200"]

  # Set gzip compression level.
  #compression_level: 0

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

  # Dictionary of HTTP parameters to pass within the url with index operations.
  #parameters:
  #  param1: value1
  #  param2: value2

  # Number of workers per Elasticsearch host.
  #worker: 1

  # Optional index name. The default is "auditbeat" plus date
  # and generates [auditbeat-]YYYY.MM.DD keys.
  # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
  #index: "auditbeat-%{[beat.version]}-%{+yyyy.MM.dd}"
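
  # For example, if you set a custom index name (hypothetical), the top-level
  # template settings must be kept in sync with it:
  #
  #index: "custombeat-%{+yyyy.MM.dd}"
  #setup.template.name: "custombeat"
  #setup.template.pattern: "custombeat-*"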

  # Optional ingest node pipeline. By default no pipeline will be used.
  #pipeline: ""

  # Optional HTTP Path
  #path: "/elasticsearch"

  # Custom HTTP headers to add to each request
  #headers:
  #  X-My-Header: Contents of the header

  # Proxy server url
  #proxy_url: http://proxy:3128

  # The number of times a particular Elasticsearch index operation is attempted. If
  # the indexing operation doesn't succeed after this many retries, the events are
  # dropped. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
  # The default is 50.
  #bulk_max_size: 50

  # Configure http request timeout before failing a request to Elasticsearch.
  #timeout: 90

  # Use SSL settings for HTTPS.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # SSL configuration. It is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never


#----------------------------- Logstash output ---------------------------------
{% include 'templates/_include_log_stash_output.yml.j2' %}

#------------------------------- Kafka output ----------------------------------
#output.kafka:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The list of Kafka broker addresses from where to fetch the cluster metadata.
  # The cluster metadata contain the actual Kafka brokers events are published
  # to.
  #hosts: ["localhost:9092"]

  # The Kafka topic used for produced events. The setting can be a format string
  # using any event field. To set the topic from document type use `%{[type]}`.
  #topic: beats
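
  # For example, to route events by a custom field (assumes a `fields.log_topic`
  # value is set on each event):
  #
  #topic: '%{[fields.log_topic]}'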

  # The Kafka event key setting. Use a format string to create a unique event key.
  # By default no event key will be generated.
  #key: ''

  # The Kafka event partitioning strategy. The default hashing strategy is `hash`
  # using the `output.kafka.key` setting, or randomly distributes events if
  # `output.kafka.key` is not configured.
  #partition.hash:
    # If enabled, events will only be published to partitions with reachable
    # leaders. Default is false.
    #reachable_only: false

    # Configure alternative event field names used to compute the hash value.
    # If empty, the `output.kafka.key` setting will be used.
    # Default value is an empty list.
    #hash: []
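
  # Alternative partitioning strategies can be configured instead, e.g. a
  # round-robin sketch:
  #
  #partition.round_robin:
  #  reachable_only: false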

  # Authentication details. Password is required if username is set.
  #username: ''
  #password: ''

  # Kafka version auditbeat is assumed to run against. Defaults to the oldest
  # supported stable version (currently version 0.8.2.0)
  #version: 0.8.2

  # Metadata update configuration. The metadata contains the leader information
  # used to decide which broker to publish to.
  #metadata:
    # Max metadata request retry attempts when the cluster is in the middle of a
    # leader election. Defaults to 3 retries.
    #retry.max: 3

    # Waiting time between retries during leader elections. Default is 250ms.
    #retry.backoff: 250ms

    # Refresh metadata interval. Defaults to every 10 minutes.
    #refresh_frequency: 10m

  # The number of concurrent load-balanced Kafka output workers.
  #worker: 1

  # The number of times to retry publishing an event after a publishing failure.
  # After the specified number of retries, the events are typically dropped.
  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
  # all events are published. Set max_retries to a value less than 0 to retry
  # until all events are published. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Kafka request. The default
  # is 2048.
  #bulk_max_size: 2048

  # The number of seconds to wait for responses from the Kafka brokers before
  # timing out. The default is 30s.
  #timeout: 30s

  # The maximum duration a broker will wait for the number of required ACKs. The
  # default is 10s.
  #broker_timeout: 10s

  # The number of messages buffered for each Kafka broker. The default is 256.
  #channel_buffer_size: 256

  # The keep-alive period for an active network connection. If 0s, keep-alives
  # are disabled. The default is 0 seconds.
  #keep_alive: 0

  # Sets the output compression codec. Must be one of none, snappy, or gzip. The
  # default is gzip.
  #compression: gzip

  # The maximum permitted size of JSON-encoded messages. Bigger messages will be
  # dropped. The default value is 1000000 (bytes). This value should be equal to
  # or less than the broker's message.max.bytes.
  #max_message_bytes: 1000000

  # The ACK reliability level required from broker. 0=no response, 1=wait for
  # local commit, -1=wait for all replicas to commit. The default is 1. Note:
  # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
  # on error.
  #required_acks: 1

  # The configurable ClientID used for logging, debugging, and auditing
  # purposes. The default is "beats".
  #client_id: beats

  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
  #ssl.enabled: true

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

#------------------------------- Redis output ----------------------------------
#output.redis:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The list of Redis servers to connect to. If load balancing is enabled, the
  # events are distributed to the servers in the list. If one server becomes
  # unreachable, the events are distributed to the reachable servers only.
  #hosts: ["localhost:6379"]

  # The Redis port to use if hosts does not contain a port number. The default
  # is 6379.
  #port: 6379

  # The name of the Redis list or channel the events are published to. The
  # default is auditbeat.
  #key: auditbeat

  # The password to authenticate with. The default is no authentication.
  #password:

  # The Redis database number where the events are published. The default is 0.
  #db: 0

  # The Redis data type to use for publishing events. If the data type is list,
  # the Redis RPUSH command is used. If the data type is channel, the Redis
  # PUBLISH command is used. The default value is list.
  #datatype: list

  # The number of workers to use for each host configured to publish events to
  # Redis. Use this setting along with the loadbalance option. For example, if
  # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
  # host).
  #worker: 1

  # If set to true and multiple hosts or workers are configured, the output
  # plugin load balances published events onto all Redis hosts. If set to false,
  # the output plugin sends all events to only one host (determined at random)
  # and will switch to another host if the currently selected one becomes
  # unreachable. The default value is true.
  #loadbalance: true

  # The Redis connection timeout in seconds. The default is 5 seconds.
  #timeout: 5s

  # The number of times to retry publishing an event after a publishing failure.
  # After the specified number of retries, the events are typically dropped.
  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
  # all events are published. Set max_retries to a value less than 0 to retry
  # until all events are published. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Redis request or pipeline.
  # The default is 2048.
  #bulk_max_size: 2048

  # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The
  # value must be a URL with a scheme of socks5://.
  #proxy_url:

  # This option determines whether Redis hostnames are resolved locally when
  # using a proxy. The default value is false, which means that name resolution
  # occurs on the proxy server.
  #proxy_use_local_resolver: false

  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

#------------------------------- File output -----------------------------------
#output.file:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Path to the directory where the generated files are saved. This option is
  # mandatory.
  #path: "/tmp/auditbeat"

  # Name of the generated files. The default is `auditbeat` and it generates
  # files: `auditbeat`, `auditbeat.1`, `auditbeat.2`, etc.
  #filename: auditbeat

  # Maximum size in kilobytes of each file. When this size is reached, and on
  # every auditbeat restart, the files are rotated. The default value is 10240
  # kB.
  #rotate_every_kb: 10000

  # Maximum number of files under path. When this number of files is reached,
  # the oldest file is deleted and the rest are shifted from last to first. The
  # default is 7 files.
  #number_of_files: 7

  # Permissions to use for file creation. The default is 0600.
  #permissions: 0600
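
# A minimal sketch of an enabled file output with rotation (illustrative
# values):
#
#output.file:
#  enabled: true
#  path: "/tmp/auditbeat"
#  filename: auditbeat
#  rotate_every_kb: 10000
#  number_of_files: 7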


#----------------------------- Console output ---------------------------------
#output.console:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Pretty-print JSON events
  #pretty: false

#================================= Paths ======================================

# The home path for the auditbeat installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution (for example, the sample dashboards).
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:

# The configuration path for the auditbeat installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}

# The data path for the auditbeat installation. This is the default base path
# for all the files in which auditbeat needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data

# The logs path for an auditbeat installation. This is the default location for
# the Beat's log files. If not set by a CLI flag or in the configuration file,
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs

#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
setup.dashboards.enabled: true

# The directory from where to read the dashboards. The default is the `kibana`
# folder in the home path.
#setup.dashboards.directory: ${path.home}/kibana

# The URL from where to download the dashboards archive. It is used instead of
# the directory if it has a value.
#setup.dashboards.url:

# The file archive (zip file) from where to read the dashboards. It is used instead
# of the directory when it has a value.
#setup.dashboards.file:

# In case the archive contains the dashboards from multiple Beats, this lets you
# select which one to load. You can load all the dashboards in the archive by
# setting this to the empty string.
#setup.dashboards.beat: auditbeat

# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
#setup.dashboards.kibana_index: .kibana

# The Elasticsearch index name. This overwrites the index name defined in the
# dashboards and index pattern. Example: testbeat-*
#setup.dashboards.index:

# Always use the Kibana API for loading the dashboards instead of autodetecting
# how to install the dashboards by first querying Elasticsearch.
#setup.dashboards.always_kibana: false

# If true and Kibana is not reachable at the time when dashboards are loaded,
# it will retry to reconnect to Kibana instead of exiting with an error.
#setup.dashboards.retry.enabled: false

# Duration interval between Kibana connection retries.
#setup.dashboards.retry.interval: 1s

# Maximum number of retries before exiting with an error, 0 for unlimited retrying.
#setup.dashboards.retry.maximum: 0


#============================== Template =====================================

# A template is used to set the mapping in Elasticsearch.
# By default template loading is enabled and the template is loaded.
# These settings can be adjusted to load your own template or overwrite existing ones.

# Set to false to disable template loading.
setup.template.enabled: true

# Template name. By default the template name is "auditbeat-%{[beat.version]}"
# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
setup.template.name: "auditbeat-%{[beat.version]}"

# Template pattern. By default the template pattern is "auditbeat-%{[beat.version]}-*" to apply to the default index settings.
# The first part is the version of the beat and then -* is used to match all daily indices.
# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
setup.template.pattern: "auditbeat-%{[beat.version]}-*"

# Path to fields.yml file to generate the template
setup.template.fields: "${path.config}/fields.yml"

# Overwrite existing template
setup.template.overwrite: true

{% include 'templates/_include_setup_template.yml.j2' %}

#============================== Kibana =====================================
{% include 'templates/_include_kibana_setup.yml.j2' %}

#================================ Logging ======================================
{% with beat_name="auditbeat" %}
{% include 'templates/_include_beat_logging.yml.j2' %}
{% endwith %}

#============================== Xpack Monitoring =====================================
# auditbeat can export internal metrics to a central Elasticsearch monitoring cluster.
# This requires xpack monitoring to be enabled in Elasticsearch.
# The reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#xpack.monitoring.enabled: false

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well. Any setting that is not set is
# automatically inherited from the Elasticsearch output configuration, so if you
# have the Elasticsearch output configured, you can simply uncomment the
# following line, and leave the rest commented out.
#xpack.monitoring.elasticsearch:

  # Array of hosts to connect to.
  # Scheme and port can be left out and will be set to the default (http and 9200)
  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  #hosts: ["localhost:9200"]

  # Set gzip compression level.
  #compression_level: 0

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "beats_system"
  #password: "changeme"

  # Dictionary of HTTP parameters to pass within the url with index operations.
  #parameters:
  #  param1: value1
  #  param2: value2

  # Custom HTTP headers to add to each request
  #headers:
  #  X-My-Header: Contents of the header

  # Proxy server url
  #proxy_url: http://proxy:3128

  # The number of times a particular Elasticsearch index operation is attempted. If
  # the indexing operation doesn't succeed after this many retries, the events are
  # dropped. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
  # The default is 50.
  #bulk_max_size: 50

  # Configure http request timeout before failing a request to Elasticsearch.
  #timeout: 90

  # Use SSL settings for HTTPS.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # SSL configuration. It is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through an HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed through http://localhost:5066/stats . For pretty JSON output
# append ?pretty to the URL.

# Defines if the HTTP endpoint is enabled.
#http.enabled: false

# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
#http.host: localhost

# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066
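
# With the endpoint enabled, the stats above can be fetched with, for example:
# curl 'http://localhost:5066/stats?pretty'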