diff --git a/elk_metrics_6x/common_task_install_go1.10.1.yml b/elk_metrics_6x/common_task_install_go1.10.1.yml
new file mode 100644
index 00000000..71abb7d4
--- /dev/null
+++ b/elk_metrics_6x/common_task_install_go1.10.1.yml
@@ -0,0 +1,56 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- name: Check for go
+  stat:
+    path: /opt/go1.10.1/go/bin/go
+  register: go_path
+
+- name: Install go
+  block:
+    - name: GET go
+      get_url:
+        url: "https://dl.google.com/go/go1.10.1.linux-amd64.tar.gz"
+        dest: "/opt/go1.10.1.linux-amd64.tar.gz"
+        checksum: "sha256:72d820dec546752e5a8303b33b009079c15c2390ce76d67cf514991646c6127b"
+
+    - name: Create go directory
+      file:
+        path: "/opt/go1.10.1"
+        state: directory
+
+    - name: Unarchive go
+      unarchive:
+        src: "/opt/go1.10.1.linux-amd64.tar.gz"
+        dest: "/opt/go1.10.1"
+        remote_src: yes
+
+    - name: Create go defaults file
+      copy:
+        content: |
+          GOROOT=/opt/go1.10.1/go
+          GOPATH=/usr/local
+          PATH=${PATH}:${GOROOT}/bin
+        dest: /etc/default/go1.10.1
+  when:
+    - (elk_package_state | default('present')) == 'present'
+    - not go_path.stat.exists | bool
+
+- name: Remove go
+  file:
+    path: "/opt/go1.10.1"
+    state: absent
+  when:
+    - (elk_package_state | default('present')) == 'absent'
diff --git a/elk_metrics_6x/installJournalbeat.yml b/elk_metrics_6x/installJournalbeat.yml
new file mode 100644
index 00000000..e47947e8
--- /dev/null
+++ b/elk_metrics_6x/installJournalbeat.yml
@@ -0,0 +1,145 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- name: Install Journalbeat
+  hosts: hosts
+  become: true
+  vars:
+    haproxy_ssl: false
+
+  vars_files:
+    - vars/variables.yml
+
+  pre_tasks:
+    - include_tasks: common_task_install_go1.10.1.yml
+
+    - name: Ensure git and libsystemd-dev are installed
+      apt:
+        name: "{{ item }}"
+        state: "{{ elk_package_state | default('present') }}"
+        update_cache: true
+      with_items:
+        - git
+        - libsystemd-dev
+      tags:
+        - package_install
+
+    - name: Exit playbook after uninstall
+      meta: end_play
+      when:
+        - elk_package_state | default('present') == 'absent'
+        - ansible_service_mgr == "systemd"
+
+  tasks:
+    - name: Create the journalbeat system group
+      group:
+        name: "journalbeat"
+        state: "present"
+        system: "yes"
+
+    - name: Create the journalbeat system user
+      user:
+        name: "journalbeat"
+        group: "journalbeat"
+        comment: "journalbeat user"
+        shell: "/bin/false"
+        createhome: "yes"
+        home: "/usr/share/journalbeat"
+
+    - name: Create journalbeat data path
+      file:
+        path: "{{ item }}"
+        state: directory
+        owner: "journalbeat"
+        group: "journalbeat"
+        mode: "0755"
+      with_items:
+        - "/usr/share/journalbeat"
+        - "/var/lib/journalbeat"
+        - "/var/log/journalbeat"
+        - "/etc/journalbeat"
+
+    - name: Install journalbeat
+      shell: |
+        . /etc/default/go1.10.1
+        /opt/go1.10.1/go/bin/go get -v github.com/mheese/journalbeat
+
+    - name: Drop journalbeat conf file
+      template:
+        src: "{{ item.src }}"
+        dest: "{{ item.dest }}"
+      with_items:
+        - src: templates/journalbeat.yml.j2
+          dest: /etc/journalbeat/journalbeat.yml
+
+    - name: Run the systemd service role
+      include_role:
+        name: systemd_service
+        private: true
+      vars:
+        systemd_service_restart_changed: false
+        systemd_services:
+          - service_name: "journalbeat"
+            execstarts:
+              - /usr/local/bin/journalbeat
+                -c /etc/journalbeat/journalbeat.yml
+                -path.home /usr/share/journalbeat
+                -path.config /etc/journalbeat
+                -path.data /var/lib/journalbeat
+                -path.logs /var/log/journalbeat
+            config_overrides:
+              Service:
+                EnvironmentFile: "-/etc/default/go1.10.1"
+              Unit:
+                Documentation: https://github.com/mheese/journalbeat/blob/master/README.md
+                Wants: network-online.target
+                After: network-online.target
+
+    - name: Enable and restart journalbeat
+      systemd:
+        name: "journalbeat"
+        enabled: true
+        state: restarted
+        daemon_reload: yes
+
+
+## NOTE(cloudnull): This task is broken at this point due to missing
+## configuration. Once the following issue
+## [ https://github.com/mheese/journalbeat/issues/136 ] is
+## resolved this should be uncommented.
+# - name: Load Journalbeat Dashboards
+#   hosts: hosts[0]
+#   become: true
+#   vars_files:
+#     - vars/variables.yml
+#   tasks:
+#     - name: Load templates
+#       shell: >-
+#         {% set IP_ARR=[] %}
+#         {% for host in groups['elastic-logstash'] %}
+#           {% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
+#           {% endif %}
+#         {% endfor %}
+#         {% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
+#         /usr/local/bin/journalbeat -setup
+#           -E 'output.logstash.enabled=false'
+#           -E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
+#           -e -v
+#       register: templates
+#       until: templates | success
+#       retries: 3
+#       delay: 2
+#       tags:
+#         - beat-setup
diff --git a/elk_metrics_6x/readme.rst b/elk_metrics_6x/readme.rst
index ef2e4585..21bc9f0e 100644
--- a/elk_metrics_6x/readme.rst
+++ b/elk_metrics_6x/readme.rst
@@ -10,6 +10,16 @@ with topbeat to gather metrics from hosts metrics to the ELK cluster.
 **These playbooks require Ansible 2.4+.**
 
+Before running these playbooks, the ``systemd_service`` role must be available;
+it is a community role. If these playbooks are run as part of an
+OpenStack-Ansible installation, the required role is resolved for you. If the
+installation is outside of OpenStack-Ansible, clone the role or add it to an
+Ansible role requirements file.
+
+.. code-block:: bash
+
+    git clone https://github.com/openstack/ansible-role-systemd_service /etc/ansible/roles/systemd_service
+
 OpenStack-Ansible Integration
 -----------------------------
diff --git a/elk_metrics_6x/site.yml b/elk_metrics_6x/site.yml
index 86cef68b..5c421af6 100644
--- a/elk_metrics_6x/site.yml
+++ b/elk_metrics_6x/site.yml
@@ -23,3 +23,4 @@
 - import_playbook: installAuditbeat.yml
 - import_playbook: installHeartbeat.yml
 - import_playbook: installFilebeat.yml
+- import_playbook: installJournalbeat.yml
diff --git a/elk_metrics_6x/templates/journalbeat.yml.j2 b/elk_metrics_6x/templates/journalbeat.yml.j2
new file mode 100644
index 00000000..4ca71f34
--- /dev/null
+++ b/elk_metrics_6x/templates/journalbeat.yml.j2
@@ -0,0 +1,704 @@
+#======================== Journalbeat Configuration ============================
+
+journalbeat:
+  # What position in journald to seek to at start up
+  # options: cursor, tail, head (defaults to tail)
+  #seek_position: tail
+
+  # If seek_position is set to cursor and seeking to cursor fails
+  # fall back to this method. If set to none it will exit
+  # options: tail, head, none (defaults to tail)
+  #cursor_seek_fallback: tail
+
+  # Store the cursor of the successfully published events
+  #write_cursor_state: true
+
+  # Path to the file to store the cursor (defaults to ".journalbeat-cursor-state")
+  #cursor_state_file: .journalbeat-cursor-state
+
+  # How frequently should we save the cursor to disk (defaults to 5s)
+  #cursor_flush_period: 5s
+
+  # Path to the file to store the queue of events pending (defaults to ".journalbeat-pending-queue")
+  #pending_queue.file: .journalbeat-pending-queue
+
+  # How frequently should we save the queue to disk (defaults to 1s).
+  # Pending queue represents the WAL of events queued to be published
+  # or being published and waiting for acknowledgement. In case of a
+  # regular restart of journalbeat all the events not yet acknowledged
+  # will be flushed to disk during the shutdown.
+  # In case of disaster most probably journalbeat won't get a chance to shutdown
+  # itself gracefully and this flush period option will serve you as a
+  # backup creation frequency option.
+  #pending_queue.flush_period: 1s
+
+  # Size of the buffered queue for the published and acknowledged messages
+  #pending_queue.completed_queue_size: 8192
+
+  # Lowercase and remove leading underscores, e.g. "_MESSAGE" -> "message"
+  # (defaults to false)
+  #clean_field_names: false
+
+  # All journal entries are strings by default. You can try to convert them to numbers.
+  # (defaults to false)
+  #convert_to_numbers: false
+
+  # Store all the fields of the Systemd Journal entry under this field
+  # Can be almost any string suitable to be a field name of an ElasticSearch document.
+  # Dots can be used to create nested fields.
+  # Two exceptions:
+  #   - no repeated dots;
+  #   - no trailing dots, e.g. "journal..field_name." will fail
+  #   (defaults to "" hence stores on the upper level of the event)
+  #move_metadata_to_field: ""
+
+  # Specific units to monitor.
+ #units: ["httpd.service"] + + # gather kernel logs when units are provided + kernel: true + + # Custom Journal patterns to match on other than UNIT + #match_patterns: ["FIELD=value"] + + # Specificies syslog identifiers to monitor. + #identifiers: ["docker"] + + # Specify Journal paths to open. You can pass an array of paths to Systemd Journal paths. + # If you want to open Journal from directory just pass an array consisting of one element + # representing the path. See: https://www.freedesktop.org/software/systemd/man/sd_journal_open.html + # By default this setting is empty thus journalbeat will attempt to find all journal files automatically + #journal_paths: ["/var/log/journal"] + + #default_type: journal + +#================================ General ====================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +name: journalbeat + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Internal queue size for single events in processing pipeline +#queue_size: 1000 + +# The internal queue size for bulk events in the processing pipeline. +# Do not modify this value. +#bulk_queue_size: 0 + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Processors =================================== + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, and +# add_cloud_metadata. +# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +#- include_fields: +# fields: ["cpu"] +#- drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +#- drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, and DigitalOcean. +# +#processors: +#- add_cloud_metadata: +# + +#================================ Outputs ====================================== + +# Configure what outputs to use when sending the data collected by the beat. +# Multiple outputs may be used. + +#-------------------------- Elasticsearch output ------------------------------- +#output.elasticsearch: + # Boolean flag to enable or disable the output module. 
+ #enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ elastic_port | string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}] + + # Set gzip compression level. + #compression_level: 0 + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "beatname" plus date + # and generates [beatname-]YYYY.MM.DD keys. + #index: "beatname-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. + #pipeline: "" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # Configure http request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # The number of seconds to wait for new events between two bulk API index requests. + # If `bulk_max_size` is reached before this interval expires, addition bulk index + # requests are made. + #flush_interval: 1s + + # A template is used to set the mapping in Elasticsearch + # By default template loading is enabled and the template is loaded. + # These settings can be adjusted to load your own template or overwrite existing ones. + + # Set to false to disable template loading. + #template.enabled: true + + # Template name. By default the template name is beatname. + #template.name: "beatname" + + # Path to template file + #template.path: "${path.config}/beatname.template.json" + + # Overwrite existing template + #template.overwrite: false + + # If set to true, beatname checks the Elasticsearch version at connect time, and if it + # is 2.x, it loads the file specified by the template.versions.2x.path setting. The + # default is true. + #template.versions.2x.enabled: true + + # Path to the Elasticsearch 2.x version of the template file. + #template.versions.2x.path: "${path.config}/beatname.template-es2x.json" + + # If set to true, beatname checks the Elasticsearch version at connect time, and if it + # is 6.x, it loads the file specified by the template.versions.6x.path setting. The + # default is true. + #template.versions.6x.enabled: true + + # Path to the Elasticsearch 6.x version of the template file. + #template.versions.6x.path: "${path.config}/beatname.template-es6x.json" + + # Use SSL settings for HTTPS. Default is true. + #ssl.enabled: true + + # Configure SSL verification mode. 
If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + +#----------------------------- Logstash output --------------------------------- +{% include 'templates/_include_log_stash_output.yml.j2' %} + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version beatname is assumed to run against. Defaults to the oldest + # supported stable version (currently version 0.8.2.0) + #version: 0.8.2 + + # Metadata update configuration. Metadata do contain leader information + # deciding which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. 
+ #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The number of seconds to wait for new events between two producer API calls. + #flush_interval: 1s + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. + #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is beatname. + #key: beatname + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. 
+ #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/beatname" + + # Name of the generated files. The default is `beatname` and it generates + # files: `beatname`, `beatname.1`, `beatname.2`, etc. + #filename: beatname + + # Maximum size in kilobytes of each file. 
When this size is reached, and on + # every beatname restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Pretty print json event + #pretty: false + +#================================= Paths ====================================== + +# The home path for the beatname installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the beatname installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the beatname installation. This is the default base path +# for all the files in which beatname needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for a beatname installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +#============================== Dashboards ===================================== +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards is disabled by default and can be enabled either by setting the +# options here, or by using the `-setup` CLI flag. +dashboards.enabled: false + +# The URL from where to download the dashboards archive. By default this URL +# has a value which is computed based on the Beat name and version. For released +# versions, this URL points to the dashboard archive on the artifacts.elastic.co +# website. +#dashboards.url: + +# The directory from where to read the dashboards. It is used instead of the URL +# when it has a value. +#dashboards.directory: + +# The file archive (zip file) from where to read the dashboards. It is used instead +# of the URL when it has a value. +#dashboards.file: + +# If this option is enabled, the snapshot URL is used instead of the default URL. +#dashboards.snapshot: false + +# The URL from where to download the snapshot version of the dashboards. By default +# this has a value which is computed based on the Beat name and version. +#dashboards.snapshot_url + +# In case the archive contains the dashboards from multiple Beats, this lets you +# select which one to load. You can load all the dashboards in the archive by +# setting this to the empty string. +#dashboards.beat: beatname + +# The name of the Kibana index to use for setting the configuration. Default is ".kibana" +#dashboards.kibana_index: .kibana + +# The Elasticsearch index name. 
This overwrites the index name defined in the +# dashboards and index pattern. Example: testbeat-* +#dashboards.index: + +#============================== Template ===================================== + +# A template is used to set the mapping in Elasticsearch +# By default template loading is enabled and the template is loaded. +# These settings can be adjusted to load your own template or overwrite existing ones. + +# Set to false to disable template loading. +setup.template.enabled: true + +# Template name. By default the template name is "journalbeat-%{[beat.version]}" +# The template name and pattern has to be set in case the elasticsearch index pattern is modified. +setup.template.name: "journalbeat-%{[beat.version]}" + +# Template pattern. By default the template pattern is "-%{[beat.version]}-*" to apply to the default index settings. +# The first part is the version of the beat and then -* is used to match all daily indices. +# The template name and pattern has to be set in case the elasticsearch index pattern is modified. +setup.template.pattern: "journalbeat-%{[beat.version]}-*" + +# Path to fields.yml file to generate the template +setup.template.fields: "${path.config}/fields.yml" + +# Overwrite existing template +setup.template.overwrite: true + +# Elasticsearch template settings +setup.template.settings: + + # A dictionary of settings to place into the settings.index dictionary + # of the Elasticsearch template. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html + index: + number_of_shards: 3 + codec: best_compression + #number_of_routing_shards: 30 + + # A dictionary of settings for the _source field. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html + #_source: + #enabled: false + +#============================== Kibana ===================================== +{% include 'templates/_include_kibana_setup.yml.j2' %} + +#================================ Logging ====================================== +# There are four options for the log output: file, stderr, syslog, eventlog +# The file output is the default. + +# Sets log level. The default log level is info. +# Available log levels are: error, warning, info, debug +#logging.level: info + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are "beat", "publish", "service" +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: false + +# Send all logging output to Windows Event Logs. The default is false. +#logging.to_eventlog: false + +# If enabled, journalbeat periodically logs its internal metrics that have changed +# in the last period. For each metric that changed, the delta from the value at +# the beginning of the period is logged. Also, the total values for +# all non-zero internal metrics are logged on shutdown. The default is true. +#logging.metrics.enabled: true + +# The period after which to log the internal metrics. The default is 30s. +#logging.metrics.period: 30s + +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + path: /var/log/journalbeat + + # The name of the files where the logs are written to. 
+ name: journalbeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + +# Set to true to log messages in json format. +#logging.json: false
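
The readme change above offers two ways to obtain the ``systemd_service`` role: cloning it into
``/etc/ansible/roles`` or listing it in an Ansible role requirements file. A minimal sketch of such
a requirements entry follows; the file name ``ansible-role-requirements.yml`` and the ``version``
pin are assumptions and should be adjusted to match your deployment.

.. code-block:: yaml

    # Sketch of an entry for an Ansible role requirements file
    # (e.g. ansible-role-requirements.yml). The version pin below is an
    # assumption; pin the branch or tag that matches your release.
    - name: systemd_service
      scm: git
      src: https://github.com/openstack/ansible-role-systemd_service
      version: master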