Use elasticsearch coordinator nodes as smart LBs

Elasticsearch can be used as a smart load balancer for all traffic,
which removes the requirement for a VIP and moves the cluster to a
mesh topology. All of the Kibana nodes will now run Elasticsearch as
coordination-only nodes.

* Kibana will now connect to elasticsearch on localhost.
* All of the beats have been set up to use the new mesh topology.
* JVM memory management has been updated to reflect the additional
  services.

More on node assignments can be found here:
* https://www.elastic.co/guide/en/elasticsearch/reference/6.2/modules-node.html#modules-node

* The readme has been updated to reflect these changes.
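
A coordination-only node is simply one with every other role disabled.
As a sketch (values mirror the elasticsearch.yml template in this
change), the Kibana hosts end up with:

    node.master: false
    node.data: false
    node.ingest: false
    search.remote.connect: false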

Change-Id: I769e0251072f5dbde56fcce7753236d37d5c3b19
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
Kevin Carter 2018-06-12 14:37:08 -05:00
parent 582e5199f3
commit bc2937d9c9
23 changed files with 361 additions and 238 deletions

View File

@ -0,0 +1,70 @@
---
# The master node count takes half the available nodes, or falls back to 1
- name: Node count fact
set_fact:
storage_node_count: "{{ groups['elastic-logstash'] | length }}"
- name: Master node pre-count fact
set_fact:
_master_node_count: "{{ ((storage_node_count | int) > 1) | ternary((((storage_node_count | int) // 2) | int), 1) }}"
# if the master node count is even, add one to it otherwise use the provided value
- name: Master node count fact
set_fact:
master_node_count: "{{ ((_master_node_count | int) % 2 != 0) | ternary((_master_node_count | int), ((_master_node_count | int) + 1)) }}"
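# Worked example (assumption: five elastic-logstash hosts): storage_node_count
# is 5, _master_node_count is 5 // 2 = 2, and because 2 is even the final
# master_node_count becomes 2 + 1 = 3.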
- name: Data nodes fact
set_fact:
data_nodes: "{{ (groups['elastic-logstash'][:master_node_count | int] + groups['elastic-logstash'][master_node_count | int::2]) }}"
master_nodes: "{{ groups['elastic-logstash'][:master_node_count | int] }}"
coordination_nodes: |-
{% set nodes=[] %}
{% for host in groups['kibana'] %}
{% set _ = nodes.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
{{ nodes }}
zen_nodes: |-
{% set nodes=[] %}
{% for host in (groups['elastic-logstash'] | union(groups['kibana'])) %}
{% set _ = nodes.insert(loop.index, (hostvars[host]['ansible_host'] | string)) %}
{% endfor %}
{{ nodes }}
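# Worked example (assumption: groups['elastic-logstash'] = [log01 .. log05] and
# master_node_count = 3): master_nodes = [log01, log02, log03] while
# data_nodes = [log01, log02, log03, log04] (the first three plus every second
# remaining host); log05 is left as neither master nor data, i.e. a
# coordination-only member.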
- name: Data node count fact
set_fact:
data_node_count: "{{ data_nodes | length }}"
# Set the data nodes to be all of the masters plus every second remaining node
- name: Node enablement
set_fact:
master_node: "{{ (inventory_hostname in master_nodes) | ternary(true, false) }}"
data_node: "{{ (inventory_hostname in data_nodes) | ternary(true, false) }}"
# Set data node facts. The data nodes, in the case of elasticsearch, are also
# ingest nodes.
- name: Set data nodes
set_fact:
elasticsearch_data_hosts: |-
{% if inventory_hostname in data_nodes %}
{% set data_hosts = ['127.0.0.1:' + (elastic_port | string)] %}
{% else %}
{% set nodes=[] %}
{% for host in data_nodes %}
{% set _ = nodes.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
{% set data_hosts = nodes | shuffle(seed=inventory_hostname) %}
{% endif %}
{{ data_hosts }}
logstash_data_hosts: |-
{% if inventory_hostname in data_nodes %}
{% set data_hosts = ['127.0.0.1:' + (logstash_beat_input_port | string)] %}
{% else %}
{% set nodes=[] %}
{% for host in data_nodes %}
{% set _ = nodes.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (logstash_beat_input_port | string))) %}
{% endfor %}
{% set data_hosts = nodes | shuffle(seed=inventory_hostname) %}
{% endif %}
{{ data_hosts }}
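# Worked example (assumption: a kibana host with data_nodes = [log01, log02],
# elastic_port = 9200, logstash_beat_input_port = 5044): the host is not in
# data_nodes, so it renders per-host deterministically shuffled remote lists
# such as ["log02:9200", "log01:9200"] and ["log02:5044", "log01:5044"]; a
# data node instead short-circuits to ["127.0.0.1:9200"] and ["127.0.0.1:5044"].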

View File

@ -1,3 +1,15 @@
# For the purposes of this example, the kibana nodes have been added to
# different host machines than the logging nodes. The intention here
# is to show that the different components can scale independently of
# one another.
kibana_hosts:
infra01:
ip: 172.22.8.24
infra02:
ip: 172.22.8.25
infra03:
ip: 172.22.8.26
elastic-logstash_hosts:
logging01:
ip: 172.22.8.27
@ -6,10 +18,6 @@ elastic-logstash_hosts:
logging03:
ip: 172.22.8.29
kibana_hosts:
logging01:
ip: 172.22.8.27
apm-server_hosts:
logging01:
ip: 172.22.8.27

View File

@ -11,6 +11,9 @@
environment: "{{ deployment_environment_variables | default({}) }}"
pre_tasks:
- include_tasks: common_task_data_node_hosts.yml
tasks:
- include_tasks: common_task_install_elk_repo.yml
- name: Ensure apm-server is installed
@ -48,14 +51,10 @@
tasks:
- name: Load templates
shell: >-
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
apm-server setup
{{ item }}
-E 'apm-server.host=localhost:8200'
-E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
-E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}'
-e -v
with_items:
- "--template"

View File

@ -11,6 +11,9 @@
environment: "{{ deployment_environment_variables | default({}) }}"
pre_tasks:
- include_tasks: common_task_data_node_hosts.yml
tasks:
- include_tasks: common_task_install_elk_repo.yml
- name: Ensure Auditbeat is installed
@ -58,14 +61,10 @@
tasks:
- name: Load templates
shell: >-
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
auditbeat setup
{{ item }}
-E 'output.logstash.enabled=false'
-E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
-E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}'
-e -v
with_items:
- "--template"

View File

@ -1,6 +1,6 @@
---
- name: Install Elastic Search
hosts: "elastic-logstash"
hosts: "elastic-logstash:kibana"
become: true
vars_files:
@ -8,16 +8,34 @@
environment: "{{ deployment_environment_variables | default({}) }}"
pre_tasks:
- include_tasks: common_task_data_node_hosts.yml
tasks:
- name: Set half memory fact
- name: Set memory fact to half
set_fact:
h_mem: "{{ ansible_memtotal_mb // 2 }}"
h_mem: "{{ (ansible_memtotal_mb | int) // 2 }}"
tags:
- always
- name: Set elasticsearch facts
set_fact:
elastic_heap_size: "{{ ((h_mem | int) > 30720) | ternary(30720, h_mem) }}"
tags:
- always
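# Worked example (assumption: a host with 64G of memory): h_mem is
# 65536 // 2 = 32768, which exceeds the 30720 cap, so elastic_heap_size is
# pinned to 30720 (roughly 30G, keeping the JVM heap below the
# compressed-oops threshold).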
- name: Set kibana elasticsearch facts
block:
- name: Set kibana as elasticsearch coordinators
set_fact:
elasticsearch_node_master: false
elasticsearch_node_data: false
elasticsearch_node_ingest: false
elastic_heap_size: "{{ (elastic_heap_size | int) // 3 }}"
when:
- elastic_heap_size is undefined
- inventory_hostname in groups['kibana']
tags:
- always
- name: Configure sysctl vm.max_map_count=262144 on container hosts
sysctl:

View File

@ -11,6 +11,9 @@
environment: "{{ deployment_environment_variables | default({}) }}"
pre_tasks:
- include_tasks: common_task_data_node_hosts.yml
tasks:
- include_tasks: common_task_install_elk_repo.yml
- name: Ensure Filebeat is installed
@ -28,7 +31,6 @@
when:
- elk_package_state | default('present') == 'absent'
tasks:
- name: Check for apache
stat:
path: /etc/apache2
@ -155,14 +157,10 @@
tasks:
- name: Load templates
shell: >-
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
filebeat setup
{{ item }}
-E 'output.logstash.enabled=false'
-E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
-E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}'
-e -v
with_items:
- "--template"

View File

@ -11,6 +11,9 @@
environment: "{{ deployment_environment_variables | default({}) }}"
pre_tasks:
- include_tasks: common_task_data_node_hosts.yml
tasks:
- include_tasks: common_task_install_elk_repo.yml
- name: Ensure heartbeat is installed
@ -47,14 +50,10 @@
tasks:
- name: Load templates
shell: >-
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
heartbeat setup
{{ item }}
-E 'output.logstash.enabled=false'
-E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
-E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}'
-e -v
with_items:
- "--template"

View File

@ -25,6 +25,9 @@
environment: "{{ deployment_environment_variables | default({}) }}"
pre_tasks:
- include_tasks: common_task_data_node_hosts.yml
tasks:
- include_tasks: common_task_install_go1.10.1.yml
- name: Ensure libsystemd-dev is installed
@ -44,7 +47,6 @@
- elk_package_state | default('present') == 'absent'
- ansible_service_mgr == "systemd"
tasks:
- name: create the system group
group:
name: "journalbeat"
@ -130,13 +132,9 @@
# tasks:
# - name: Load templates
# shell: >-
# {% set IP_ARR=[] %}
# {% for host in groups['elastic-logstash'] %}
# {% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
# {% endfor %}
# /usr/local/bin/journalbeat -setup
# -E 'output.logstash.enabled=false'
# -E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
# -E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}'
# -e -v
# register: templates
# until: templates is success

View File

@ -7,16 +7,21 @@
environment: "{{ deployment_environment_variables | default({}) }}"
pre_tasks:
- include_tasks: common_task_data_node_hosts.yml
tasks:
- name: Set quarter memory fact
set_fact:
q_mem: "{{ ansible_memtotal_mb // 4 }}"
q_mem: "{{ (ansible_memtotal_mb | int) // 4 }}"
tags:
- always
- name: Set logstash facts
set_fact:
elastic_heap_size: "{{ ((q_mem | int) > 30720) | ternary(30720, q_mem) }}"
when:
- elastic_heap_size is undefined
tags:
- always
- include_tasks: common_task_install_elk_repo.yml
@ -71,7 +76,7 @@
src: templates/10-syslog-filter.conf.j2
dest: /etc/logstash/conf.d/10-syslog-filter.conf
- name: Drop Logstash conf for beats output
- name: Drop Logstash conf for elasticsearch output
template:
src: templates/99-elasticsearch-output.conf.j2
dest: /etc/logstash/conf.d/99-elasticsearch-output.conf

View File

@ -11,6 +11,9 @@
- vars/variables.yml
pre_tasks:
- include_tasks: common_task_data_node_hosts.yml
tasks:
- include_tasks: common_task_install_elk_repo.yml
- name: Ensure Metricsbeat is installed
@ -26,7 +29,6 @@
when:
- elk_package_state | default('present') == 'absent'
tasks:
- name: Check for apache
stat:
path: /etc/apache2/sites-available
@ -164,14 +166,10 @@
tasks:
- name: Load templates
shell: >-
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
metricbeat setup
{{ item }}
-E 'output.logstash.enabled=false'
-E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
-E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}'
-e -v
with_items:
- "--template"

View File

@ -11,6 +11,9 @@
- vars/variables.yml
pre_tasks:
- include_tasks: common_task_data_node_hosts.yml
tasks:
- include_tasks: common_task_install_elk_repo.yml
- name: Ensure packetbeat is installed
@ -50,14 +53,10 @@
tasks:
- name: Load templates
shell: >-
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
packetbeat setup
{{ item }}
-E 'output.logstash.enabled=false'
-E 'output.elasticsearch.hosts={{ IP_ARR | to_json }}'
-E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}'
-e -v
with_items:
- "--template"

View File

@ -2,16 +2,16 @@ Install ELK with beats to gather metrics
########################################
:tags: openstack, ansible
..
About this repository
---------------------
This set of playbooks will deploy elk cluster (Elasticsearch, Logstash, Kibana)
with topbeat to gather metrics from hosts metrics to the ELK cluster.
This set of playbooks will deploy an elastic stack cluster (Elasticsearch,
Logstash, Kibana) with beats to gather metrics from hosts and store them into
the elastic stack.
**These playbooks require Ansible 2.5+.**
OpenStack-Ansible Integration
-----------------------------
@ -20,47 +20,56 @@ an OpenStack-Ansible deployment. For a simple example of standalone inventory,
see ``inventory.example.yml``.
Optional | Load balancer VIP address
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In order to use multi-node elasticsearch a load balancer is required. HAProxy
can provide the load balancer functionality needed. The option
`internal_lb_vip_address` is used as the endpoint (virtual IP address) services
like Kibana will use when connecting to elasticsearch. If this option is
omitted, the first node in the elasticsearch cluster will be used.
Optional | configure haproxy endpoints
Optional | Load balancer configuration
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Edit the `/etc/openstack_deploy/user_variables.yml` file and add the following
Configure the Elasticsearch endpoints:
While the Elastic stack cluster does not need a load balancer to scale, one is
useful when accessing the Elasticsearch cluster using external tooling. Tools
like OSProfiler, Grafana, etc. will all benefit from being able to interact
with Elasticsearch through the load balancer. This provides better fault
tolerance, especially when compared to connecting to a single node.
The following section can be added to the `haproxy_extra_services` list to
create an Elasticsearch backend. The ingress port used to connect to
Elasticsearch is **9201**. The backend port is **9200**. If this backend is
set up, make sure you set the `internal_lb_vip_address` on the CLI or within a
known variable file which will be sourced at runtime. If using HAProxy, edit
the `/etc/openstack_deploy/user_variables.yml` file and add the following
lines.
.. code-block:: yaml
haproxy_extra_services:
- service:
haproxy_service_name: kibana
haproxy_ssl: False
haproxy_backend_nodes: "{{ groups['kibana'] | default([]) }}"
haproxy_port: 81 # This is set using the "kibana_nginx_port" variable
haproxy_balance_type: tcp
- service:
haproxy_service_name: elastic-logstash
haproxy_ssl: False
haproxy_backend_nodes: "{{ groups['elastic-logstash'] | default([]) }}"
haproxy_port: 5044 # This is set using the "logstash_beat_input_port" variable
haproxy_balance_type: tcp
- service:
haproxy_service_name: elastic-logstash
haproxy_ssl: False
haproxy_backend_nodes: "{{ groups['elastic-logstash'] | default([]) }}"
haproxy_backend_nodes: "{{ groups['kibana'] | default([]) }}" # Kibana nodes are also Elasticsearch coordination nodes
haproxy_port: 9201 # This is set using the "elastic_hap_port" variable
haproxy_check_port: 9200 # This is set using the "elastic_port" variable
haproxy_backend_port: 9200 # This is set using the "elastic_port" variable
haproxy_balance_type: tcp
Configure the Kibana endpoints:
It is recommended to use a load balancer with Kibana. As with Elasticsearch, a
load balancer is not required; however, without one users will need to connect
directly to a single Kibana node to access the dashboard. If a load balancer is
present it can provide a highly available address for users to access a pool
of Kibana nodes, which provides a much better user experience. If using
HAProxy, edit the `/etc/openstack_deploy/user_variables.yml` file and add the
following lines.
.. code-block:: yaml
haproxy_extra_services:
- service:
haproxy_service_name: kibana
haproxy_ssl: False
haproxy_backend_nodes: "{{ groups['kibana'] | default([]) }}"
haproxy_port: 81 # This is set using the "kibana_nginx_port" variable
haproxy_balance_type: tcp
Optional | add OSProfiler to an OpenStack-Ansible deployment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -78,7 +87,7 @@ OpenStack-Ansible deployment.
enabled: true
trace_sqlalchemy: true
hmac_keys: "UNIQUE_HMACKEY" # This needs to be set consistently throughout the deployment
connection_string: "elasticsearch://{{ internal_lb_vip_address }}:9201"
es_doc_type: "notification"
es_scroll_time: "2m"
es_scroll_size: "10000"
@ -125,7 +134,7 @@ can be added or dynamically appended to a given hash using `yaml` tags.
profiler:
enabled: true
hmac_keys: "UNIQUE_HMACKEY" # This needs to be set consistently throughout the deployment
connection_string: "elasticsearch://{{ internal_lb_vip_address }}:9201"
es_doc_type: "notification"
es_scroll_time: "2m"
es_scroll_size: "10000"
@ -138,18 +147,21 @@ can be added or dynamcally appended to a given hash using `yaml` tags.
<<: *os_profiler
While the `osprofiler` and `elasticsearch` libraries should be installed
within all virtual environments by default, it's possible they're missing
within a given deployment. To install these dependencies throughout the
cluster without having to invoke a *repo-build*, the following *ad-hoc*
Ansible command can be used.
The version of the Elasticsearch python library should match the major
version of Elasticsearch being deployed within the environment.
.. code-block:: bash
ansible -m shell -a 'find /openstack/venvs/* -maxdepth 0 -type d -exec {}/bin/pip install osprofiler elasticsearch \;' all
ansible -m shell -a 'find /openstack/venvs/* -maxdepth 0 -type d -exec {}/bin/pip install osprofiler "elasticsearch>=6.0.0,<7.0.0" --isolated \;' all
Once the overrides are in place the **openstack-ansible** playbooks will need to
be rerun. To simply inject these options into the system, a deployer will be able
to use the `*-config` tags that are a part of all `os_*` roles. The following
example will run the **config** tag on **ALL** openstack playbooks.
@ -220,8 +232,8 @@ Copy the conf.d file into place
cp conf.d/elk.yml /etc/openstack_deploy/conf.d/
In **elk.yml**, list your logging hosts under elastic-logstash_hosts to create
the Elasticsearch cluster in multiple containers and one logging host under
`kibana_hosts` to create the Kibana container.
.. code-block:: bash
@ -232,7 +244,7 @@ Create the containers
.. code-block:: bash
cd /opt/openstack-ansible/playbooks
openstack-ansible lxc-containers-create.yml -e 'container_group=elastic-logstash:kibana:apm-server'
Deploying | Installing with embedded Ansible
@ -271,8 +283,8 @@ environment variable `ANSIBLE_ACTION_PLUGINS` or through the use of an
Deploying | The environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Install master/data Elasticsearch nodes on the elastic-logstash containers,
deploy Logstash, deploy Kibana, and then deploy all of the service beats.
.. code-block:: bash
@ -296,10 +308,6 @@ domain sockets. Any /tmp/*uwsgi-stats.sock will be picked up by Metricsbeat.
.. code-block:: yaml
nova_api_metadata_uwsgi_ini_overrides:
uwsgi:
stats: "/tmp/nova-api-metadata-uwsgi-stats.sock"
keystone_uwsgi_ini_overrides:
uwsgi:
stats: "/tmp/keystone-uwsgi-stats.sock"
@ -348,8 +356,14 @@ domain sockets. Any /tmp/*uwsgi-stats.sock will be picked up by Metricsbeat.
uwsgi:
stats: "/tmp/magnum-api-uwsgi-stats.sock"
Rerun all of the **openstack-ansible** playbooks to enable these stats. Use the *-config
tags on all of the `os_*` roles.
Rerun all of the **openstack-ansible** playbooks to enable these stats. Use
the `${service_name}-config` tags on all of the `os_*` roles. It's possible to
auto-generate the tags list with the following command.
.. code-block:: bash
openstack-ansible setup-openstack.yml --tags "$(cat setup-openstack.yml | grep -wo 'os-.*' | awk -F'-' '{print $2 "-config"}' | tr '\n' ',')"
Optional | add Grafana visualizations
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -384,4 +398,4 @@ If everything goes bad, you can clean up with the following command
.. code-block:: bash
openstack-ansible /opt/openstack-ansible-ops/elk_metrics_6x/site.yml -e "elk_package_state=absent" --tags package_install
openstack-ansible /opt/openstack-ansible/playbooks/lxc-containers-destroy.yml --limit=kibana:elastic-logstash_all

View File

@ -14,6 +14,7 @@
# limitations under the License.
- import_playbook: installElastic.yml
- import_playbook: installCurator.yml
- import_playbook: installLogstash.yml
- import_playbook: installKibana.yml
- import_playbook: installAPMserver.yml

View File

@ -1,10 +1,6 @@
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
{% endfor %}
output {
elasticsearch {
hosts => {{ IP_ARR | to_json }}
hosts => {{ elasticsearch_data_hosts | shuffle(seed=inventory_hostname) | to_json }}
sniffing => true
manage_template => false
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"

View File

@ -0,0 +1,92 @@
#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
# Boolean flag to enable or disable the output module.
enabled: true
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200)
# In case you specify an additional path, the scheme is required: http://localhost:9200/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
hosts: {{ elasticsearch_data_hosts | shuffle(seed=inventory_hostname) | to_json }}
# Set gzip compression level.
compression_level: 3
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
# Dictionary of HTTP parameters to pass within the url with index operations.
#parameters:
#param1: value1
#param2: value2
# Number of workers per Elasticsearch host.
worker: 1
# Optional index name. The default is "apm" plus date
# and generates [apm-]YYYY.MM.DD keys.
# In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
#index: "apm-%{[beat.version]}-%{+yyyy.MM.dd}"
# Optional ingest node pipeline. By default no pipeline will be used.
#pipeline: ""
# Optional HTTP Path
#path: "/elasticsearch"
# Custom HTTP headers to add to each request
#headers:
# X-My-Header: Contents of the header
# Proxy server url
#proxy_url: http://proxy:3128
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
# The default is 50.
#bulk_max_size: 50
# Configure the HTTP request timeout before failing a request to Elasticsearch.
#timeout: 90
# Use SSL settings for HTTPS. Default is true.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# SSL configuration. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never

View File

@ -1,13 +1,9 @@
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (logstash_beat_input_port | string))) %}
{% endfor %}
output.logstash:
# Boolean flag to enable or disable the output module.
enabled: true
# The Logstash hosts
hosts: {{ IP_ARR | to_json }}
hosts: {{ logstash_data_hosts | shuffle(seed=inventory_hostname) | to_json }}
# Number of workers per Logstash host.
worker: 1
@ -34,6 +30,22 @@ output.logstash:
# if no error is encountered.
slow_start: true
{% set thread_pool_size = ansible_processor_cores * ((ansible_processor_threads_per_core > 0) | ternary(ansible_processor_threads_per_core, 1)) %}
# The maximum number of events to bulk in a single Logstash request. The
# default is the number of cores multiplied by the number of threads per
# core; the result is then multiplied by 256, which yields the defined
# bulk max size. If the Beat sends single events, the events are collected
# into batches. If the Beat publishes a large batch of events (larger than
# the value specified by bulk_max_size), the batch is split. Specifying a
# larger batch size can improve performance by lowering the overhead of
# sending events. However big batch sizes can also increase processing times,
# which might result in API errors, killed connections, timed-out publishing
# requests, and, ultimately, lower throughput. Setting bulk_max_size to values
# less than or equal to 0 disables the splitting of batches. When splitting
# is disabled, the queue decides on the number of events to be contained in a
# batch.
bulk_max_size: {{ thread_pool_size * 256 }}
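# Worked example (assumption: 4 cores with 2 threads per core): the
# thread_pool_size works out to 8, so this renders bulk_max_size: 2048.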
{% if named_index is defined %}
# Optional index name. The default index name is set to {{ named_index }}
# in all lowercase.

View File

@ -100,101 +100,8 @@ apm-server:
# Configure what output to use when sending the data collected by the beat.
#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
# Boolean flag to enable or disable the output module.
#enabled: true
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200)
# In case you specify an additional path, the scheme is required: http://localhost:9200/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
{% if internal_lb_vip_address is defined %}
hosts: "http://{{ internal_lb_vip_address }}:{{ elastic_hap_port }}"
{% else %}
hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ elastic_port | string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}]
{% endif %}
# Set gzip compression level.
#compression_level: 0
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
# Dictionary of HTTP parameters to pass within the url with index operations.
#parameters:
#param1: value1
#param2: value2
# Number of workers per Elasticsearch host.
#worker: 1
# Optional index name. The default is "apm" plus date
# and generates [apm-]YYYY.MM.DD keys.
# In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
#index: "apm-%{[beat.version]}-%{+yyyy.MM.dd}"
# Optional ingest node pipeline. By default no pipeline will be used.
#pipeline: ""
# Optional HTTP Path
#path: "/elasticsearch"
# Custom HTTP headers to add to each request
#headers:
# X-My-Header: Contents of the header
# Proxy server url
#proxy_url: http://proxy:3128
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
# The default is 50.
#bulk_max_size: 50
# Configure the HTTP request timeout before failing a request to Elasticsearch.
#timeout: 90
# Use SSL settings for HTTPS. Default is true.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# SSL configuration. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#----------------------------- Logstash output ---------------------------------
{% include 'templates/_include_elasticsearch_output.yml.j2' %}
#================================= Paths ======================================

View File

@ -37,7 +37,7 @@ bootstrap.memory_lock: false
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
network.host: ["127.0.0.1", "{{ ansible_host }}"]
network.host: ["127.0.0.1", "{{ ansible_host }}", "{{ ansible_hostname }}"]
# Set a custom port for HTTP:
http.port: {{ elastic_port }}
@ -46,24 +46,29 @@ http.port: {{ elastic_port }}
# Pass an initial list of hosts to perform discovery when new node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% set _ = IP_ARR.insert(loop.index, (hostvars[host]['ansible_host'] | string)) %}
{% endfor %}
{% set available_nodes = (groups['elastic-logstash'] | length) %}
{# the master node count takes half the available nodes or sets itself to 1 #}
{% set _master_node_count = (available_nodes > 1) | ternary(((available_nodes // 2) | int), 1) %}
{# if the master node count is even, add one to it otherwise use the provided value #}
{% set master_node_count = ((_master_node_count | int) % 2 != 0) | ternary(_master_node_count, (_master_node_count + 1)) %}
discovery.zen.ping.unicast.hosts: {{ IP_ARR | to_json }}
#
# Node definitions can be seen here:
#<https://www.elastic.co/guide/en/elasticsearch/reference/6.2/modules-node.html>
discovery.zen.ping.unicast.hosts: {{ zen_nodes | shuffle(seed=inventory_hostname) | to_json }}
# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
discovery.zen.minimum_master_nodes: {{ master_node_count | int }}
discovery.zen.minimum_master_nodes: {{ ((master_node_count | int) // 2) + 1 }}
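# Worked example (assumption: master_node_count = 3): this renders
# discovery.zen.minimum_master_nodes: 2, a majority of the three
# master-eligible nodes.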
# The first set of nodes in the master_node_count are marked as such
node.master: {{ (inventory_hostname in groups['elastic-logstash'][:master_node_count | int]) | ternary(true, false) }}
node.master: {{ elasticsearch_node_master | default(master_node) }}
# Every node in the master list and every other node after will be a data node
node.data: {{ (inventory_hostname in (groups['elastic-logstash'][:master_node_count| int] + groups['elastic-logstash'][master_node_count | int::2])) | ternary(true, false) }}
node.data: {{ elasticsearch_node_data | default(data_node) }}
# Ingest nodes can execute pre-processing pipelines. To override automatic
# determination, the option `elasticsearch_node_ingest` can be defined as a
# Boolean which will enable or disable ingest nodes. When using automatic
# determination, ingest nodes will follow data nodes.
#
# NOTE(cloudnull): The use of "search remote connect" will follow the
# enablement of ingest nodes.
{% if elasticsearch_node_ingest is defined %}
node.ingest: {{ elasticsearch_node_ingest }}
search.remote.connect: {{ elasticsearch_node_ingest }}
{% else %}
node.ingest: {{ data_node }}
search.remote.connect: {{ data_node }}
{% endif %}
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery.html>
@ -72,7 +77,7 @@ node.data: {{ (inventory_hostname in (groups['elastic-logstash'][:master_node_co
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
gateway.recover_after_nodes: {{ master_node_count | int // 2 }}
gateway.recover_after_nodes: {{ ((master_node_count | int) // 2) + 1 }}
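# Worked example (assumption: master_node_count = 3): initial recovery is
# blocked until ((3 // 2) + 1) = 2 nodes have started.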
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html>

View File

@ -158,11 +158,7 @@ name: journalbeat
# In case you specify an additional path, the scheme is required: http://localhost:9200/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
#{% set IP_ARR=[] %}
#{% for host in groups['elastic-logstash'] %}
#{% set _ = IP_ARR.insert(loop.index, ((hostvars[host]['ansible_host'] | string) + ":" + (elastic_port | string))) %}
#{% endfor %}
#hosts: {{ IP_ARR | to_json }}
#hosts: localhost:9200
# Set gzip compression level.
#compression_level: 0

View File

@ -2,9 +2,14 @@
# Xms represents the initial size of total heap space
# Xmx represents the maximum size of total heap space
{% if (not (elasticsearch_node_master | default(master_node)) | bool) and (not (elasticsearch_node_data | default(data_node)) | bool) %}
-Xms{{ (elastic_heap_size | int) // 2 }}m
-Xmx{{ (elastic_heap_size | int) // 2 }}m
{% else %}
-Xms{{ elastic_heap_size }}m
-Xmx{{ elastic_heap_size }}m
{% endif %}
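# Worked example (assumption: elastic_heap_size = 1024 on a coordination-only
# node, i.e. neither a master nor a data node): the branch above renders
# -Xms512m/-Xmx512m, while a master or data node gets the full
# -Xms1024m/-Xmx1024m.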
################################################################
## Expert settings
@ -72,3 +77,6 @@
# log GC status to a file with time stamps
# ensure the directory exists
#-Xloggc:${LS_GC_LOG_FILE}
# Disable log4j JMX because it's not supported by elastic
-Dlog4j2.disable.jmx=true

View File

@ -12,11 +12,8 @@
# server.maxPayloadBytes: 1048576
# The URL of the Elasticsearch instance to use for all your queries.
{% if internal_lb_vip_address is defined %}
elasticsearch.url: "http://{{ internal_lb_vip_address }}:{{ elastic_hap_port }}"
{% else %}
elasticsearch.url: "http://{{ hostvars[groups['elastic-logstash'][0]]['ansible_host'] }}:{{ elastic_port }}"
{% endif %}
elasticsearch.url: "http://127.0.0.1:{{ elastic_port }}"
# When this settings value is true Kibana uses the hostname specified in the server.host
# setting. When the value of this setting is false, Kibana uses the hostname of the host
# that connects to this Kibana instance.
@ -71,7 +68,7 @@
# pid.file: /var/run/kibana.pid
# Enables you specify a file where Kibana stores log output.
# logging.dest: stdout
logging.dest: stdout
# Set the value of this setting to true to suppress all logging output.
# logging.silent: false

View File

@ -172,7 +172,7 @@ metricbeat.modules:
# namespace: example
#
##---------------------------- Elasticsearch Module ---------------------------
{% if inventory_hostname in groups['elastic-logstash'] | default([]) %}
{% if inventory_hostname in (groups['elastic-logstash'] | union(groups['kibana']) | unique) %}
- module: elasticsearch
metricsets: ["node", "node_stats"]
period: 30s

View File

@ -24,6 +24,10 @@ elastic_journalbeat_retention: 14
elastic_metricbeat_retention: 3
elastic_packetbeat_retention: 3
# This is the URL external services can use to communicate with the
# elasticsearch cluster.
elastic_vip_url: "http://{{ internal_lb_vip_address is defined | ternary(internal_lb_vip_address + ':' + (elastic_hap_port | string), hostvars[groups['kibana'][0]]['ansible_host'] + ':' + (elastic_port | string)) }}"
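# Worked example (assumption: internal_lb_vip_address = 172.22.0.100 and
# elastic_hap_port = 9201): this renders "http://172.22.0.100:9201". Without a
# VIP defined, it falls back to the first Kibana coordination node on
# elastic_port, e.g. "http://172.22.8.24:9200" in the example inventory.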
# kibana vars
kibana_interface: 0.0.0.0
kibana_port: 5601
@ -286,7 +290,7 @@ grafana_datasources:
- name: "all-Elasticsearch"
type: "elasticsearch"
access: "proxy"
url: "http://{{ internal_lb_vip_address | default(hostvars[groups['elastic-logstash'][0]]['ansible_host']) }}:{{ elastic_hap_port }}"
url: "{{ elastic_vip_url }}"
basicAuth: false
basicAuthUser: ""
basicAuthPassword: ""
@ -301,7 +305,7 @@ grafana_datasources:
- name: "auditbeat-Elasticsearch"
type: "elasticsearch"
access: "proxy"
url: "http://{{ internal_lb_vip_address | default(hostvars[groups['elastic-logstash'][0]]['ansible_host']) }}:{{ elastic_hap_port }}"
url: "{{ elastic_vip_url }}"
basicAuth: false
basicAuthUser: ""
basicAuthPassword: ""
@ -316,7 +320,7 @@ grafana_datasources:
- name: "filebeat-Elasticsearch"
type: "elasticsearch"
access: "proxy"
url: "http://{{ internal_lb_vip_address | default(hostvars[groups['elastic-logstash'][0]]['ansible_host']) }}:{{ elastic_hap_port }}"
url: "{{ elastic_vip_url }}"
basicAuth: false
basicAuthUser: ""
basicAuthPassword: ""
@ -331,7 +335,7 @@ grafana_datasources:
- name: "heartbeat-Elasticsearch"
type: "elasticsearch"
access: "proxy"
url: "http://{{ internal_lb_vip_address | default(hostvars[groups['elastic-logstash'][0]]['ansible_host']) }}:{{ elastic_hap_port }}"
url: "{{ elastic_vip_url }}"
basicAuth: false
basicAuthUser: ""
basicAuthPassword: ""
@ -346,7 +350,7 @@ grafana_datasources:
- name: "metricbeat-Elasticsearch"
type: "elasticsearch"
access: "proxy"
url: "http://{{ internal_lb_vip_address | default(hostvars[groups['elastic-logstash'][0]]['ansible_host']) }}:{{ elastic_hap_port }}"
url: "{{ elastic_vip_url }}"
basicAuth: false
basicAuthUser: ""
basicAuthPassword: ""
@ -361,7 +365,7 @@ grafana_datasources:
- name: "packetbeat-Elasticsearch"
type: "elasticsearch"
access: "proxy"
url: "http://{{ internal_lb_vip_address | default(hostvars[groups['elastic-logstash'][0]]['ansible_host']) }}:{{ elastic_hap_port }}"
url: "{{ elastic_vip_url }}"
basicAuth: false
basicAuthUser: ""
basicAuthPassword: ""