# Copyright 2017 The Openstack-Helm Authors. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Default values for fluentd. # This is a YAML-formatted file. # Declare variables to be passed into your templates. release_group: null deployment: type: DaemonSet labels: fluentd: node_selector_key: openstack-control-plane node_selector_value: enabled prometheus_fluentd_exporter: node_selector_key: openstack-control-plane node_selector_value: enabled images: tags: fluentd: docker.io/openstackhelm/fluentd:debian-20190903 prometheus_fluentd_exporter: docker.io/bitnami/fluentd-exporter:0.2.0 dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 helm_tests: docker.io/openstackhelm/heat:newton-ubuntu_xenial elasticsearch_template: docker.io/openstackhelm/heat:newton-ubuntu_xenial image_repo_sync: docker.io/docker:17.07.0 pull_policy: IfNotPresent local_registry: active: false exclude: - dep_check - image_repo_sync dependencies: dynamic: common: local_image_registry: jobs: - fluentd-image-repo-sync services: - endpoint: node service: local_image_registry static: fluentd: services: null image_repo_sync: services: - endpoint: internal service: local_image_registry prometheus_fluentd_exporter: services: - endpoint: internal service: fluentd conf: fluentd: template: | bind 0.0.0.0 port 24220 @type monitor_agent bind 0.0.0.0 port "#{ENV['FLUENTD_PORT']}" @type forward @type null key log pattern /info/i tag info.${tag} key log pattern /warn/i tag warn.${tag} key log pattern /error/i tag error.${tag} key log pattern /critical/i tag critical.${tag} key log pattern (.+) tag info.${tag} @type rewrite_tag_filter enable_ruby true application ${record["kubernetes"]["labels"]["application"]} level ${tag_parts[0]} @type record_transformer application ${tag_parts[1]} @type record_transformer key level pattern INFO tag info.${tag} key level pattern WARN tag warn.${tag} key level pattern ERROR tag error.${tag} key level pattern CRITICAL tag critical.${tag} @type rewrite_tag_filter key application pattern keystone tag auth.${tag} key application pattern horizon tag auth.${tag} key application pattern mariadb tag auth.${tag} key application pattern memcached tag auth.${tag} key application pattern rabbitmq tag auth.${tag} @type rewrite_tag_filter chunk_limit_size 8MB flush_interval 15s flush_thread_count 8 queue_limit_length 256 retry_forever false retry_max_interval 30 host "#{ENV['ELASTICSEARCH_HOST']}" reload_connections false reconnect_on_error true reload_on_failure true include_tag_key true logstash_format true logstash_prefix libvirt password "#{ENV['ELASTICSEARCH_PASSWORD']}" port "#{ENV['ELASTICSEARCH_PORT']}" @type elasticsearch user "#{ENV['ELASTICSEARCH_USERNAME']}" chunk_limit_size 8MB flush_interval 15s flush_thread_count 8 queue_limit_length 256 retry_forever false retry_max_interval 30 host "#{ENV['ELASTICSEARCH_HOST']}" reload_connections false reconnect_on_error true reload_on_failure true include_tag_key true logstash_format true logstash_prefix qemu password "#{ENV['ELASTICSEARCH_PASSWORD']}" port "#{ENV['ELASTICSEARCH_PORT']}" @type elasticsearch user "#{ENV['ELASTICSEARCH_USERNAME']}" chunk_limit_size 8MB flush_interval 15s flush_thread_count 8 queue_limit_length 256 retry_forever false retry_max_interval 30 host "#{ENV['ELASTICSEARCH_HOST']}" reload_connections false reconnect_on_error true reload_on_failure true include_tag_key true logstash_format true logstash_prefix journal password "#{ENV['ELASTICSEARCH_PASSWORD']}" port "#{ENV['ELASTICSEARCH_PORT']}" @type elasticsearch user "#{ENV['ELASTICSEARCH_USERNAME']}" chunk_limit_size 8MB flush_interval 15s flush_thread_count 8 queue_limit_length 256 retry_forever false retry_max_interval 30 host "#{ENV['ELASTICSEARCH_HOST']}" reload_connections false reconnect_on_error true reload_on_failure true include_tag_key true logstash_format true logstash_prefix kernel password "#{ENV['ELASTICSEARCH_PASSWORD']}" port "#{ENV['ELASTICSEARCH_PORT']}" @type elasticsearch user "#{ENV['ELASTICSEARCH_USERNAME']}" chunk_limit_size 8MB flush_interval 15s flush_thread_count 8 queue_limit_length 256 retry_forever false retry_max_interval 30 host "#{ENV['ELASTICSEARCH_HOST']}" reload_connections false reconnect_on_error true reload_on_failure true include_tag_key true logstash_format true password "#{ENV['ELASTICSEARCH_PASSWORD']}" port "#{ENV['ELASTICSEARCH_PORT']}" @type elasticsearch type_name fluent user "#{ENV['ELASTICSEARCH_USERNAME']}" fluentd_exporter: log: format: "logger:stdout?json=true" level: "info" endpoints: cluster_domain_suffix: cluster.local local_image_registry: name: docker-registry namespace: docker-registry hosts: default: localhost internal: docker-registry node: localhost host_fqdn_override: default: null port: registry: node: 5000 elasticsearch: namespace: null name: elasticsearch auth: admin: username: admin password: changeme hosts: data: elasticsearch-data default: elasticsearch-logging discovery: elasticsearch-discovery public: elasticsearch host_fqdn_override: default: null path: default: null scheme: default: http port: http: default: 80 fluentd: namespace: null name: fluentd hosts: default: fluentd-logging host_fqdn_override: default: null path: default: null scheme: default: http port: service: default: 24224 metrics: default: 24220 kafka: namespace: null name: kafka auth: admin: username: admin password: changeme hosts: default: kafka-broker public: kafka host_fqdn_override: default: null path: default: null scheme: default: kafka port: broker: default: 9092 public: 80 prometheus_fluentd_exporter: namespace: null hosts: default: fluentd-exporter host_fqdn_override: default: null path: default: /metrics scheme: default: 'http' port: metrics: default: 9309 monitoring: prometheus: enabled: false fluentd_exporter: scrape: true network: fluentd: node_port: enabled: false port: 32329 network_policy: prometheus-fluentd-exporter: ingress: - {} egress: - {} fluentd: ingress: - {} egress: - {} pod: env: fluentd: null tolerations: fluentd: enabled: false security_context: fluentd: pod: runAsUser: 65534 container: fluentd: allowPrivilegeEscalation: false readOnlyRootFilesystem: true exporter: pod: runAsUser: 65534 container: fluentd_exporter: allowPrivilegeEscalation: false readOnlyRootFilesystem: true affinity: anti: type: default: preferredDuringSchedulingIgnoredDuringExecution topologyKey: default: kubernetes.io/hostname weight: default: 10 lifecycle: upgrades: deployments: revision_history: 3 pod_replacement_strategy: RollingUpdate rolling_update: max_unavailable: 1 max_surge: 3 daemonsets: pod_replacement_strategy: RollingUpdate fluentd: enabled: true min_ready_seconds: 0 max_unavailable: 1 termination_grace_period: fluentd: timeout: 30 prometheus_fluentd_exporter: timeout: 30 replicas: fluentd: 3 prometheus_fluentd_exporter: 1 resources: enabled: false fluentd: limits: memory: '1024Mi' cpu: '2000m' requests: memory: '128Mi' cpu: '500m' prometheus_fluentd_exporter: limits: memory: "1024Mi" cpu: "2000m" requests: memory: "128Mi" cpu: "500m" mounts: fluentd: fluentd: manifests: configmap_bin: true configmap_etc: true deployment_fluentd: true job_image_repo_sync: true monitoring: prometheus: configmap_bin_exporter: true deployment_exporter: true network_policy_exporter: false service_exporter: true network_policy: false secret_elasticsearch: true secret_kafka: false service_fluentd: true