mirror of https://github.com/Icinga/icingabeat.git
synced 2025-07-28 08:14:02 +02:00

Update icingabeat configuration with the latest config options

This commit is contained in:
parent 1a80e69145
commit eb697707f2
@@ -1,727 +0,0 @@
################### Icingabeat Configuration Example #########################

############################# Icingabeat ######################################

icingabeat:

  # Defines the Icinga API endpoint
  host: "localhost"

  # Defines the port of the API endpoint
  port: 5665

  # A user with sufficient permissions
  user: "icinga"

  # Password of the user
  password: "icinga"

  # Skip SSL verification
  skip_ssl_verify: false

  # Icingabeat supports capturing an event stream and periodic polling of the
  # Icinga status data.
  eventstream:
    #
    # Decide which events to receive from the event stream.
    # The following event stream types are available:
    #
    # * CheckResult
    # * StateChange
    # * Notification
    # * AcknowledgementSet
    # * AcknowledgementCleared
    # * CommentAdded
    # * CommentRemoved
    # * DowntimeAdded
    # * DowntimeRemoved
    # * DowntimeStarted
    # * DowntimeTriggered
    #
    # To disable the eventstream, leave the types empty or comment out the option
    types:
      - CheckResult
      - StateChange

    # Event streams can be filtered by attributes using the prefix 'event.'
    #
    # Example for the CheckResult type with the exit_code set to 2:
    # filter: "event.check_result.exit_status==2"
    #
    # Example for the CheckResult type with the service matching the string
    # pattern "mysql*":
    # filter: 'match("mysql*", event.service)'
    #
    # To disable filtering, set an empty string or comment out the filter option
    filter: ""

    # Defines how fast to reconnect to the API on connection loss
    retry_interval: 10s

  statuspoller:
    # Interval at which the status API is called. Set to 0 to disable polling.
    interval: 60s
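
# Taken together, a minimal icingabeat setup needs only the API connection
# details plus one of the two collection mechanisms above. A sketch, with
# placeholder credentials:
#
#icingabeat:
#  host: "localhost"
#  port: 5665
#  user: "icingabeat"
#  password: "changeme"
#  eventstream:
#    types:
#      - CheckResult
#  statuspoller:
#    interval: 60s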

#================================ General ======================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
# If this option is not defined, the hostname is used.
#name:

# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output. Fields can be scalar values, arrays, dictionaries, or any nested
# combination of these.
#fields:
#  env: staging
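
# As an illustrative sketch of the nesting this allows (names and values here
# are made up):
#fields:
#  env: staging
#  tiers: ["web", "db"]
#  owner:
#    team: monitoring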

# If this option is set to true, the custom fields are stored as top-level
# fields in the output document instead of being grouped under a fields
# sub-dictionary. Default is false.
#fields_under_root: false

# Internal queue size for single events in the processing pipeline
#queue_size: 1000

# The internal queue size for bulk events in the processing pipeline.
# Do not modify this value.
#bulk_queue_size: 0

# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:

#================================ Processors ===================================

# Processors are used to reduce the number of fields in the exported event or to
# enhance the event with external metadata. This section defines a list of
# processors that are applied one by one and the first one receives the initial
# event:
#
# event -> filter1 -> event1 -> filter2 -> event2 ...
#
# The supported processors are drop_fields, drop_event, include_fields, and
# add_cloud_metadata.
#
# For example, you can use the following processors to keep the fields that
# contain CPU load percentages, but remove the fields that contain CPU ticks
# values:
#
#processors:
#- include_fields:
#    fields: ["cpu"]
#- drop_fields:
#    fields: ["cpu.user", "cpu.system"]
#
# The following example drops the events that have the HTTP response code 200:
#
#processors:
#- drop_event:
#    when:
#       equals:
#           http.code: 200
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, and DigitalOcean.
#
#processors:
#- add_cloud_metadata:
#
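# Because processors are applied in order, they also chain. An illustrative
# sketch (field names assumed from the icingabeat event layout, not taken from
# this file) that trims events down to the check result and then drops results
# in the OK state:
#
#processors:
#- include_fields:
#    fields: ["check_result"]
#- drop_event:
#    when:
#      equals:
#        check_result.exit_status: 0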

#================================ Outputs ======================================

# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.

#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Array of hosts to connect to.
  # Scheme and port can be left out and will be set to the default (http and 9200).
  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  hosts: ["localhost:9200"]

  # Set gzip compression level.
  #compression_level: 0

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

  # Dictionary of HTTP parameters to pass within the url with index operations.
  #parameters:
    #param1: value1
    #param2: value2

  # Number of workers per Elasticsearch host.
  #worker: 1

  # Optional index name. The default is "icingabeat" plus date
  # and generates [icingabeat-]YYYY.MM.DD keys.
  #index: "icingabeat-%{+yyyy.MM.dd}"

  # Optional ingest node pipeline. By default no pipeline will be used.
  #pipeline: ""

  # Optional HTTP path
  #path: "/elasticsearch"

  # Custom HTTP headers to add to each request
  #headers:
  #  X-My-Header: Contents of the header

  # Proxy server URL
  #proxy_url: http://proxy:3128

  # The number of times a particular Elasticsearch index operation is attempted. If
  # the indexing operation doesn't succeed after this many retries, the events are
  # dropped. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
  # The default is 50.
  #bulk_max_size: 50

  # Configure the HTTP request timeout before failing a request to Elasticsearch.
  #timeout: 90

  # The number of seconds to wait for new events between two bulk API index requests.
  # If `bulk_max_size` is reached before this interval expires, additional bulk index
  # requests are made.
  #flush_interval: 1s

  # A template is used to set the mapping in Elasticsearch.
  # By default template loading is enabled and the template is loaded.
  # These settings can be adjusted to load your own template or overwrite existing ones.

  # Set to false to disable template loading.
  #template.enabled: true

  # Template name. By default the template name is icingabeat.
  #template.name: "icingabeat"

  # Path to template file
  #template.path: "${path.config}/icingabeat.template.json"

  # Overwrite existing template
  #template.overwrite: false

  # If set to true, icingabeat checks the Elasticsearch version at connect time, and if it
  # is 2.x, it loads the file specified by the template.versions.2x.path setting. The
  # default is true.
  #template.versions.2x.enabled: true

  # Path to the Elasticsearch 2.x version of the template file.
  #template.versions.2x.path: "${path.config}/icingabeat.template-es2x.json"

  # If set to true, icingabeat checks the Elasticsearch version at connect time, and if it
  # is 6.x, it loads the file specified by the template.versions.6x.path setting. The
  # default is true.
  #template.versions.6x.enabled: true

  # Path to the Elasticsearch 6.x version of the template file.
  #template.versions.6x.path: "${path.config}/icingabeat.template-es6x.json"
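
  # Putting those options together, a sketch of loading a custom template and
  # replacing whatever is already installed (the file name is illustrative):
  #template.name: "icingabeat-custom"
  #template.path: "${path.config}/my-template.json"
  #template.overwrite: true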

  # Use SSL settings for HTTPS. Default is true.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # SSL configuration. It is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never
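
  # Putting the pieces together, a mutual-TLS connection to Elasticsearch could
  # be sketched like this (the paths are illustrative):
  #protocol: "https"
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  #ssl.certificate: "/etc/pki/client/cert.pem"
  #ssl.key: "/etc/pki/client/cert.key"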


#----------------------------- Logstash output ---------------------------------
#output.logstash:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The Logstash hosts
  #hosts: ["localhost:5044"]

  # Number of workers per Logstash host.
  #worker: 1

  # Set gzip compression level.
  #compression_level: 3

  # Optionally load balance the events between the Logstash hosts
  #loadbalance: true
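
  # A load-balanced sketch across two Logstash hosts (host names are
  # illustrative); with worker: 2 this starts four publishers in total:
  #hosts: ["logstash1:5044", "logstash2:5044"]
  #loadbalance: true
  #worker: 2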

  # Number of batches to be sent asynchronously to Logstash while processing
  # new batches.
  #pipelining: 0

  # If enabled, only a subset of events in a batch of events is transferred per
  # transaction. The number of events to be sent increases up to `bulk_max_size`
  # if no error is encountered.
  #slow_start: false

  # Optional index name. The default index name is set to the name of the beat
  # in all lowercase.
  #index: 'icingabeat'

  # SOCKS5 proxy server URL
  #proxy_url: socks5://user:password@socks5-server:2233

  # Resolve names locally when using a proxy server. Defaults to false.
  #proxy_use_local_resolver: false

  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

#------------------------------- Kafka output ----------------------------------
#output.kafka:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The list of Kafka broker addresses from where to fetch the cluster metadata.
  # The cluster metadata contains the actual Kafka brokers events are published
  # to.
  #hosts: ["localhost:9092"]

  # The Kafka topic used for produced events. The setting can be a format string
  # using any event field. To set the topic from the document type use `%{[type]}`.
  #topic: beats
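
  # For instance, routing events into per-type topics (an illustrative sketch;
  # with icingabeat this would yield topics such as
  # "icingabeat.event.checkresult"):
  #topic: '%{[type]}'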

  # The Kafka event key setting. Use a format string to create a unique event key.
  # By default no event key will be generated.
  #key: ''

  # The Kafka event partitioning strategy. The default hashing strategy is `hash`
  # using the `output.kafka.key` setting, or it randomly distributes events if
  # `output.kafka.key` is not configured.
  #partition.hash:
    # If enabled, events will only be published to partitions with reachable
    # leaders. Default is false.
    #reachable_only: false

    # Configure alternative event field names used to compute the hash value.
    # If empty, the `output.kafka.key` setting will be used.
    # Default value is an empty list.
    #hash: []
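
  # A sketch of hash partitioning on the originating host, so all events from
  # one monitored host land in the same partition (the field name is assumed
  # from the Beats conventions):
  #partition.hash:
  #  reachable_only: true
  #  hash: ["beat.hostname"]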

  # Authentication details. Password is required if username is set.
  #username: ''
  #password: ''

  # Kafka version icingabeat is assumed to run against. Defaults to the oldest
  # supported stable version (currently version 0.8.2.0)
  #version: 0.8.2

  # Metadata update configuration. The metadata contains the leader information
  # used to decide which broker to publish to.
  #metadata:
    # Max metadata request retry attempts when the cluster is in the middle of a
    # leader election. Defaults to 3 retries.
    #retry.max: 3

    # Waiting time between retries during leader elections. Default is 250ms.
    #retry.backoff: 250ms

    # Refresh metadata interval. Defaults to every 10 minutes.
    #refresh_frequency: 10m

  # The number of concurrent load-balanced Kafka output workers.
  #worker: 1

  # The number of times to retry publishing an event after a publishing failure.
  # After the specified number of retries, the events are typically dropped.
  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
  # all events are published. Set max_retries to a value less than 0 to retry
  # until all events are published. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Kafka request. The default
  # is 2048.
  #bulk_max_size: 2048

  # The number of seconds to wait for responses from the Kafka brokers before
  # timing out. The default is 30s.
  #timeout: 30s

  # The maximum duration a broker will wait for the number of required ACKs. The
  # default is 10s.
  #broker_timeout: 10s

  # The number of messages buffered for each Kafka broker. The default is 256.
  #channel_buffer_size: 256

  # The keep-alive period for an active network connection. If 0s, keep-alives
  # are disabled. The default is 0 seconds.
  #keep_alive: 0

  # Sets the output compression codec. Must be one of none, snappy, or gzip. The
  # default is gzip.
  #compression: gzip

  # The maximum permitted size of JSON-encoded messages. Bigger messages will be
  # dropped. The default value is 1000000 (bytes). This value should be equal to
  # or less than the broker's message.max.bytes.
  #max_message_bytes: 1000000

  # The ACK reliability level required from the broker. 0=no response, 1=wait for
  # local commit, -1=wait for all replicas to commit. The default is 1. Note:
  # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
  # on error.
  #required_acks: 1

  # The number of seconds to wait for new events between two producer API calls.
  #flush_interval: 1s

  # The configurable ClientID used for logging, debugging, and auditing
  # purposes. The default is "beats".
  #client_id: beats

  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
  #ssl.enabled: true

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

#------------------------------- Redis output ----------------------------------
#output.redis:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The list of Redis servers to connect to. If load balancing is enabled, the
  # events are distributed to the servers in the list. If one server becomes
  # unreachable, the events are distributed to the reachable servers only.
  #hosts: ["localhost:6379"]

  # The Redis port to use if hosts does not contain a port number. The default
  # is 6379.
  #port: 6379

  # The name of the Redis list or channel the events are published to. The
  # default is icingabeat.
  #key: icingabeat

  # The password to authenticate with. The default is no authentication.
  #password:

  # The Redis database number where the events are published. The default is 0.
  #db: 0

  # The Redis data type to use for publishing events. If the data type is list,
  # the Redis RPUSH command is used. If the data type is channel, the Redis
  # PUBLISH command is used. The default value is list.
  #datatype: list
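
  # A pub/sub sketch: with these (illustrative) values each event is sent via
  # PUBLISH to the "icinga-events" channel instead of being pushed onto a list:
  #key: icinga-events
  #datatype: channel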

  # The number of workers to use for each host configured to publish events to
  # Redis. Use this setting along with the loadbalance option. For example, if
  # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
  # host).
  #worker: 1

  # If set to true and multiple hosts or workers are configured, the output
  # plugin load balances published events onto all Redis hosts. If set to false,
  # the output plugin sends all events to only one host (determined at random)
  # and will switch to another host if the currently selected one becomes
  # unreachable. The default value is true.
  #loadbalance: true

  # The Redis connection timeout in seconds. The default is 5 seconds.
  #timeout: 5s

  # The number of times to retry publishing an event after a publishing failure.
  # After the specified number of retries, the events are typically dropped.
  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
  # all events are published. Set max_retries to a value less than 0 to retry
  # until all events are published. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Redis request or pipeline.
  # The default is 2048.
  #bulk_max_size: 2048

  # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The
  # value must be a URL with a scheme of socks5://.
  #proxy_url:

  # This option determines whether Redis hostnames are resolved locally when
  # using a proxy. The default value is false, which means that name resolution
  # occurs on the proxy server.
  #proxy_use_local_resolver: false

  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never


#------------------------------- File output -----------------------------------
#output.file:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Path to the directory where to save the generated files. The option is
  # mandatory.
  #path: "/tmp/icingabeat"

  # Name of the generated files. The default is `icingabeat` and it generates
  # files: `icingabeat`, `icingabeat.1`, `icingabeat.2`, etc.
  #filename: icingabeat

  # Maximum size in kilobytes of each file. When this size is reached, and on
  # every icingabeat restart, the files are rotated. The default value is 10240
  # kB.
  #rotate_every_kb: 10000

  # Maximum number of files under path. When this number of files is reached,
  # the oldest file is deleted and the rest are shifted from last to first. The
  # default is 7 files.
  #number_of_files: 7
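
  # Worst-case disk usage is roughly rotate_every_kb * number_of_files; with
  # the stated defaults that is 10240 kB * 7, about 70 MB of event files
  # under `path`.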


#----------------------------- Console output ---------------------------------
#output.console:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Pretty print json event
  #pretty: false

#================================= Paths ======================================

# The home path for the icingabeat installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution (for example, the sample dashboards).
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:

# The configuration path for the icingabeat installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}

# The data path for the icingabeat installation. This is the default base path
# for all the files in which icingabeat needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data

# The logs path for an icingabeat installation. This is the default location for
# the Beat's log files. If not set by a CLI flag or in the configuration file,
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs

#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag.
#dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#dashboards.url:

# The directory from where to read the dashboards. It is used instead of the URL
# when it has a value.
#dashboards.directory:

# The file archive (zip file) from where to read the dashboards. It is used instead
# of the URL when it has a value.
#dashboards.file:

# If this option is enabled, the snapshot URL is used instead of the default URL.
#dashboards.snapshot: false

# The URL from where to download the snapshot version of the dashboards. By default
# this has a value which is computed based on the Beat name and version.
#dashboards.snapshot_url

# In case the archive contains the dashboards from multiple Beats, this lets you
# select which one to load. You can load all the dashboards in the archive by
# setting this to the empty string.
#dashboards.beat: icingabeat

# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
#dashboards.kibana_index: .kibana

# The Elasticsearch index name. This overwrites the index name defined in the
# dashboards and index pattern. Example: testbeat-*
#dashboards.index:

#================================ Logging ======================================
# There are three options for the log output: syslog, file, and stderr.
# On Windows systems, logs are sent to the file output by default; on all
# other systems, they are sent to syslog by default.

# Sets log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
#logging.level: info

# Enable debug output for selected components. To enable all selectors use ["*"].
# Other available selectors are "beat", "publish", and "service".
# Multiple selectors can be chained.
#logging.selectors: [ ]

# Send all logging output to syslog. The default is false.
#logging.to_syslog: true

# If enabled, icingabeat periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
#logging.metrics.enabled: true

# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s

# Logging to rotating files. Set logging.to_files to false to disable logging to
# files.
logging.to_files: true
logging.files:
  # Configure the path where the logs are written. The default is the logs directory
  # under the home path (the binary location).
  #path: /var/log/icingabeat

  # The name of the files where the logs are written to.
  #name: icingabeat

  # Configure log file size limit. If the limit is reached, the log file will be
  # automatically rotated.
  #rotateeverybytes: 10485760 # = 10MB

  # Number of rotated log files to keep. Oldest files will be deleted first.
  #keepfiles: 7

  # The permissions mask to apply when rotating log files. The default value is 0600.
  # Must be a valid Unix-style file permissions mask expressed in octal notation.
  #permissions: 0600
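
# For troubleshooting, a sketch that turns on verbose logging for the publisher
# pipeline only, using the selectors documented above:
#logging.level: debug
#logging.selectors: ["publish"]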
@@ -189,8 +189,8 @@ icingabeat:
 #
 # event -> filter1 -> event1 -> filter2 -> event2 ...
 #
-# The supported processors are drop_fields, drop_event, include_fields, and
-# add_cloud_metadata.
+# The supported processors are drop_fields, drop_event, include_fields,
+# decode_json_fields, and add_cloud_metadata.
 #
 # For example, you can use the following processors to keep the fields that
 # contain CPU load percentages, but remove the fields that contain CPU ticks
@@ -218,6 +218,14 @@ icingabeat:
 # - from: "a"
 #   to: "b"
 #
+# The following example tokenizes the string into fields:
+#
+#processors:
+#- dissect:
+#    tokenizer: "%{key1} - %{key2}"
+#    field: "message"
+#    target_prefix: "dissect"
+#
 # The following example enriches each event with metadata from the cloud
 # provider about the host machine. It works on EC2, GCE, DigitalOcean,
 # Tencent Cloud, and Alibaba Cloud.
@@ -256,7 +264,31 @@ icingabeat:
 #
 #processors:
 #- add_docker_metadata: ~
-#- add_host_metadata: ~
+#
+# The following example enriches each event with host metadata.
+#
+#processors:
+#- add_host_metadata:
+#   netinfo.enabled: false
+#
+# The following example enriches each event with process metadata using
+# process IDs included in the event.
+#
+#processors:
+#- add_process_metadata:
+#    match_pids: ["system.process.ppid"]
+#    target: system.process.parent
+#
+# The following example decodes fields containing JSON strings
+# and replaces the strings with valid JSON objects.
+#
+#processors:
+#- decode_json_fields:
+#    fields: ["field1", "field2", ...]
+#    process_array: false
+#    max_depth: 1
+#    target: ""
+#    overwrite_keys: false
 
 #============================= Elastic Cloud ==================================
@@ -289,6 +321,9 @@ output.elasticsearch:
   # Set gzip compression level.
   #compression_level: 0
 
+  # Configure escaping of HTML symbols in strings.
+  #escape_html: true
+
   # Optional protocol and basic auth credentials.
   #protocol: "https"
   #username: "elastic"
@@ -394,6 +429,9 @@ output.elasticsearch:
   # Set gzip compression level.
   #compression_level: 3
 
+  # Configure escaping of HTML symbols in strings.
+  #escape_html: true
+
   # Optional maximum time to live for a connection to Logstash, after which the
   # connection will be re-established. A value of `0s` (the default) will
   # disable this feature.
@@ -470,6 +508,21 @@ output.elasticsearch:
   # never, once, and freely. Default is never.
   #ssl.renegotiation: never
 
+  # The number of times to retry publishing an event after a publishing failure.
+  # After the specified number of retries, the events are typically dropped.
+  # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting
+  # and retry until all events are published. Set max_retries to a value less
+  # than 0 to retry until all events are published. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Logstash request. The
+  # default is 2048.
+  #bulk_max_size: 2048
+
+  # The number of seconds to wait for responses from the Logstash server before
+  # timing out. The default is 30s.
+  #timeout: 30s
+
 #------------------------------- Kafka output ----------------------------------
 #output.kafka:
   # Boolean flag to enable or disable the output module.
@@ -505,9 +558,16 @@ output.elasticsearch:
   #username: ''
   #password: ''
 
-  # Kafka version icingabeat is assumed to run against. Defaults to the oldest
-  # supported stable version (currently version 0.8.2.0)
-  #version: 0.8.2
+  # Kafka version icingabeat is assumed to run against. Defaults to "1.0.0".
+  #version: '1.0.0'
+
+  # Configure JSON encoding
+  #codec.json:
+    # Pretty print json event
+    #pretty: false
+
+    # Configure escaping of HTML symbols in strings.
+    #escape_html: true
 
   # Metadata update configuration. The metadata contains the leader information
   # used to decide which broker to publish to.
@@ -555,6 +615,10 @@ output.elasticsearch:
   # default is gzip.
   #compression: gzip
 
+  # Set the compression level. Currently only gzip provides a compression level
+  # between 0 and 9. The default value is chosen by the compression algorithm.
+  #compression_level: 4
+
   # The maximum permitted size of JSON-encoded messages. Bigger messages will be
   # dropped. The default value is 1000000 (bytes). This value should be equal to
   # or less than the broker's message.max.bytes.
@@ -611,6 +675,14 @@ output.elasticsearch:
   # Boolean flag to enable or disable the output module.
   #enabled: true
 
+  # Configure JSON encoding
+  #codec.json:
+    # Pretty print json event
+    #pretty: false
+
+    # Configure escaping of HTML symbols in strings.
+    #escape_html: true
+
   # The list of Redis servers to connect to. If load balancing is enabled, the
   # events are distributed to the servers in the list. If one server becomes
   # unreachable, the events are distributed to the reachable servers only.
@@ -658,6 +730,17 @@ output.elasticsearch:
   # until all events are published. The default is 3.
   #max_retries: 3
 
+  # The number of seconds to wait before trying to reconnect to Redis
+  # after a network error. After waiting backoff.init seconds, the Beat
+  # tries to reconnect. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful connection, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to connect to
+  # Redis after a network error. The default is 60s.
+  #backoff.max: 60s
+
   # The maximum number of events to bulk in a single Redis request or pipeline.
   # The default is 2048.
   #bulk_max_size: 2048
@@ -712,6 +795,14 @@ output.elasticsearch:
   # Boolean flag to enable or disable the output module.
   #enabled: true
 
+  # Configure JSON encoding
+  #codec.json:
+    # Pretty print json event
+    #pretty: false
+
+    # Configure escaping of HTML symbols in strings.
+    #escape_html: true
+
   # Path to the directory where to save the generated files. The option is
   # mandatory.
   #path: "/tmp/icingabeat"
@@ -739,8 +830,13 @@ output.elasticsearch:
   # Boolean flag to enable or disable the output module.
   #enabled: true
 
-  # Pretty print json event
-  #pretty: false
+  # Configure JSON encoding
+  #codec.json:
+    # Pretty print json event
+    #pretty: false
+
+    # Configure escaping of HTML symbols in strings.
+    #escape_html: true
 
 #================================= Paths ======================================
@@ -838,6 +934,22 @@ output.elasticsearch:
 # Path to fields.yml file to generate the template
 #setup.template.fields: "${path.config}/fields.yml"
 
+# A list of fields to be added to the template and Kibana index pattern. Also
+# specify setup.template.overwrite: true to overwrite the existing template.
+# This setting is experimental.
+#setup.template.append_fields:
+#- name: field_name
+#  type: field_type
+
+# Enable JSON template loading. If this is enabled, the fields.yml is ignored.
+#setup.template.json.enabled: false
+
+# Path to the JSON template file
+#setup.template.json.path: "${path.config}/template.json"
+
+# Name under which the template is stored in Elasticsearch
+#setup.template.json.name: ""
+
 # Overwrite existing template
 #setup.template.overwrite: false
@@ -961,6 +1073,13 @@ logging.files:
   # Must be a valid Unix-style file permissions mask expressed in octal notation.
   #permissions: 0600
 
+  # Enable log file rotation on time intervals in addition to size-based rotation.
+  # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
+  # are boundary-aligned with minutes, hours, days, weeks, months, and years as
+  # reported by the local system clock. All other intervals are calculated from the
+  # Unix epoch. Defaults to disabled.
+  #interval: 0
+
 # Set to true to log messages in JSON format.
 #logging.json: false
@@ -1015,6 +1134,17 @@ logging.files:
   # The default is 50.
   #bulk_max_size: 50
 
+  # The number of seconds to wait before trying to reconnect to Elasticsearch
+  # after a network error. After waiting backoff.init seconds, the Beat
+  # tries to reconnect. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful connection, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to connect to
+  # Elasticsearch after a network error. The default is 60s.
+  #backoff.max: 60s
+
   # Configure the HTTP request timeout before failing a request to Elasticsearch.
   #timeout: 90
@@ -1054,6 +1184,9 @@ logging.files:
   # never, once, and freely. Default is never.
   #ssl.renegotiation: never
 
+  #metrics.period: 10s
+  #state.period: 1m
+
 #================================ HTTP Endpoint ======================================
 # Each beat can expose internal metrics through an HTTP endpoint. For security
 # reasons the endpoint is disabled by default. This feature is currently experimental.
@@ -1068,3 +1201,8 @@ logging.files:
 
 # Port on which the HTTP endpoint will bind. Default is 5066.
 #http.port: 5066
+
+#============================= Process Security ================================
+
+# Enable or disable seccomp system call filtering on Linux. Default is enabled.
+#seccomp.enabled: true
@@ -112,6 +112,11 @@ setup.kibana:
   # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
   #host: "localhost:5601"
 
+  # Kibana Space ID
+  # ID of the Kibana Space into which the dashboards should be loaded. By default,
+  # the Default Space will be used.
+  #space.id:
+
 #============================= Elastic Cloud ==================================
 
 # These settings simplify using icingabeat with the Elastic Cloud (https://cloud.elastic.co/).
@@ -154,6 +159,14 @@ output.elasticsearch:
   # Client Certificate Key
   #ssl.key: "/etc/pki/client/cert.key"
 
+#================================ Processors =====================================
+
+# Configure processors to enhance or manipulate events generated by the beat.
+
+processors:
+  - add_host_metadata: ~
+  - add_cloud_metadata: ~
+
 #================================ Logging =====================================
 
 # Sets log level. The default log level is info.