diff --git a/docs/version.asciidoc b/docs/version.asciidoc
index 454eb2d4..07dfbaf0 100644
--- a/docs/version.asciidoc
+++ b/docs/version.asciidoc
@@ -1,2 +1,2 @@
-:stack-version: 6.0.0-alpha1
-:doc-branch: master
+:stack-version: 5.1.1
+:doc-branch: 5.1
diff --git a/icingabeat.template-es2x.json b/icingabeat.template-es2x.json
index e784e5c4..9ee67b3c 100644
--- a/icingabeat.template-es2x.json
+++ b/icingabeat.template-es2x.json
@@ -7,7 +7,7 @@
     }
   },
   "_meta": {
-    "version": "6.0.0-alpha1"
+    "version": "5.1.1"
  },
  "dynamic_templates": [
    {
diff --git a/icingabeat.template.json b/icingabeat.template.json
index 2fecc6b2..a0cdb29f 100644
--- a/icingabeat.template.json
+++ b/icingabeat.template.json
@@ -5,7 +5,7 @@
     "norms": false
   },
   "_meta": {
-    "version": "6.0.0-alpha1"
+    "version": "5.1.1"
  },
  "dynamic_templates": [
    {
diff --git a/vendor/github.com/elastic/beats/CHANGELOG.asciidoc b/vendor/github.com/elastic/beats/CHANGELOG.asciidoc
index d5c0ba74..e43497b6 100644
--- a/vendor/github.com/elastic/beats/CHANGELOG.asciidoc
+++ b/vendor/github.com/elastic/beats/CHANGELOG.asciidoc
@@ -8,24 +8,17 @@ // Template, add newest changes here
 === Beats version HEAD
-https://github.com/elastic/beats/compare/v5.0.1...master[Check the HEAD diff]
+https://github.com/elastic/beats/compare/v5.1.1...5.1[Check the HEAD diff]

 ==== Breaking changes

 *Affecting all Beats*

 *Metricbeat*
-- Change data structure of experimental haproxy module. {pull}3003[3003]

 *Packetbeat*

-*Topbeat*
-
 *Filebeat*
-- If a file is falling under ignore_older during startup, offset is now set to end of file instead of 0.
-  With the previous logic the whole file was sent in case a line was added and it was inconsitent with
-  files which were harvested previously. {pull}2907[2907]
-- tail_files is now only applied on the first scan and not for all new files. {pull}2932[2932]

 *Winlogbeat*

@@ -33,55 +26,27 @@ https://github.com/elastic/beats/compare/v5.0.1...master[Check the HEAD diff]
 ==== Bugfixes

 *Affecting all Beats*
-- Fix empty benign errors logged by processor actions. {pull}3046[3046]

 *Metricbeat*
-- Calculate the fsstat values per mounting point, and not filesystem. {pull}2777[2777]
-
 *Packetbeat*

-*Topbeat*
-
 *Filebeat*
-- Fix registry cleanup issue when files falling under ignore_older after restart. {issue}2818[2818]

 *Winlogbeat*

 ==== Added

 *Affecting all Beats*
-- Add add_cloud_metadata processor for collecting cloud provider metadata. {pull}2728[2728]
-- Added decode_json_fields processor for decoding fields containing JSON strings. {pull}2605[2605]

 *Metricbeat*
-- Add experimental filebeat metricset in the beats module. {pull}2297[2297]
-- Add experimental libbeat metricset in the beats module. {pull}2339[2339]
-- Add experimental docker module. Provided by Ingensi and @douaejeouit based on dockbeat.
-- Add username and password config options to the MongoDB module. {pull}2889[2889]
-- Add username and password config options to the PostgreSQL module. {pull}2889[2890]
-- Add system core metricset for Windows. {pull}2883[2883]
-- Add a sample Redis Kibana dashboard. {pull}2916[2916]
-- Add support for MongoDB 3.4 and WiredTiger metrics. {pull}2999[2999]
-- Add experimental kafka module with partition metricset. {pull}2969[2969]
-- Add raw config option for mysql/status metricset. {pull}3001[3001]
-
 *Packetbeat*

-*Topbeat*
-
 *Filebeat*
-- Add command line option -once to run filebeat only once and then close. {pull}2456[2456]
-- Only load matching states into prospector to improve state handling {pull}2840[2840]
-- Reset all states ttl on startup to make sure it is overwritten by new config {pull}2840[2840]
-- Persist all states for files which fall under ignore_older to have consistent behaviour {pull}2859[2859]
-- Improve shutdown behaviour with large number of files. {pull}3035[3035]

 *Winlogbeat*
-- Add `event_logs.batch_read_size` configuration option. {pull}2641[2641]
-
 ==== Deprecated

 *Affecting all Beats*

@@ -90,14 +55,85 @@ https://github.com/elastic/beats/compare/v5.0.1...master[Check the HEAD diff]

 *Packetbeat*

-*Topbeat*
-
 *Filebeat*

 *Winlogbeat*

 ////////////////////////////////////////////////////////////

+[[release-notes-5.1.1]]
+=== Beats version 5.1.1
+https://github.com/elastic/beats/compare/v5.0.2...v5.1.1[View commits]
+
+==== Breaking changes
+
+*Metricbeat*
+
+- Change data structure of experimental haproxy module. {pull}3003[3003]
+
+*Filebeat*
+
+- If a file is falling under `ignore_older` during startup, offset is now set to end of file instead of 0.
+  With the previous logic the whole file was sent in case a line was added and it was inconsistent with
+  files which were harvested previously. {pull}2907[2907]
+- `tail_files` is now only applied on the first scan and not for all new files. {pull}2932[2932]
+
+==== Bugfixes
+
+*Affecting all Beats*
+
+- Fix empty benign errors logged by processor actions. {pull}3046[3046]
+
+*Metricbeat*
+
+- Calculate the fsstat values per mounting point, and not filesystem. {pull}2777[2777]
+
+==== Added
+
+*Affecting all Beats*
+
+- Add add_cloud_metadata processor for collecting cloud provider metadata. {pull}2728[2728]
+- Added decode_json_fields processor for decoding fields containing JSON strings. {pull}2605[2605]
+
+*Metricbeat*
+
+- Add experimental Docker module. Provided by Ingensi and @douaejeouit based on dockbeat.
+- Add a sample Redis Kibana dashboard. {pull}2916[2916]
+- Add support for MongoDB 3.4 and WiredTiger metrics. {pull}2999[2999]
+- Add experimental kafka module with partition metricset. {pull}2969[2969]
+- Add raw config option for mysql/status metricset. {pull}3001[3001]
+
+*Filebeat*
+
+- Add command line option `-once` to run Filebeat only once and then close. {pull}2456[2456]
+- Only load matching states into prospector to improve state handling. {pull}2840[2840]
+- Reset all states TTL on startup to make sure it is overwritten by new config. {pull}2840[2840]
+- Persist all states for files which fall under `ignore_older` to have consistent behaviour. {pull}2859[2859]
+- Improve shutdown behaviour with large number of files. {pull}3035[3035]
+
+*Winlogbeat*
+
+- Add `event_logs.batch_read_size` configuration option. {pull}2641[2641]
+
+[[release-notes-5.1.0]]
+=== Beats version 5.1.0 (skipped)
+
+Version 5.1.0 doesn't exist because, for a short period of time, the Elastic
+Yum and Apt repositories included unreleased binaries labeled 5.1.0. To avoid
+confusion and upgrade issues for the people who installed these without
+realizing it, we decided to skip the 5.1.0 version and release 5.1.1 instead.
+
+[[release-notes-5.0.2]]
+=== Beats version 5.0.2
+https://github.com/elastic/beats/compare/v5.0.1...v5.0.2[View commits]
+
+==== Bugfixes
+
+*Metricbeat*
+
+- Fix the `password` option in the MongoDB module. {pull}2995[2995]
+
+
 [[release-notes-5.0.1]]
 === Beats version 5.0.1
 https://github.com/elastic/beats/compare/v5.0.0...v5.0.1[View commits]
@@ -125,7 +161,7 @@ https://github.com/elastic/beats/compare/v5.0.0...v5.0.1[View commits]

 *Metricbeat*

-- Add username and password config options to the PostgreSQL module. {pull}2889[2890]
+- Add username and password config options to the PostgreSQL module. {pull}2890[2890]
 - Add username and password config options to the MongoDB module. {pull}2889[2889]
 - Add system core metricset for Windows. {pull}2883[2883]
diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/version.yml b/vendor/github.com/elastic/beats/dev-tools/packer/version.yml
index 4cb8469b..4f280a7d 100644
--- a/vendor/github.com/elastic/beats/dev-tools/packer/version.yml
+++ b/vendor/github.com/elastic/beats/dev-tools/packer/version.yml
@@ -1 +1 @@
-version: "6.0.0-alpha1"
+version: "5.1.1"
diff --git a/vendor/github.com/elastic/beats/filebeat/docs/load-balancing.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/load-balancing.asciidoc
index b73059d7..d8905621 100644
--- a/vendor/github.com/elastic/beats/filebeat/docs/load-balancing.asciidoc
+++ b/vendor/github.com/elastic/beats/filebeat/docs/load-balancing.asciidoc
@@ -2,7 +2,7 @@
 == Load Balancing

 Filebeat provides configuration options that you can use to fine
-tune load balancing when sending events to multiple hosts. 
+tune load balancing when sending events to multiple hosts.

 To enable load balancing, you specify `loadbalance: true` when you configure
 the output. For example:
@@ -32,7 +32,7 @@ The load balancer also supports multiple workers per host. The default is
 connections will be used. The total number of workers participating in load
 balancing is `number of hosts * workers`.
 +
-Example: 
+Example:
 +
 [source,yaml]
@@ -46,7 +46,7 @@ output.logstash:
   hosts: ["localhost:5044", "localhost:5045"]
   loadbalance: true
   worker: 2
-------------------------------------------------------------------------------
 +
-In this example, there are 4 workers participating in load balancing. 
+In this example, there are 4 workers participating in load balancing.

 * **Send events to `N` hosts in lock-step:**
 +
@@ -54,7 +54,7 @@ You can configure Filebeat to send events to `N` hosts in lock-step by setting
 `spool_size = N * bulk_max_size`. In lock-step mode, the batch collected by the
 spooler is split up into smaller batches of size `bulk_max_size`. These smaller
 batches are load balanced between available connections. Filebeat waits for all
-sub-batches to be published before it retrieves another batch from the spooler. 
+sub-batches to be published before it retrieves another batch from the spooler.
 +
 This mode requires more memory and CPU usage than the previous mode.
 +
@@ -72,26 +72,3 @@ output.logstash:
   loadbalance: true
   bulk_max_size: 2048
-------------------------------------------------------------------------------
-
-* **Send events in parallel and asynchronously:**
-+
-You can configure Filebeat to send events in parallel and asynchronously by
-setting `publish_async: true`. With this setting, Filebeat pushes a batch of
-lines and then prepares a new batch of lines while waiting for the output to
-ACK. This mode can improve load-balancing throughput, but requires the most
-memory and CPU usage. 
-+
-Example:
-+
-[source,yaml]
--------------------------------------------------------------------------------
-filebeat.prospectors:
-- input_type: log
-  paths:
-    - /var/log/*.log
-filebeat.publish_async: true
-output.logstash:
-  hosts: ["localhost:5044", "localhost:5045"]
-  loadbalance: true
--------------------------------------------------------------------------------
-
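As a sketch of the lock-step formula documented above, a two-host setup with `bulk_max_size: 2048` would use `spool_size: 4096`; all values here are illustrative, not a recommendation:

[source,yaml]
-------------------------------------------------------------------------------
filebeat.spool_size: 4096   # N * bulk_max_size = 2 * 2048
output.logstash:
  hosts: ["localhost:5044", "localhost:5045"]
  loadbalance: true
  bulk_max_size: 2048
-------------------------------------------------------------------------------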
diff --git a/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration/filebeat-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration/filebeat-options.asciidoc
index 9c549036..17d81c62 100644
--- a/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration/filebeat-options.asciidoc
+++ b/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration/filebeat-options.asciidoc
@@ -481,6 +481,8 @@ See <<load-balancing>> for more information about how this setting affects load

 ===== publish_async

+experimental[]
+
 If enabled, the publisher pipeline in Filebeat operates in async mode preparing
 a new batch of lines while waiting for ACK. This option can improve load-balancing
 throughput at the cost of increased memory usage. The default value is false.
diff --git a/vendor/github.com/elastic/beats/filebeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/version.asciidoc
index 454eb2d4..07dfbaf0 100644
--- a/vendor/github.com/elastic/beats/filebeat/docs/version.asciidoc
+++ b/vendor/github.com/elastic/beats/filebeat/docs/version.asciidoc
@@ -1,2 +1,2 @@
-:stack-version: 6.0.0-alpha1
-:doc-branch: master
+:stack-version: 5.1.1
+:doc-branch: 5.1
diff --git a/vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json b/vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json
index 4330d6c2..c91a6d60 100644
--- a/vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json
+++ b/vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json
@@ -7,7 +7,7 @@
     }
   },
   "_meta": {
-    "version": "6.0.0-alpha1"
+    "version": "5.1.1"
  },
  "dynamic_templates": [
    {
diff --git a/vendor/github.com/elastic/beats/filebeat/filebeat.template.json b/vendor/github.com/elastic/beats/filebeat/filebeat.template.json
index 519b7881..a49b7194 100644
--- a/vendor/github.com/elastic/beats/filebeat/filebeat.template.json
+++ b/vendor/github.com/elastic/beats/filebeat/filebeat.template.json
@@ -5,7 +5,7 @@
     "norms": false
   },
   "_meta": {
-    "version": "6.0.0-alpha1"
+    "version": "5.1.1"
  },
  "dynamic_templates": [
    {
diff --git a/vendor/github.com/elastic/beats/filebeat/publisher/publisher.go b/vendor/github.com/elastic/beats/filebeat/publisher/publisher.go
index eb1a692e..07bbf5fe 100644
--- a/vendor/github.com/elastic/beats/filebeat/publisher/publisher.go
+++ b/vendor/github.com/elastic/beats/filebeat/publisher/publisher.go
@@ -6,6 +6,7 @@ import (
 	"github.com/elastic/beats/filebeat/input"
 	"github.com/elastic/beats/libbeat/common"
+	"github.com/elastic/beats/libbeat/logp"
 	"github.com/elastic/beats/libbeat/publisher"
 )

@@ -33,6 +34,7 @@ func New(
 	pub publisher.Publisher,
 ) LogPublisher {
 	if async {
+		logp.Warn("Using publish_async is experimental!")
 		return newAsyncLogPublisher(in, out, pub)
 	}
 	return newSyncLogPublisher(in, out, pub)
diff --git a/vendor/github.com/elastic/beats/glide.yaml b/vendor/github.com/elastic/beats/glide.yaml
index d86cbf3f..d3c53fbc 100644
--- a/vendor/github.com/elastic/beats/glide.yaml
+++ b/vendor/github.com/elastic/beats/glide.yaml
@@ -62,7 +62,7 @@ import:
 - package: github.com/miekg/dns
   version: 5d001d020961ae1c184f9f8152fdc73810481677
 - package: github.com/Shopify/sarama
-  version: fix/sasl-handshake
+  version: enh/offset-replica-id
   repo: https://github.com/urso/sarama
 - package: github.com/rcrowley/go-metrics
   version: ab2277b1c5d15c3cba104e9cbddbdfc622df5ad8
diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/version.asciidoc
index 454eb2d4..07dfbaf0 100644
--- a/vendor/github.com/elastic/beats/heartbeat/docs/version.asciidoc
+++ b/vendor/github.com/elastic/beats/heartbeat/docs/version.asciidoc
@@ -1,2 +1,2 @@
-:stack-version: 6.0.0-alpha1
-:doc-branch: master
+:stack-version: 5.1.1
+:doc-branch: 5.1
diff --git a/vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json b/vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json
index d3876d2d..1112efac 100644
--- a/vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json
+++ b/vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json
@@ -7,7 +7,7 @@
     }
   },
   "_meta": {
-    "version": "6.0.0-alpha1"
+    "version": "5.1.1"
  },
  "dynamic_templates": [
    {
diff --git a/vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json b/vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json
index a289c7e5..1cf33994 100644
--- a/vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json
+++ b/vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json
@@ -5,7 +5,7 @@
     "norms": false
   },
   "_meta": {
-    "version": "6.0.0-alpha1"
+    "version": "5.1.1"
  },
  "dynamic_templates": [
    {
diff --git a/vendor/github.com/elastic/beats/libbeat/beat/version.go b/vendor/github.com/elastic/beats/libbeat/beat/version.go
index 1443658d..def1eb86 100644
--- a/vendor/github.com/elastic/beats/libbeat/beat/version.go
+++ b/vendor/github.com/elastic/beats/libbeat/beat/version.go
@@ -1,3 +1,3 @@
 package beat

-const defaultBeatVersion = "6.0.0-alpha1"
+const defaultBeatVersion = "5.1.1"
diff --git a/vendor/github.com/elastic/beats/libbeat/docker-compose.yml b/vendor/github.com/elastic/beats/libbeat/docker-compose.yml
index 45306a44..b0ec5312 100644
--- a/vendor/github.com/elastic/beats/libbeat/docker-compose.yml
+++ b/vendor/github.com/elastic/beats/libbeat/docker-compose.yml
@@ -54,6 +54,8 @@ services:
     expose:
       - 9092
       - 2181
+    environment:
+      - ADVERTISED_HOST=kafka

   # Overloading kibana with a simple image as it is not needed here
   kibana:
diff --git a/vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc
index 01868822..e994b95a 100644
--- a/vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc
+++ b/vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc
@@ -227,11 +227,12 @@ not:
 ==== Actions

-The supported filter actions are:
+The supported actions are:

 * <<drop-event,`drop_event`>>
 * <<drop-fields,`drop_fields`>>
 * <<include-fields,`include_fields`>>
+* <<add-cloud-metadata,`add_cloud_metadata`>>

 See <<exported-fields>> for the full list of possible fields.
@@ -291,3 +292,82 @@ processors:
       condition
------

+[[add-cloud-metadata]]
+===== add_cloud_metadata
+
+The `add_cloud_metadata` action enriches each event with instance metadata from
+the machine's hosting provider. At startup it will detect the hosting provider
+and cache the instance metadata.
+
+Three cloud providers are supported:
+
+- Amazon Elastic Compute Cloud (EC2)
+- Digital Ocean
+- Google Compute Engine (GCE)
+
+The simple configuration below enables the processor.
+
+[source,yaml]
+--------------------------------------------------------------------------------
+processors:
+- add_cloud_metadata:
+--------------------------------------------------------------------------------
+
+The `add_cloud_metadata` action has one optional configuration setting named
+`timeout` that specifies the maximum amount of time to wait for a successful
+response when detecting the hosting provider. The default timeout value is `3s`.
+If a timeout occurs then no instance metadata will be added to the events. This
+makes it possible to enable this processor for all your deployments (in the
+cloud or on-premise).
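As a minimal sketch, the `timeout` setting described above could be tuned like this (the `10s` value is illustrative, not a recommendation):

[source,yaml]
--------------------------------------------------------------------------------
processors:
- add_cloud_metadata:
    timeout: 10s
--------------------------------------------------------------------------------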
+
+The metadata that is added to events varies by hosting provider. Below are
+examples for each of the supported providers.
+
+_EC2_
+
+[source,json]
+--------------------------------------------------------------------------------
+{
+  "meta": {
+    "cloud": {
+      "availability_zone": "us-east-1c",
+      "instance_id": "i-4e123456",
+      "machine_type": "t2.medium",
+      "provider": "ec2",
+      "region": "us-east-1"
+    }
+  }
+}
+--------------------------------------------------------------------------------
+
+_Digital Ocean_
+
+[source,json]
+--------------------------------------------------------------------------------
+{
+  "meta": {
+    "cloud": {
+      "instance_id": "1234567",
+      "provider": "digitalocean",
+      "region": "nyc2"
+    }
+  }
+}
+--------------------------------------------------------------------------------
+
+_GCE_
+
+[source,json]
+--------------------------------------------------------------------------------
+{
+  "meta": {
+    "cloud": {
+      "availability_zone": "projects/1234567890/zones/us-east1-b",
+      "instance_id": "1234556778987654321",
+      "machine_type": "projects/1234567890/machineTypes/f1-micro",
+      "project_id": "my-dev",
+      "provider": "gce"
+    }
+  }
+}
+--------------------------------------------------------------------------------
diff --git a/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc
index 8e58377c..29a01855 100644
--- a/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc
+++ b/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc
@@ -6,6 +6,8 @@
 --
 This section summarizes the changes in each release.

+* <<release-notes-5.1.1>>
+* <<release-notes-5.0.2>>
 * <<release-notes-5.0.1>>
 * <<release-notes-5.0.0>>
 * <<release-notes-5.0.0-rc1>>
diff --git a/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc
index 454eb2d4..07dfbaf0 100644
--- a/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc
+++ b/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc
@@ -1,2 +1,2 @@
-:stack-version: 6.0.0-alpha1
-:doc-branch: master
+:stack-version: 5.1.1
+:doc-branch: 5.1
diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client.go b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client.go
index cf8f27b3..1eb48dd5 100644
--- a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client.go
+++ b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client.go
@@ -104,11 +104,23 @@ func NewClient(
 		pipeline = nil
 	}

+	u, err := url.Parse(s.URL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse elasticsearch URL: %v", err)
+	}
+	if u.User != nil {
+		s.Username = u.User.Username()
+		s.Password, _ = u.User.Password()
+		u.User = nil
+
+		// Re-write URL without credentials.
+		s.URL = u.String()
+	}
+
 	logp.Info("Elasticsearch url: %s", s.URL)

 	// TODO: add socks5 proxy support
 	var dialer, tlsDialer transport.Dialer
-	var err error

 	dialer = transport.NetDialer(s.Timeout)
 	tlsDialer, err = transport.TLSDialer(dialer, s.TLS, s.Timeout)
@@ -142,19 +154,6 @@ func NewClient(
 		}
 	}

-	u, err := url.Parse(s.URL)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse elasticsearch URL: %v", err)
-	}
-	if u.User != nil {
-		s.Username = u.User.Username()
-		s.Password, _ = u.User.Password()
-		u.User = nil
-
-		// Re-write URL without credentials.
-		s.URL = u.String()
-	}
-
 	client := &Client{
 		Connection: Connection{
 			URL: s.URL,
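The relocated block above moves credential parsing ahead of the `logp.Info` call, so an Elasticsearch URL that embeds credentials is logged without them. A sketch of a configuration that exercises this path (host, user, and password are placeholders):

[source,yaml]
--------------------------------------------------------------------------------
output.elasticsearch:
  hosts: ["https://myuser:mypass@localhost:9200"]
--------------------------------------------------------------------------------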
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields.go
index 392c66db..7c05f56a 100644
--- a/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields.go
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields.go
@@ -21,8 +21,8 @@ type decodeJSONFields struct {

 type config struct {
 	Fields       []string `config:"fields"`
-	MaxDepth     int      `config:"maxDepth" validate:"min=1"`
-	ProcessArray bool     `config:"processArray"`
+	MaxDepth     int      `config:"max_depth" validate:"min=1"`
+	ProcessArray bool     `config:"process_array"`
 }

 var (
@@ -38,7 +38,7 @@ func init() {
 	processors.RegisterPlugin("decode_json_fields",
 		configChecked(newDecodeJSONFields,
 			requireFields("fields"),
-			allowedFields("fields", "maxDepth", "processArray")))
+			allowedFields("fields", "max_depth", "process_array")))
 }

 func newDecodeJSONFields(c common.Config) (processors.Processor, error) {
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields_test.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields_test.go
index 70b0bdd5..f60e94d3 100644
--- a/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields_test.go
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields_test.go
@@ -89,9 +89,9 @@ func TestValidJSONDepthTwo(t *testing.T) {
 	}

 	testConfig, _ = common.NewConfigFrom(map[string]interface{}{
-		"fields":       fields,
-		"processArray": false,
-		"maxDepth":     2,
+		"fields":        fields,
+		"process_array": false,
+		"max_depth":     2,
 	})

 	actual := getActualValue(t, testConfig, input)
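Given the rename above, a processor definition using the new snake_case keys might look like this sketch (the `message` field and depth value are illustrative):

[source,yaml]
--------------------------------------------------------------------------------
processors:
- decode_json_fields:
    fields: ["message"]
    max_depth: 2
    process_array: false
--------------------------------------------------------------------------------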
diff --git a/vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml b/vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml
index 8d93ea40..a915b2b4 100644
--- a/vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml
+++ b/vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml
@@ -94,6 +94,26 @@ metricbeat.modules:
   #period: 10s
   #hosts: ["localhost:9092"]
+  #client_id: metricbeat
+  #retries: 3
+  #backoff: 250ms
+
+  # List of Topics to query metadata for. If empty, all topics will be queried.
+  #topics: []
+
+  # Optional SSL. By default it is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # SASL authentication
+  #username: ""
+  #password: ""

 #------------------------------- MongoDB Module ------------------------------
 #- module: mongodb
diff --git a/vendor/github.com/elastic/beats/metricbeat/_meta/kibana/visualization/Docker-CPU-usage.json b/vendor/github.com/elastic/beats/metricbeat/_meta/kibana/visualization/Docker-CPU-usage.json
index 28b749a4..35b1dc88 100644
--- a/vendor/github.com/elastic/beats/metricbeat/_meta/kibana/visualization/Docker-CPU-usage.json
+++ b/vendor/github.com/elastic/beats/metricbeat/_meta/kibana/visualization/Docker-CPU-usage.json
@@ -1,10 +1,10 @@
 {
-  "visState": "{\"title\":\"Docker CPU usage\",\"type\":\"area\",\"params\":{\"addLegend\":true,\"addTimeMarker\":false,\"addTooltip\":true,\"defaultYExtents\":false,\"interpolate\":\"linear\",\"legendPosition\":\"top\",\"mode\":\"stacked\",\"scale\":\"linear\",\"setYExtents\":false,\"shareYAxis\":true,\"smoothLines\":true,\"times\":[],\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"percentiles\",\"schema\":\"metric\",\"params\":{\"field\":\"docker.cpu.usage.total\",\"percents\":[75],\"customLabel\":\"Total CPU time\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"docker.container.name\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1.75\",\"customLabel\":\"Container name\"}}],\"listeners\":{}}",
+  "visState": "{\n  \"title\": \"Docker CPU usage\",\n  \"type\": \"area\",\n  \"params\": {\n    \"addLegend\": true,\n    \"addTimeMarker\": false,\n    \"addTooltip\": true,\n    \"defaultYExtents\": false,\n    \"interpolate\": \"linear\",\n    \"legendPosition\": \"top\",\n    \"mode\": \"stacked\",\n    \"scale\": \"linear\",\n    \"setYExtents\": false,\n    \"shareYAxis\": true,\n    \"smoothLines\": true,\n    \"times\": [],\n    \"yAxis\": {}\n  },\n  \"aggs\": [\n    {\n      \"id\": \"1\",\n      \"enabled\": true,\n      \"type\": \"percentiles\",\n      \"schema\": \"metric\",\n      \"params\": {\n        \"field\": \"docker.cpu.total.pct\",\n        \"percents\": [\n          75\n        ],\n        \"customLabel\": \"Total CPU time\"\n      }\n    },\n    {\n      \"id\": \"2\",\n      \"enabled\": true,\n      \"type\": \"date_histogram\",\n      \"schema\": \"segment\",\n      \"params\": {\n        \"field\": \"@timestamp\",\n        \"interval\": \"auto\",\n        \"customInterval\": \"2h\",\n        \"min_doc_count\": 1,\n        \"extended_bounds\": {}\n      }\n    },\n    {\n      \"id\": \"3\",\n      \"enabled\": true,\n      \"type\": \"terms\",\n      \"schema\": \"group\",\n      \"params\": {\n        \"field\": \"docker.container.name\",\n        \"size\": 5,\n        \"order\": \"desc\",\n        \"orderBy\": \"1.75\",\n        \"customLabel\": \"Container name\"\n      }\n    }\n  ],\n  \"listeners\": {}\n}",
   "description": "",
   "title": "Docker CPU usage",
   "uiStateJSON": "{}",
   "version": 1,
   "kibanaSavedObjectMeta": {
-    "searchSourceJSON": "{\"filter\":[],\"index\":\"metricbeat-*\",\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647},\"query\":{\"query_string\":{\"query\":\"metricset.module:docker AND metricset.name:cpu\",\"analyze_wildcard\":true}}}"
+    "searchSourceJSON": "{\n  \"filter\": [],\n  \"index\": \"metricbeat-*\",\n  \"highlight\": {\n    \"pre_tags\": [\n      \"@kibana-highlighted-field@\"\n    ],\n    \"post_tags\": [\n      \"@/kibana-highlighted-field@\"\n    ],\n    \"fields\": {\n      \"*\": {}\n    },\n    \"require_field_match\": false,\n    \"fragment_size\": 2147483647\n  },\n  \"query\": {\n    \"query_string\": {\n      \"query\": \"metricset.module:docker AND metricset.name:cpu\",\n      \"analyze_wildcard\": true\n    }\n  }\n}"
   }
 }
\ No newline at end of file
diff --git a/vendor/github.com/elastic/beats/metricbeat/_meta/kibana/visualization/Docker-containers.json b/vendor/github.com/elastic/beats/metricbeat/_meta/kibana/visualization/Docker-containers.json
index 025fadad..ba3ec68f 100644
--- a/vendor/github.com/elastic/beats/metricbeat/_meta/kibana/visualization/Docker-containers.json
+++ b/vendor/github.com/elastic/beats/metricbeat/_meta/kibana/visualization/Docker-containers.json
@@ -1,11 +1,11 @@
 {
-  "visState": "{\"title\":\"Docker containers\",\"type\":\"table\",\"params\":{\"perPage\":8,\"showMeticsAtAllLevels\":false,\"showPartialRows\":false,\"showTotal\":true,\"sort\":{\"columnIndex\":null,\"direction\":null},\"totalFunc\":\"sum\"},\"aggs\":[{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"docker.container.name\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Name\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"max\",\"schema\":\"metric\",\"params\":{\"field\":\"docker.cpu.usage.total\",\"customLabel\":\"CPU usage\"}},{\"id\":\"4\",\"enabled\":true,\"type\":\"max\",\"schema\":\"metric\",\"params\":{\"field\":\"docker.diskio.total\",\"customLabel\":\"DiskIO\"}},{\"id\":\"5\",\"enabled\":true,\"type\":\"max\",\"schema\":\"metric\",\"params\":{\"field\":\"docker.memory.usage.pct\",\"customLabel\":\"Mem (%)\"}},{\"id\":\"6\",\"enabled\":true,\"type\":\"max\",\"schema\":\"metric\",\"params\":{\"field\":\"docker.memory.rss.total\",\"customLabel\":\"Mem RSS\"}},{\"id\":\"1\",\"enabled\":true,\"type\":\"cardinality\",\"schema\":\"metric\",\"params\":{\"field\":\"docker.container.id\",\"customLabel\":\"Number of Containers\"}}],\"listeners\":{}}",
+  "visState": "{\n  \"title\": \"Docker containers\",\n  \"type\": \"table\",\n  \"params\": {\n    \"perPage\": 8,\n    \"showMeticsAtAllLevels\": false,\n    \"showPartialRows\": false,\n    \"showTotal\": true,\n    \"sort\": {\n      \"columnIndex\": null,\n      \"direction\": null\n    },\n    \"totalFunc\": \"sum\"\n  },\n  \"aggs\": [\n    {\n      \"id\": \"2\",\n      \"enabled\": true,\n      \"type\": \"terms\",\n      \"schema\": \"bucket\",\n      \"params\": {\n        \"field\": \"docker.container.name\",\n        \"size\": 5,\n        \"order\": \"desc\",\n        \"orderBy\": \"1\",\n        \"customLabel\": \"Name\"\n      }\n    },\n    {\n      \"id\": \"3\",\n      \"enabled\": true,\n      \"type\": \"max\",\n      \"schema\": \"metric\",\n      \"params\": {\n        \"field\": \"docker.cpu.total.pct\",\n        \"customLabel\": \"CPU usage (%)\"\n      }\n    },\n    {\n      \"id\": \"4\",\n      \"enabled\": true,\n      \"type\": \"max\",\n      \"schema\": \"metric\",\n      \"params\": {\n        \"field\": \"docker.diskio.total\",\n        \"customLabel\": \"DiskIO\"\n      }\n    },\n    {\n      \"id\": \"5\",\n      \"enabled\": true,\n      \"type\": \"max\",\n      \"schema\": \"metric\",\n      \"params\": {\n        \"field\": \"docker.memory.usage.pct\",\n        \"customLabel\": \"Mem (%)\"\n      }\n    },\n    {\n      \"id\": \"6\",\n      \"enabled\": true,\n      \"type\": \"max\",\n      \"schema\": \"metric\",\n      \"params\": {\n        \"field\": \"docker.memory.rss.total\",\n        \"customLabel\": \"Mem RSS\"\n      }\n    },\n    {\n      \"id\": \"1\",\n      \"enabled\": true,\n      \"type\": \"cardinality\",\n      \"schema\": \"metric\",\n      \"params\": {\n        \"field\": \"docker.container.id\",\n        \"customLabel\": \"Number of Containers\"\n      }\n    }\n  ],\n  \"listeners\": {}\n}",
\"enabled\": true,\n \"type\": \"cardinality\",\n \"schema\": \"metric\",\n \"params\": {\n \"field\": \"docker.container.id\",\n \"customLabel\": \"Number of Containers\"\n }\n }\n ],\n \"listeners\": {}\n}", "description": "", "title": "Docker containers", - "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"asc\"}}}}", + "uiStateJSON": "{\n \"vis\": {\n \"params\": {\n \"sort\": {\n \"columnIndex\": 1,\n \"direction\": \"asc\"\n }\n }\n }\n}", "version": 1, "savedSearchId": "Metricbeat-Docker", "kibanaSavedObjectMeta": { - "searchSourceJSON": "{\"filter\":[]}" + "searchSourceJSON": "{\n \"filter\": []\n}" } } \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc index 6c9d16a6..00e568fe 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc @@ -1959,7 +1959,14 @@ Oldest offset of the partition. [float] -=== kafka.partition.partition +== partition Fields + +Partition data. + + + +[float] +=== kafka.partition.partition.id type: long @@ -1967,7 +1974,55 @@ Partition id. [float] -=== kafka.partition.topic +=== kafka.partition.partition.leader + +type: long + +Leader id (broker). + + +[float] +=== kafka.partition.partition.isr + +type: list + +List of isr ids. + + +[float] +=== kafka.partition.partition.replica + +type: long + +Replica id (broker). + + +[float] +=== kafka.partition.partition.insync_replica + +type: boolean + +Indicates if replica is included in the in-sync replicate set (ISR). + + +[float] +=== kafka.partition.partition.error.code + +type: long + +Error code from fetching partition. + + +[float] +=== kafka.partition.topic.error.code + +type: long + +topic error code. + + +[float] +=== kafka.partition.topic.name type: keyword diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc index 8fee81b5..a8f68041 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc @@ -24,6 +24,26 @@ metricbeat.modules: #period: 10s #hosts: ["localhost:9092"] + #client_id: metricbeat + #retries: 3 + #backoff: 250ms + + # List of Topics to query metadata for. If empty, all topics will be queried. + #topics: [] + + # Optional SSL. By default is off. 
diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc
index 8fee81b5..a8f68041 100644
--- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc
+++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc
@@ -24,6 +24,26 @@ metricbeat.modules:
   #period: 10s
   #hosts: ["localhost:9092"]
+  #client_id: metricbeat
+  #retries: 3
+  #backoff: 250ms
+
+  # List of Topics to query metadata for. If empty, all topics will be queried.
+  #topics: []
+
+  # Optional SSL. By default it is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # SASL authentication
+  #username: ""
+  #password: ""
 ----

 [float]
diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/version.asciidoc
index 454eb2d4..07dfbaf0 100644
--- a/vendor/github.com/elastic/beats/metricbeat/docs/version.asciidoc
+++ b/vendor/github.com/elastic/beats/metricbeat/docs/version.asciidoc
@@ -1,2 +1,2 @@
-:stack-version: 6.0.0-alpha1
-:doc-branch: master
+:stack-version: 5.1.1
+:doc-branch: 5.1
diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml b/vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml
index dd073e4c..40bdf7c8 100644
--- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml
+++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml
@@ -94,6 +94,26 @@ metricbeat.modules:
   #period: 10s
   #hosts: ["localhost:9092"]
+  #client_id: metricbeat
+  #retries: 3
+  #backoff: 250ms
+
+  # List of Topics to query metadata for. If empty, all topics will be queried.
+  #topics: []
+
+  # Optional SSL. By default it is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # SASL authentication
+  #username: ""
+  #password: ""

 #------------------------------- MongoDB Module ------------------------------
 #- module: mongodb
diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json b/vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json
index e5141696..cc7e9c90 100644
--- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json
+++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json
@@ -7,7 +7,7 @@
     }
   },
   "_meta": {
-    "version": "6.0.0-alpha1"
+    "version": "5.1.1"
  },
  "dynamic_templates": [
    {
@@ -950,12 +950,43 @@
          }
        },
        "partition": {
-          "type": "long"
+          "properties": {
+            "error": {
+              "properties": {
+                "code": {
+                  "type": "long"
+                }
+              }
+            },
+            "id": {
+              "type": "long"
+            },
+            "insync_replica": {
+              "type": "boolean"
+            },
+            "leader": {
+              "type": "long"
+            },
+            "replica": {
+              "type": "long"
+            }
+          }
        },
        "topic": {
-          "ignore_above": 1024,
-          "index": "not_analyzed",
-          "type": "string"
+          "properties": {
+            "error": {
+              "properties": {
+                "code": {
+                  "type": "long"
+                }
+              }
+            },
+            "name": {
+              "ignore_above": 1024,
+              "index": "not_analyzed",
+              "type": "string"
+            }
+          }
        }
      }
    }
diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json b/vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json
index 2edac8ca..dcb6fadc 100644
--- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json
+++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json
@@ -5,7 +5,7 @@
     "norms": false
   },
   "_meta": {
-    "version": "6.0.0-alpha1"
+    "version": "5.1.1"
  },
  "dynamic_templates": [
    {
@@ -957,11 +957,42 @@
          }
        },
        "partition": {
-          "type": "long"
+          "properties": {
+            "error": {
+              "properties": {
+                "code": {
+                  "type": "long"
+                }
+              }
+            },
+            "id": {
+              "type": "long"
+            },
+            "insync_replica": {
+              "type": "boolean"
+            },
+            "leader": {
+              "type": "long"
+            },
"replica": { + "type": "long" + } + } }, "topic": { - "ignore_above": 1024, - "type": "keyword" + "properties": { + "error": { + "properties": { + "code": { + "type": "long" + } + } + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } } } } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/kibana/visualization/Docker-CPU-usage.json b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/kibana/visualization/Docker-CPU-usage.json index 28b749a4..35b1dc88 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/kibana/visualization/Docker-CPU-usage.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/kibana/visualization/Docker-CPU-usage.json @@ -1,10 +1,10 @@ { - "visState": "{\"title\":\"Docker CPU usage\",\"type\":\"area\",\"params\":{\"addLegend\":true,\"addTimeMarker\":false,\"addTooltip\":true,\"defaultYExtents\":false,\"interpolate\":\"linear\",\"legendPosition\":\"top\",\"mode\":\"stacked\",\"scale\":\"linear\",\"setYExtents\":false,\"shareYAxis\":true,\"smoothLines\":true,\"times\":[],\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"percentiles\",\"schema\":\"metric\",\"params\":{\"field\":\"docker.cpu.usage.total\",\"percents\":[75],\"customLabel\":\"Total CPU time\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"docker.container.name\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1.75\",\"customLabel\":\"Container name\"}}],\"listeners\":{}}", + "visState": "{\n \"title\": \"Docker CPU usage\",\n \"type\": \"area\",\n \"params\": {\n \"addLegend\": true,\n \"addTimeMarker\": false,\n \"addTooltip\": true,\n \"defaultYExtents\": false,\n \"interpolate\": \"linear\",\n \"legendPosition\": \"top\",\n \"mode\": \"stacked\",\n \"scale\": \"linear\",\n \"setYExtents\": false,\n \"shareYAxis\": true,\n \"smoothLines\": true,\n \"times\": [],\n \"yAxis\": {}\n },\n \"aggs\": [\n {\n \"id\": \"1\",\n \"enabled\": true,\n \"type\": \"percentiles\",\n \"schema\": \"metric\",\n \"params\": {\n \"field\": \"docker.cpu.total.pct\",\n \"percents\": [\n 75\n ],\n \"customLabel\": \"Total CPU time\"\n }\n },\n {\n \"id\": \"2\",\n \"enabled\": true,\n \"type\": \"date_histogram\",\n \"schema\": \"segment\",\n \"params\": {\n \"field\": \"@timestamp\",\n \"interval\": \"auto\",\n \"customInterval\": \"2h\",\n \"min_doc_count\": 1,\n \"extended_bounds\": {}\n }\n },\n {\n \"id\": \"3\",\n \"enabled\": true,\n \"type\": \"terms\",\n \"schema\": \"group\",\n \"params\": {\n \"field\": \"docker.container.name\",\n \"size\": 5,\n \"order\": \"desc\",\n \"orderBy\": \"1.75\",\n \"customLabel\": \"Container name\"\n }\n }\n ],\n \"listeners\": {}\n}", "description": "", "title": "Docker CPU usage", "uiStateJSON": "{}", "version": 1, "kibanaSavedObjectMeta": { - "searchSourceJSON": "{\"filter\":[],\"index\":\"metricbeat-*\",\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647},\"query\":{\"query_string\":{\"query\":\"metricset.module:docker AND metricset.name:cpu\",\"analyze_wildcard\":true}}}" + "searchSourceJSON": "{\n \"filter\": [],\n \"index\": \"metricbeat-*\",\n \"highlight\": {\n \"pre_tags\": [\n 
\"@kibana-highlighted-field@\"\n ],\n \"post_tags\": [\n \"@/kibana-highlighted-field@\"\n ],\n \"fields\": {\n \"*\": {}\n },\n \"require_field_match\": false,\n \"fragment_size\": 2147483647\n },\n \"query\": {\n \"query_string\": {\n \"query\": \"metricset.module:docker AND metricset.name:cpu\",\n \"analyze_wildcard\": true\n }\n }\n}" } } \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/kibana/visualization/Docker-containers.json b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/kibana/visualization/Docker-containers.json index 025fadad..ba3ec68f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/kibana/visualization/Docker-containers.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/kibana/visualization/Docker-containers.json @@ -1,11 +1,11 @@ { - "visState": "{\"title\":\"Docker containers\",\"type\":\"table\",\"params\":{\"perPage\":8,\"showMeticsAtAllLevels\":false,\"showPartialRows\":false,\"showTotal\":true,\"sort\":{\"columnIndex\":null,\"direction\":null},\"totalFunc\":\"sum\"},\"aggs\":[{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"docker.container.name\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Name\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"max\",\"schema\":\"metric\",\"params\":{\"field\":\"docker.cpu.usage.total\",\"customLabel\":\"CPU usage\"}},{\"id\":\"4\",\"enabled\":true,\"type\":\"max\",\"schema\":\"metric\",\"params\":{\"field\":\"docker.diskio.total\",\"customLabel\":\"DiskIO\"}},{\"id\":\"5\",\"enabled\":true,\"type\":\"max\",\"schema\":\"metric\",\"params\":{\"field\":\"docker.memory.usage.pct\",\"customLabel\":\"Mem (%)\"}},{\"id\":\"6\",\"enabled\":true,\"type\":\"max\",\"schema\":\"metric\",\"params\":{\"field\":\"docker.memory.rss.total\",\"customLabel\":\"Mem RSS\"}},{\"id\":\"1\",\"enabled\":true,\"type\":\"cardinality\",\"schema\":\"metric\",\"params\":{\"field\":\"docker.container.id\",\"customLabel\":\"Number of Containers\"}}],\"listeners\":{}}", + "visState": "{\n \"title\": \"Docker containers\",\n \"type\": \"table\",\n \"params\": {\n \"perPage\": 8,\n \"showMeticsAtAllLevels\": false,\n \"showPartialRows\": false,\n \"showTotal\": true,\n \"sort\": {\n \"columnIndex\": null,\n \"direction\": null\n },\n \"totalFunc\": \"sum\"\n },\n \"aggs\": [\n {\n \"id\": \"2\",\n \"enabled\": true,\n \"type\": \"terms\",\n \"schema\": \"bucket\",\n \"params\": {\n \"field\": \"docker.container.name\",\n \"size\": 5,\n \"order\": \"desc\",\n \"orderBy\": \"1\",\n \"customLabel\": \"Name\"\n }\n },\n {\n \"id\": \"3\",\n \"enabled\": true,\n \"type\": \"max\",\n \"schema\": \"metric\",\n \"params\": {\n \"field\": \"docker.cpu.total.pct\",\n \"customLabel\": \"CPU usage (%)\"\n }\n },\n {\n \"id\": \"4\",\n \"enabled\": true,\n \"type\": \"max\",\n \"schema\": \"metric\",\n \"params\": {\n \"field\": \"docker.diskio.total\",\n \"customLabel\": \"DiskIO\"\n }\n },\n {\n \"id\": \"5\",\n \"enabled\": true,\n \"type\": \"max\",\n \"schema\": \"metric\",\n \"params\": {\n \"field\": \"docker.memory.usage.pct\",\n \"customLabel\": \"Mem (%)\"\n }\n },\n {\n \"id\": \"6\",\n \"enabled\": true,\n \"type\": \"max\",\n \"schema\": \"metric\",\n \"params\": {\n \"field\": \"docker.memory.rss.total\",\n \"customLabel\": \"Mem RSS\"\n }\n },\n {\n \"id\": \"1\",\n \"enabled\": true,\n \"type\": \"cardinality\",\n \"schema\": \"metric\",\n \"params\": {\n \"field\": 
\"docker.container.id\",\n \"customLabel\": \"Number of Containers\"\n }\n }\n ],\n \"listeners\": {}\n}", "description": "", "title": "Docker containers", - "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"asc\"}}}}", + "uiStateJSON": "{\n \"vis\": {\n \"params\": {\n \"sort\": {\n \"columnIndex\": 1,\n \"direction\": \"asc\"\n }\n }\n }\n}", "version": 1, "savedSearchId": "Metricbeat-Docker", "kibanaSavedObjectMeta": { - "searchSourceJSON": "{\"filter\":[]}" + "searchSourceJSON": "{\n \"filter\": []\n}" } } \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/config.yml index b68543e7..025c1f4a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/config.yml @@ -4,3 +4,23 @@ #period: 10s #hosts: ["localhost:9092"] + #client_id: metricbeat + #retries: 3 + #backoff: 250ms + + # List of Topics to query metadata for. If empty, all topics will be queried. + #topics: [] + + # Optional SSL. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # SASL authentication + #username: "" + #password: "" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/_meta/data.json index 34e0398e..a41e118b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/_meta/data.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/_meta/data.json @@ -11,14 +11,18 @@ "id": 0 }, "offset": { - "newest": 13, + "newest": 11, "oldest": 0 }, - "partition": 0, - "replicas": [ - 0 - ], - "topic": "testtopic" + "partition": { + "id": 0, + "insync_replica": true, + "leader": 0, + "replica": 0 + }, + "topic": { + "name": "test-metricbeat-70692474374989458" + } } }, "metricset": { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/_meta/fields.yml index f6d76ccd..8c7f9230 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/_meta/fields.yml @@ -16,14 +16,49 @@ type: long description: > Oldest offset of the partition. + - name: partition + type: group + description: > + Partition data. + fields: + - name: id + type: long + description: > + Partition id. + + - name: leader + type: long + description: > + Leader id (broker). + - name: isr + type: list + description: > + List of isr ids. + - name: replica + type: long + description: > + Replica id (broker). + + - name: insync_replica + type: boolean + description: > + Indicates if replica is included in the in-sync replicate set (ISR). + + - name: error.code + type: long + description: > + Error code from fetching partition. + + - name: topic.error.code type: long description: > - Partition id. - - name: topic + topic error code. 
+
+  - name: topic.name
     type: keyword
     description: >
       Topic name
+
   - name: broker.id
     type: long
     description: >
@@ -32,3 +67,5 @@
     type: keyword
     description: >
       Broker address
+
+
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/config.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/config.go
new file mode 100644
index 00000000..e36e8cb2
--- /dev/null
+++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/config.go
@@ -0,0 +1,38 @@
+package partition
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/elastic/beats/libbeat/outputs"
+)
+
+type connConfig struct {
+	Retries  int                `config:"retries" validate:"min=0"`
+	Backoff  time.Duration      `config:"backoff" validate:"min=0"`
+	TLS      *outputs.TLSConfig `config:"ssl"`
+	Username string             `config:"username"`
+	Password string             `config:"password"`
+	ClientID string             `config:"client_id"`
+	Topics   []string           `config:"topics"`
+}
+
+type metaConfig struct {
+}
+
+var defaultConfig = connConfig{
+	Retries:  3,
+	Backoff:  250 * time.Millisecond,
+	TLS:      nil,
+	Username: "",
+	Password: "",
+	ClientID: "metricbeat",
+}
+
+func (c *connConfig) Validate() error {
+	if c.Username != "" && c.Password == "" {
+		return fmt.Errorf("password must be set when username is configured")
+	}
+
+	return nil
+}
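A module configuration matching `connConfig` above might look like the following sketch; note that `Validate` rejects a `username` without a `password` (all values are illustrative):

[source,yaml]
-------------------------------------------------------------------------------
metricbeat.modules:
- module: kafka
  metricsets: ["partition"]
  hosts: ["localhost:9092"]
  client_id: metricbeat
  retries: 3
  backoff: 250ms
  username: "beats"
  password: "changeme"
-------------------------------------------------------------------------------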
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition.go
index 9a25640f..59c02e79 100644
--- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition.go
+++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition.go
@@ -1,8 +1,14 @@
 package partition

 import (
+	"errors"
+	"fmt"
+	"io"
+	"time"
+
 	"github.com/elastic/beats/libbeat/common"
 	"github.com/elastic/beats/libbeat/logp"
+	"github.com/elastic/beats/libbeat/outputs"

 	"github.com/elastic/beats/metricbeat/mb"
 	"github.com/elastic/beats/metricbeat/mb/parse"
@@ -19,75 +25,300 @@ func init() {
 // MetricSet type defines all fields of the partition MetricSet
 type MetricSet struct {
 	mb.BaseMetricSet
-	client sarama.Client
+
+	broker *sarama.Broker
+	cfg    *sarama.Config
+	id     int32
+	topics []string
 }

-// New creates a new instance of the partition MetricSet
-func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
-	logp.Warn("EXPERIMENTAL: The %v %v metricset is experimental", base.Module().Name(), base.Name())
+var noID int32 = -1

-	return &MetricSet{BaseMetricSet: base}, nil
+var errFailQueryOffset = errors.New("operation failed")
+
+// New creates a new instance of the partition MetricSet
+func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
+	config := defaultConfig
+	if err := base.Module().UnpackConfig(&config); err != nil {
+		return nil, err
+	}
+
+	tls, err := outputs.LoadTLSConfig(config.TLS)
+	if err != nil {
+		return nil, err
+	}
+
+	cfg := sarama.NewConfig()
+	cfg.Net.DialTimeout = base.Module().Config().Timeout
+	cfg.Net.ReadTimeout = base.Module().Config().Timeout
+	cfg.ClientID = config.ClientID
+	cfg.Metadata.Retry.Max = config.Retries
+	cfg.Metadata.Retry.Backoff = config.Backoff
+	if tls != nil {
+		cfg.Net.TLS.Enable = true
+		cfg.Net.TLS.Config = tls.BuildModuleConfig("")
+	}
+	if config.Username != "" {
+		cfg.Net.SASL.Enable = true
+		cfg.Net.SASL.User = config.Username
+		cfg.Net.SASL.Password = config.Password
+	}
+
+	broker := sarama.NewBroker(base.Host())
+	return &MetricSet{
+		BaseMetricSet: base,
+		broker:        broker,
+		cfg:           cfg,
+		id:            noID,
+		topics:        config.Topics,
+	}, nil
+}
+
+func (m *MetricSet) connect() (*sarama.Broker, error) {
+	b := m.broker
+	if err := b.Open(m.cfg); err != nil {
+		return nil, err
+	}
+
+	if m.id != noID {
+		return b, nil
+	}
+
+	// current broker is bootstrap only. Get metadata to find id:
+	meta, err := queryMetadataWithRetry(b, m.cfg, m.topics)
+	if err != nil {
+		closeBroker(b)
+		return nil, err
+	}
+
+	addr := b.Addr()
+	for _, other := range meta.Brokers {
+		if other.Addr() == addr {
+			m.id = other.ID()
+			break
+		}
+	}
+
+	if m.id == noID {
+		closeBroker(b)
+		err = fmt.Errorf("No advertised broker with address %v found", addr)
+		return nil, err
+	}
+
+	return b, nil
+}

 // Fetch partition stats list from kafka
 func (m *MetricSet) Fetch() ([]common.MapStr, error) {
-	if m.client == nil {
-		config := sarama.NewConfig()
-		config.Net.DialTimeout = m.Module().Config().Timeout
-		config.Net.ReadTimeout = m.Module().Config().Timeout
-		config.ClientID = "metricbeat"
-
-		client, err := sarama.NewClient([]string{m.Host()}, config)
-		if err != nil {
-			return nil, err
-		}
-		m.client = client
+	b, err := m.connect()
+	if err != nil {
+		return nil, err
 	}

-	topics, err := m.client.Topics()
+	defer closeBroker(b)
+	response, err := queryMetadataWithRetry(b, m.cfg, m.topics)
 	if err != nil {
 		return nil, err
 	}

 	events := []common.MapStr{}
-	for _, topic := range topics {
-		partitions, err := m.client.Partitions(topic)
-		if err != nil {
-			logp.Err("Fetch partition info for topic %s: %s", topic, err)
+	evtBroker := common.MapStr{
+		"id":      m.id,
+		"address": b.Addr(),
+	}
+
+	for _, topic := range response.Topics {
+		evtTopic := common.MapStr{
+			"name": topic.Name,
 		}

-		for _, partition := range partitions {
-			newestOffset, err := m.client.GetOffset(topic, partition, sarama.OffsetNewest)
-			if err != nil {
-				logp.Err("Fetching newest offset information for partition %s in topic %s: %s", partition, topic, err)
+		if topic.Err != 0 {
+			evtTopic["error"] = common.MapStr{
+				"code": topic.Err,
+			}
+		}
+
+		for _, partition := range topic.Partitions {
+			// partition offsets can be queried from leader only
+			if m.id != partition.Leader {
+				continue
 			}

-			oldestOffset, err := m.client.GetOffset(topic, partition, sarama.OffsetOldest)
-			if err != nil {
-				logp.Err("Fetching oldest offset information for partition %s in topic %s: %s", partition, topic, err)
-			}
+			// collect offsets for all replicas
+			for _, id := range partition.Replicas {

-			broker, err := m.client.Leader(topic, partition)
-			if err != nil {
-				logp.Err("Fetching brocker for partition %s in topic %s: %s", partition, topic, err)
-			}
+				// Get oldest and newest available offsets
+				offOldest, offNewest, offOK, err := queryOffsetRange(b, id, topic.Name, partition.ID)

-			event := common.MapStr{
-				"topic":     topic,
-				"partition": partition,
-				"offset": common.MapStr{
-					"oldest": oldestOffset,
-					"newest": newestOffset,
-				},
-				"broker": common.MapStr{
-					"id":      broker.ID(),
-					"address": broker.Addr(),
-				},
-			}
+				if !offOK {
+					if err == nil {
+						err = errFailQueryOffset
+					}

-			events = append(events, event)
+					logp.Err("Failed to query kafka partition (%v:%v) offsets: %v",
+						topic.Name, partition.ID, err)
+					continue
+				}
+
+				partitionEvent := common.MapStr{
+					"id":             partition.ID,
+					"leader":         partition.Leader,
+					"replica":        id,
+					"insync_replica": hasID(id, partition.Isr),
+				}
+
+				if partition.Err != 0 {
+					partitionEvent["error"] = common.MapStr{
+						"code": partition.Err,
+					}
+				}
+
+				// create event
+				event := common.MapStr{
+					"topic":     evtTopic,
+					"broker":    evtBroker,
+					"partition": partitionEvent,
+					"offset": common.MapStr{
+						"newest": offNewest,
+						"oldest": offOldest,
+					},
+				}
+
+				events = append(events, event)
+			}
 		}
 	}

 	return events, nil
 }
+
+func hasID(id int32, lst []int32) bool {
+	for _, other := range lst {
+		if id == other {
+			return true
+		}
+	}
+	return false
+}
+
+// queryOffsetRange queries the broker for the oldest and the newest offsets in
+// a kafka topics partition for a given replica.
+func queryOffsetRange(
+	b *sarama.Broker,
+	replicaID int32,
+	topic string,
+	partition int32,
+) (int64, int64, bool, error) {
+	oldest, okOld, err := queryOffset(b, replicaID, topic, partition, sarama.OffsetOldest)
+	if err != nil {
+		return -1, -1, false, err
+	}
+
+	newest, okNew, err := queryOffset(b, replicaID, topic, partition, sarama.OffsetNewest)
+	if err != nil {
+		return -1, -1, false, err
+	}
+
+	return oldest, newest, okOld && okNew, nil
+}
+
+func queryOffset(
+	b *sarama.Broker,
+	replicaID int32,
+	topic string,
+	partition int32,
+	time int64,
+) (int64, bool, error) {
+	req := &sarama.OffsetRequest{}
+	if replicaID != noID {
+		req.SetReplicaID(replicaID)
+	}
+	req.AddBlock(topic, partition, time, 1)
+	resp, err := b.GetAvailableOffsets(req)
+	if err != nil {
+		return -1, false, err
+	}
+
+	block := resp.GetBlock(topic, partition)
+	if len(block.Offsets) == 0 {
+		return -1, false, nil
+	}
+
+	return block.Offsets[0], true, nil
+}
+
+func closeBroker(b *sarama.Broker) {
+	if ok, _ := b.Connected(); ok {
+		b.Close()
+	}
+}
+
+func queryMetadataWithRetry(
+	b *sarama.Broker,
+	cfg *sarama.Config,
+	topics []string,
+) (r *sarama.MetadataResponse, err error) {
+	err = withRetry(b, cfg, func() (e error) {
+		r, e = b.GetMetadata(&sarama.MetadataRequest{topics})
+		return
+	})
+	return
+}
+
+func withRetry(
+	b *sarama.Broker,
+	cfg *sarama.Config,
+	f func() error,
+) error {
+	var err error
+	for max := 0; max < cfg.Metadata.Retry.Max; max++ {
+		if ok, _ := b.Connected(); !ok {
+			if err = b.Open(cfg); err == nil {
+				err = f()
+			}
+		} else {
+			err = f()
+		}
+
+		if err == nil {
+			return nil
+		}
+
+		retry, reconnect := checkRetryQuery(err)
+		if !retry {
+			return err
+		}
+
+		time.Sleep(cfg.Metadata.Retry.Backoff)
+		if reconnect {
+			closeBroker(b)
+		}
+	}
+	return err
+}
+
+func checkRetryQuery(err error) (retry, reconnect bool) {
+	if err == nil {
+		return false, false
+	}
+
+	if err == io.EOF {
+		return true, true
+	}
+
+	k, ok := err.(sarama.KError)
+	if !ok {
+		return false, false
+	}
+
+	switch k {
+	case sarama.ErrLeaderNotAvailable, sarama.ErrReplicaNotAvailable,
+		sarama.ErrOffsetsLoadInProgress, sarama.ErrRebalanceInProgress:
+		return true, false
+	case sarama.ErrRequestTimedOut, sarama.ErrBrokerNotAvailable,
+		sarama.ErrNetworkException:
+		return true, true
+	}
+
+	return false, false
+}
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition_integration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition_integration_test.go
index 41aca7ec..a76f75b2 100644
--- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition_integration_test.go
+++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition_integration_test.go
@@ -64,13 +64,13 @@ func TestTopic(t *testing.T) {

 	// Its possible that other topics exists -> select the right data
 	for _, data := range dataBefore {
-		if data["topic"] == testTopic {
+		if data["topic"].(common.MapStr)["name"] == testTopic {
 			offsetBefore = data["offset"].(common.MapStr)["newest"].(int64)
 		}
 	}
data["topic"].(common.MapStr)["name"] == testTopic { offsetAfter = data["offset"].(common.MapStr)["newest"].(int64) } } diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/version.asciidoc index 454eb2d4..07dfbaf0 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/version.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/version.asciidoc @@ -1,2 +1,2 @@ -:stack-version: 6.0.0-alpha1 -:doc-branch: master +:stack-version: 5.1.1 +:doc-branch: 5.1 diff --git a/vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json b/vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json index aa02e310..02baf695 100644 --- a/vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json +++ b/vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json @@ -7,7 +7,7 @@ } }, "_meta": { - "version": "6.0.0-alpha1" + "version": "5.1.1" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json b/vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json index baee6c8e..7ff8407f 100644 --- a/vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json +++ b/vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json @@ -5,7 +5,7 @@ "norms": false }, "_meta": { - "version": "6.0.0-alpha1" + "version": "5.1.1" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-snapshot b/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-snapshot index 143221a8..58389f77 100644 --- a/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-snapshot +++ b/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-snapshot @@ -2,7 +2,7 @@ FROM docker.elastic.co/kibana/kibana-ubuntu-base:latest MAINTAINER Elastic Docker Team -ARG KIBANA_DOWNLOAD_URL=https://snapshots.elastic.co/downloads/kibana/kibana-6.0.0-alpha1-SNAPSHOT-linux-x86_64.tar.gz +ARG KIBANA_DOWNLOAD_URL=https://staging.elastic.co/5.1.1-acecfcb6/downloads/kibana/kibana-5.1.0-linux-x86_64.tar.gz ARG X_PACK_URL EXPOSE 5601 diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile-snapshot b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile-snapshot index 11e030c3..b5ac225c 100644 --- a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile-snapshot +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile-snapshot @@ -2,8 +2,8 @@ FROM java:8-jre ENV LS_VERSION 5 -ENV VERSION 6.0.0-alpha1-SNAPSHOT -ENV URL https://snapshots.elastic.co/downloads/logstash/logstash-${VERSION}.tar.gz +ENV VERSION 5.1.1 +ENV URL https://staging.elastic.co/5.1.1-acecfcb6/downloads/logstash/logstash-${VERSION}.tar.gz ENV PATH $PATH:/opt/logstash-$VERSION/bin # Cache variable can be set during building to invalidate the build cache with `--build-arg CACHE=$(date +%s) .` diff --git a/vendor/github.com/elastic/beats/testing/environments/snapshot.yml b/vendor/github.com/elastic/beats/testing/environments/snapshot.yml index 604a4eba..5fe6a111 100644 --- a/vendor/github.com/elastic/beats/testing/environments/snapshot.yml +++ b/vendor/github.com/elastic/beats/testing/environments/snapshot.yml @@ -7,8 +7,8 @@ services: context: ./docker/elasticsearch dockerfile: Dockerfile-snapshot args: - ELASTIC_VERSION: 6.0.0-alpha1-SNAPSHOT - 
ES_DOWNLOAD_URL: https://snapshots.elastic.co/downloads/elasticsearch + ELASTIC_VERSION: 5.1.1 + ES_DOWNLOAD_URL: 'https://staging.elastic.co/5.1.1-acecfcb6/downloads/elasticsearch' #XPACK: http://snapshots.elastic.co/downloads/packs/x-pack/x-pack-6.0.0-alpha1-SNAPSHOT.zip environment: - "ES_JAVA_OPTS=-Xms512m -Xmx512m" @@ -26,5 +26,5 @@ services: context: ./docker/kibana dockerfile: Dockerfile-snapshot args: [ ] - #KIBANA_DOWNLOAD_URL: https://snapshots.elastic.co/downloads/kibana/kibana-6.0.0-alpha1-SNAPSHOT-linux-x86_64.tar.gz + #KIBANA_DOWNLOAD_URL: 'https://staging.elastic.co/5.1.0-1106bba6/downloads/kibana/kibana-5.1.0-linux-x86_64.tar.gz' #X_PACK_URL: http://snapshots.elastic.co/downloads/kibana-plugins/x-pack/x-pack-6.0.0-alpha1-SNAPSHOT.zip diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/offset_request.go index c66d8f70..2f74df3d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/offset_request.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/offset_request.go @@ -22,11 +22,20 @@ func (b *offsetRequestBlock) decode(pd packetDecoder) (err error) { } type OffsetRequest struct { - blocks map[string]map[int32]*offsetRequestBlock + replicaID *int32 + blocks map[string]map[int32]*offsetRequestBlock + + storeReplicaID int32 } func (r *OffsetRequest) encode(pe packetEncoder) error { - pe.putInt32(-1) // replica ID is always -1 for clients + if r.replicaID == nil { + // default replica ID is always -1 for clients + pe.putInt32(-1) + } else { + pe.putInt32(*r.replicaID) + } + err := pe.putArrayLength(len(r.blocks)) if err != nil { return err @@ -100,6 +109,11 @@ func (r *OffsetRequest) requiredVersion() KafkaVersion { return minVersion } +func (r *OffsetRequest) SetReplicaID(id int32) { + r.storeReplicaID = id + r.replicaID = &r.storeReplicaID +} + func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetRequestBlock) diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration/winlogbeat-options.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration/winlogbeat-options.asciidoc index 1dd02602..20e8c74c 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration/winlogbeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration/winlogbeat-options.asciidoc @@ -55,8 +55,11 @@ winlogbeat.event_logs: ===== event_logs.batch_read_size +experimental[] + The maximum number of event log records to read from the Windows API in a single -batch. The default batch size is 100. *{vista_and_newer}* +batch. The default batch size is 100. Most Windows versions return an error if +the value is larger than 1024. *{vista_and_newer}* Winlogbeat starts a goroutine (a lightweight thread) to read from each individual event log. 
The goroutine reads a batch of event log records using the diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/version.asciidoc index 454eb2d4..07dfbaf0 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/version.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/version.asciidoc @@ -1,2 +1,2 @@ -:stack-version: 6.0.0-alpha1 -:doc-branch: master +:stack-version: 5.1.1 +:doc-branch: 5.1 diff --git a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json index 4cff819e..56b6ea18 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json +++ b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json @@ -7,7 +7,7 @@ } }, "_meta": { - "version": "6.0.0-alpha1" + "version": "5.1.1" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json index 8d747356..5491615d 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json +++ b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json @@ -5,7 +5,7 @@ "norms": false }, "_meta": { - "version": "6.0.0-alpha1" + "version": "5.1.1" }, "dynamic_templates": [ {
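To make the vendored sarama change concrete, here is a minimal usage sketch of the new `OffsetRequest.SetReplicaID` API — the hook that lets the partition metricset request offsets on behalf of a specific follower replica instead of the client default of -1. The broker address, topic, partition, and replica ID below are placeholder assumptions; the sarama calls themselves (`NewBroker`, `Open`, `AddBlock`, `GetAvailableOffsets`, `GetBlock`) are the ones the patch uses.

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Placeholder connection details; adjust for your environment.
	const (
		addr      = "localhost:9092"
		topic     = "test-topic"
		partition = int32(0)
		replicaID = int32(0)
	)

	cfg := sarama.NewConfig()
	b := sarama.NewBroker(addr)
	if err := b.Open(cfg); err != nil {
		log.Fatal(err)
	}
	defer b.Close()

	// Without SetReplicaID the request is encoded with the client default
	// (-1); with it, offsets can be requested on behalf of a specific
	// replica, which is what queryOffset in the metricset relies on.
	req := &sarama.OffsetRequest{}
	req.SetReplicaID(replicaID)
	req.AddBlock(topic, partition, sarama.OffsetNewest, 1)

	resp, err := b.GetAvailableOffsets(req)
	if err != nil {
		log.Fatal(err)
	}

	block := resp.GetBlock(topic, partition)
	if block == nil || len(block.Offsets) == 0 {
		log.Fatalf("no offsets returned for %v:%v", topic, partition)
	}
	fmt.Printf("newest offset for %v:%v is %d\n", topic, partition, block.Offsets[0])
}

On the wire, the only difference from a stock client request is the replica ID in the request header, which is why the patch is confined to encode() and two new struct fields.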