Mirror of https://github.com/Icinga/icingabeat.git (synced 2025-04-08 17:15:05 +02:00)

Update libbeat to v5.1.2

commit a9bb4a709c (parent 1ea21190d2)
@@ -1,2 +1,3 @@
 :stack-version: 5.1.1
+:doc-branch: 5.1
 :go-version: 1.7.1
@@ -7,7 +7,7 @@
     }
   },
   "_meta": {
-    "version": "5.1.1"
+    "version": "5.1.2"
   },
   "dynamic_templates": [
     {
@@ -5,7 +5,7 @@
       "norms": false
     },
    "_meta": {
-    "version": "5.1.1"
+    "version": "5.1.2"
   },
   "dynamic_templates": [
     {
18 vendor/github.com/elastic/beats/CHANGELOG.asciidoc (generated, vendored)

@@ -8,7 +8,7 @@
 // Template, add newest changes here

 === Beats version HEAD
-https://github.com/elastic/beats/compare/v5.1.1...5.1[Check the HEAD diff]
+https://github.com/elastic/beats/compare/v5.1.2...5.1[Check the HEAD diff]

 ==== Breaking changes

@@ -32,6 +32,7 @@ https://github.com/elastic/beats/compare/v5.1.1...5.1[Check the HEAD diff]
 *Packetbeat*

 *Filebeat*
+- Fix registry migration issue from old states where files were only harvested after second restart. {pull}3322[3322]

 *Winlogbeat*

@@ -61,6 +62,21 @@ https://github.com/elastic/beats/compare/v5.1.1...5.1[Check the HEAD diff]

 ////////////////////////////////////////////////////////////

+[[release-notes-5.1.2]]
+=== Beats version 5.1.2
+https://github.com/elastic/beats/compare/v5.1.1...v5.1.2[View commits]
+
+==== Bugfixes
+
+*Packetbeat*
+
+- Fix error on importing dashboards due to colons in the Cassandra dashboard. {issue}3140[3140]
+- Fix error on importing dashboards due to the wrong type for the geo_point fields. {pull}3147[3147]
+
+*Winlogbeat*
+
+- Fix for "The array bounds are invalid" error when reading large events. {issue}3076[3076]
+
 [[release-notes-5.1.1]]
 === Beats version 5.1.1
 https://github.com/elastic/beats/compare/v5.0.2...v5.1.1[View commits]
4 vendor/github.com/elastic/beats/Makefile (generated, vendored)

@@ -3,6 +3,7 @@ BUILD_DIR=build
 COVERAGE_DIR=${BUILD_DIR}/coverage
 BEATS=packetbeat filebeat winlogbeat metricbeat heartbeat
 PROJECTS=libbeat ${BEATS}
+PROJECTS_ENV=libbeat metricbeat
 SNAPSHOT?=yes

 # Runs complete testsuites (unit, system, integration) for all beats with coverage and race detection.
@@ -12,6 +13,9 @@ testsuite:
 	$(foreach var,$(PROJECTS),$(MAKE) -C $(var) testsuite || exit 1;)
 	#$(MAKE) -C generate test

+stop-environments:
+	$(foreach var,$(PROJECTS_ENV),$(MAKE) -C $(var) stop-environment || exit 0;)
+
 # Runs unit and system tests without coverage and race detection.
 .PHONY: test
 test:
2 vendor/github.com/elastic/beats/dev-tools/packer/version.yml (generated, vendored)

@@ -1 +1 @@
-version: "5.1.1"
+version: "5.1.2"
88 vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc (generated, vendored)

@@ -1,16 +1,32 @@
 [[filtering-and-enhancing-data]]
 == Filtering and Enhancing the Exported Data

-When your use case requires only a subset of the data exported by Filebeat or you need to add metadata, you can <<filebeat-filtering-overview,use Filebeat config options to filter the data>>, or you can <<defining-processors,define processors>>.
+Your use case might require only a subset of the data exported by Filebeat, or
+you might need to enhance the exported data (for example, by adding metadata).
+Filebeat provides a couple of options for filtering and enhancing exported
+data. You can:
+
+* <<filebeat-filtering-overview,Define filters at the prospector level>> to
+configure each prospector to include or exclude specific lines or files.
+* <<defining-processors,Define processors>> to configure global processing
+across all data exported by Filebeat.

 [float]
 [[filebeat-filtering-overview]]
-=== Filebeat Config Options for Filtering
+=== Filtering at the Prospector Level

-You can specify configuration options in the `filebeat` section of the config file to define regular expressions that
-match the lines you want to include and/or exclude from the output. The supported options are <<include-lines,`include_lines`>>, <<exclude-lines,`exclude_lines`>>, and <<exclude-files,`exclude_files`>>.
+You can specify filtering options at the prospector level to configure which
+lines or files are included or excluded in the output. This allows you to
+specify different filtering criteria for each prospector.

-For example, you can use the `include_lines` option to export any lines that start with "ERR" or "WARN":
+You configure prospector-level filtering in the `filebeat.prospectors` section
+of the config file by specifying regular expressions that match the lines you
+want to include and/or exclude from the output. The supported options are
+<<include-lines,`include_lines`>>, <<exclude-lines,`exclude_lines`>>, and
+<<exclude-files,`exclude_files`>>.
+
+For example, you can use the `include_lines` option to export any lines that
+start with "ERR" or "WARN":

 [source,yaml]
 -------------------------------------------------------------------------------------
@@ -21,9 +37,11 @@ filebeat.prospectors:
   include_lines: ["^ERR", "^WARN"]
 -------------------------------------------------------------------------------------

-The disadvantage of this approach is that you need to implement a configuration option for each filtering criteria that you need.
+The disadvantage of this approach is that you need to implement a
+configuration option for each filtering criteria that you need.

-See <<configuration-filebeat-options,Filebeat configuration options>> for more information about each option.
+See <<configuration-filebeat-options,Filebeat configuration options>> for more
+information about each option.

 [float]
 [[defining-processors]]
@@ -31,7 +49,11 @@ See <<configuration-filebeat-options,Filebeat configuration options>> for more i

 include::../../libbeat/docs/processors.asciidoc[]

-For example, the following configuration drops all the DEBUG messages.
+[float]
+[[drop-event-example]]
+==== Drop Event Example
+
+The following configuration drops all the DEBUG messages.

 [source,yaml]
 -----------------------------------------------------
@@ -53,4 +75,54 @@ processors:
           source: "test"
 ----------------

+[float]
+[[decode-json-example]]
+==== Decode JSON Example
+
+In the following example, the fields exported by Filebeat include a
+field, `inner`, whose value is a JSON object encoded as a string:
+
+[source,json]
+-----------------------------------------------------
+{ "outer": "value", "inner": "{\"data\": \"value\"}" }
+-----------------------------------------------------
+
+The following configuration decodes the inner JSON object:
+
+[source,yaml]
+-----------------------------------------------------
+filebeat.prospectors:
+- paths:
+    - input.json
+  json.keys_under_root: true
+
+processors:
+ - decode_json_fields:
+     fields: ["inner"]
+
+output.console.pretty: true
+-----------------------------------------------------
+
+The resulting output looks something like this:
+
+["source","json",subs="attributes"]
+-----------------------------------------------------
+{
+  "@timestamp": "2016-12-06T17:38:11.541Z",
+  "beat": {
+    "hostname": "host.example.com",
+    "name": "host.example.com",
+    "version": "{version}"
+  },
+  "inner": {
+    "data": "value"
+  },
+  "input_type": "log",
+  "offset": 55,
+  "outer": "value",
+  "source": "input.json",
+  "type": "log"
+}
+-----------------------------------------------------
+
+See <<configuration-processors>> for more information.
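As a side note, the transformation performed by `decode_json_fields` in the example above can be pictured with a small Python sketch. This is an illustration of the documented behavior only, not Beats code:

```python
import json

# Illustration (not Beats code) of what decode_json_fields does to the
# event above: fields listed in `fields` that hold JSON strings are
# replaced with the parsed objects.
def decode_json_fields(event, fields):
    for name in fields:
        value = event.get(name)
        if isinstance(value, str):
            try:
                event[name] = json.loads(value)
            except ValueError:
                pass  # leave values that are not valid JSON untouched
    return event

event = {"outer": "value", "inner": "{\"data\": \"value\"}"}
print(decode_json_fields(event, ["inner"]))
# {'outer': 'value', 'inner': {'data': 'value'}}
```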
2 vendor/github.com/elastic/beats/filebeat/docs/reference/configuration.asciidoc (generated, vendored)

@@ -12,7 +12,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
 * <<configuration-filebeat-options>>
 * <<configuration-global-options>>
 * <<configuration-general>>
-* <<configuration-processors>>
 * <<elasticsearch-output>>
 * <<logstash-output>>
 * <<kafka-output>>
@@ -22,6 +21,7 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
 * <<configuration-output-ssl>>
 * <<configuration-path>>
 * <<configuration-logging>>
+* <<configuration-processors>>

 include::configuration/filebeat-options.asciidoc[]

@@ -560,11 +560,11 @@ filebeat.shutdown_timeout: 5s

 include::../../../../libbeat/docs/generalconfig.asciidoc[]

-include::../../../../libbeat/docs/processors-config.asciidoc[]
-
 include::../../../../libbeat/docs/outputconfig.asciidoc[]

 include::../../../../libbeat/docs/shared-path-config.asciidoc[]

 include::../../../../libbeat/docs/loggingconfig.asciidoc[]
+
+include::../../../../libbeat/docs/processors-config.asciidoc[]
1 vendor/github.com/elastic/beats/filebeat/docs/version.asciidoc (generated, vendored)

@@ -1,2 +1,3 @@
 :stack-version: 5.1.1
+:doc-branch: 5.1
 :go-version: 1.7.1
2 vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json (generated, vendored)

@@ -7,7 +7,7 @@
     }
   },
   "_meta": {
-    "version": "5.1.1"
+    "version": "5.1.2"
   },
   "dynamic_templates": [
     {
2 vendor/github.com/elastic/beats/filebeat/filebeat.template.json (generated, vendored)

@@ -5,7 +5,7 @@
       "norms": false
     },
   "_meta": {
-    "version": "5.1.1"
+    "version": "5.1.2"
   },
   "dynamic_templates": [
     {
24 vendor/github.com/elastic/beats/filebeat/registrar/registrar.go (generated, vendored)

@@ -118,15 +118,7 @@ func (r *Registrar) loadStates() error {
 		return fmt.Errorf("Error decoding states: %s", err)
 	}

-	// Set all states to finished and disable TTL on restart
-	// For all states covered by a prospector, TTL will be overwritten with the prospector value
-	for key, state := range states {
-		state.Finished = true
-		// Set ttl to -2 to easily spot which states are not managed by a prospector
-		state.TTL = -2
-		states[key] = state
-	}
-
+	states = resetStates(states)
 	r.states.SetStates(states)
 	logp.Info("States Loaded from registrar: %+v", len(states))

@@ -176,6 +168,7 @@ func (r *Registrar) loadAndConvertOldState(f *os.File) bool {
 	// Convert old states to new states
 	logp.Info("Old registry states found: %v", len(oldStates))
 	states := convertOldStates(oldStates)
+	states = resetStates(states)
 	r.states.SetStates(states)

 	// Rewrite registry in new format
@@ -186,6 +179,19 @@ func (r *Registrar) loadAndConvertOldState(f *os.File) bool {
 	return true
 }

+// resetStates sets all states to finished and disable TTL on restart
+// For all states covered by a prospector, TTL will be overwritten with the prospector value
+func resetStates(states []file.State) []file.State {
+
+	for key, state := range states {
+		state.Finished = true
+		// Set ttl to -2 to easily spot which states are not managed by a prospector
+		state.TTL = -2
+		states[key] = state
+	}
+	return states
+}
+
 func convertOldStates(oldStates map[string]file.State) []file.State {
 	// Convert old states to new states
 	states := []file.State{}
178 vendor/github.com/elastic/beats/filebeat/tests/system/test_migration.py (new file, generated, vendored)

@@ -0,0 +1,178 @@
+from filebeat import BaseTest
+
+import os
+import platform
+import time
+import shutil
+import json
+import stat
+from nose.plugins.skip import Skip, SkipTest
+
+
+class Test(BaseTest):
+
+    def test_migration_non_windows(self):
+        """
+        Tests if migration from old filebeat registry to new format works
+        """
+
+        if os.name == "nt":
+            raise SkipTest
+
+        registry_file = self.working_dir + '/registry'
+
+        # Write old registry file
+        with open(registry_file, 'w') as f:
+            f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}}')
+
+        self.render_config_template(
+            path=os.path.abspath(self.working_dir) + "/log/input*",
+            clean_removed="false",
+            clean_inactive="0",
+        )
+
+        filebeat = self.start_beat()
+
+        self.wait_until(
+            lambda: self.log_contains("Old registry states found: 2"),
+            max_timeout=15)
+
+        self.wait_until(
+            lambda: self.log_contains("Old states converted to new states and written to registrar: 2"),
+            max_timeout=15)
+
+        filebeat.check_kill_and_wait()
+
+        # Check if content is same as above
+        assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4
+        assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6
+
+        # Compare first entry
+        oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}}')
+        newJson = self.get_registry_entry_by_path("logs/hello.log")
+        del newJson["timestamp"]
+        del newJson["ttl"]
+        assert newJson == oldJson
+
+        # Compare second entry
+        oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}')
+        newJson = self.get_registry_entry_by_path("logs/log2.log")
+        del newJson["timestamp"]
+        del newJson["ttl"]
+        assert newJson == oldJson
+
+        # Make sure the right number of entries is in
+        data = self.get_registry()
+        assert len(data) == 2
+
+    def test_migration_windows(self):
+        """
+        Tests if migration from old filebeat registry to new format works
+        """
+
+        if os.name != "nt":
+            raise SkipTest
+
+        registry_file = self.working_dir + '/registry'
+
+        # Write old registry file
+        with open(registry_file, 'w') as f:
+            f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}}')
+
+        self.render_config_template(
+            path=os.path.abspath(self.working_dir) + "/log/input*",
+        )
+
+        filebeat = self.start_beat()
+
+        self.wait_until(
+            lambda: self.log_contains("Old registry states found: 2"),
+            max_timeout=15)
+
+        self.wait_until(
+            lambda: self.log_contains("Old states converted to new states and written to registrar: 2"),
+            max_timeout=15)
+
+        filebeat.check_kill_and_wait()
+
+        # Check if content is same as above
+        assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4
+        assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6
+
+        # Compare first entry
+        oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}}')
+        newJson = self.get_registry_entry_by_path("logs/hello.log")
+        del newJson["timestamp"]
+        del newJson["ttl"]
+        assert newJson == oldJson
+
+        # Compare second entry
+        oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}')
+        newJson = self.get_registry_entry_by_path("logs/log2.log")
+        del newJson["timestamp"]
+        del newJson["ttl"]
+        assert newJson == oldJson
+
+        # Make sure the right number of entries is in
+        data = self.get_registry()
+        assert len(data) == 2
+
+    def test_migration_continue_reading(self):
+        """
+        Tests if after the migration filebeat keeps reading the file
+        """
+
+        os.mkdir(self.working_dir + "/log/")
+        testfile1 = self.working_dir + "/log/test.log"
+
+        with open(testfile1, 'w') as f:
+            f.write("entry10\n")
+
+        registry_file = self.working_dir + '/registry'
+
+        self.render_config_template(
+            path=os.path.abspath(self.working_dir) + "/log/*",
+            output_file_filename="filebeat_1",
+        )
+
+        # Run filebeat to create a registry
+        filebeat = self.start_beat(output="filebeat1.log")
+        self.wait_until(
+            lambda: self.output_has(lines=1, output_file="output/filebeat_1"),
+            max_timeout=10)
+        filebeat.check_kill_and_wait()
+
+        # Create old registry file out of the new one
+        r = self.get_registry()
+        registry_entry = r[0]
+        del registry_entry["timestamp"]
+        del registry_entry["ttl"]
+        old_registry = {registry_entry["source"]: registry_entry}
+
+        # Overwrite registry
+        with open(registry_file, 'w') as f:
+            json.dump(old_registry, f)
+
+        self.render_config_template(
+            path=os.path.abspath(self.working_dir) + "/log/*",
+            output_file_filename="filebeat_2",
+        )
+
+        filebeat = self.start_beat(output="filebeat2.log")
+
+        # Wait until state is migrated
+        self.wait_until(
+            lambda: self.log_contains(
+                "Old states converted to new states and written to registrar: 1", "filebeat2.log"),
+            max_timeout=10)
+
+        with open(testfile1, 'a') as f:
+            f.write("entry12\n")
+
+        # After restart new output file is created -> only 1 new entry
+        self.wait_until(
+            lambda: self.output_has(lines=1, output_file="output/filebeat_2"),
+            max_timeout=10)
+
+        filebeat.check_kill_and_wait()
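As background for these tests: the old registry is a single JSON object keyed by source path, while the new registry is a flat array of state objects (see convertOldStates in registrar.go above). A minimal Python sketch of that conversion — the helper name is hypothetical, not part of this commit:

```python
import json

# Hypothetical helper mirroring what convertOldStates does in Go:
# the old registry maps source path -> state, the new registry is
# simply a list of the state objects.
def convert_old_registry(old_registry):
    return list(old_registry.values())

old = json.loads('{"logs/hello.log": {"source": "logs/hello.log", "offset": 4}}')
print(convert_old_registry(old))
# [{'source': 'logs/hello.log', 'offset': 4}]
```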
108 vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py (generated, vendored)

@@ -624,114 +624,6 @@ class Test(BaseTest):
         assert self.get_registry_entry_by_path(os.path.abspath(testfile1))["offset"] == 9
         assert self.get_registry_entry_by_path(os.path.abspath(testfile2))["offset"] == 8


-    def test_migration_non_windows(self):
-        """
-        Tests if migration from old filebeat registry to new format works
-        """
-
-        if os.name == "nt":
-            raise SkipTest
-
-        registry_file = self.working_dir + '/registry'
-
-        # Write old registry file
-        with open(registry_file, 'w') as f:
-            f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}}')
-
-        self.render_config_template(
-            path=os.path.abspath(self.working_dir) + "/log/input*",
-            clean_removed="false",
-            clean_inactive="0",
-        )
-
-        filebeat = self.start_beat()
-
-        self.wait_until(
-            lambda: self.log_contains("Old registry states found: 2"),
-            max_timeout=15)
-
-        self.wait_until(
-            lambda: self.log_contains("Old states converted to new states and written to registrar: 2"),
-            max_timeout=15)
-
-        filebeat.check_kill_and_wait()
-
-        # Check if content is same as above
-        assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4
-        assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6
-
-        # Compare first entry
-        oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}}')
-        newJson = self.get_registry_entry_by_path("logs/hello.log")
-        del newJson["timestamp"]
-        del newJson["ttl"]
-        assert newJson == oldJson
-
-        # Compare second entry
-        oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}')
-        newJson = self.get_registry_entry_by_path("logs/log2.log")
-        del newJson["timestamp"]
-        del newJson["ttl"]
-        assert newJson == oldJson
-
-        # Make sure the right number of entries is in
-        data = self.get_registry()
-        assert len(data) == 2
-
-    def test_migration_windows(self):
-        """
-        Tests if migration from old filebeat registry to new format works
-        """
-
-        if os.name != "nt":
-            raise SkipTest
-
-        registry_file = self.working_dir + '/registry'
-
-        # Write old registry file
-        with open(registry_file, 'w') as f:
-            f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}}')
-
-        self.render_config_template(
-            path=os.path.abspath(self.working_dir) + "/log/input*",
-        )
-
-        filebeat = self.start_beat()
-
-        self.wait_until(
-            lambda: self.log_contains("Old registry states found: 2"),
-            max_timeout=15)
-
-        self.wait_until(
-            lambda: self.log_contains("Old states converted to new states and written to registrar: 2"),
-            max_timeout=15)
-
-        filebeat.check_kill_and_wait()
-
-        # Check if content is same as above
-        assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4
-        assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6
-
-        # Compare first entry
-        oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}}')
-        newJson = self.get_registry_entry_by_path("logs/hello.log")
-        del newJson["timestamp"]
-        del newJson["ttl"]
-        assert newJson == oldJson
-
-        # Compare second entry
-        oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}')
-        newJson = self.get_registry_entry_by_path("logs/log2.log")
-        del newJson["timestamp"]
-        del newJson["ttl"]
-        assert newJson == oldJson
-
-        # Make sure the right number of entries is in
-        data = self.get_registry()
-        assert len(data) == 2
-
-
     def test_clean_inactive(self):
         """
         Checks that states are properly removed after clean_inactive
1 vendor/github.com/elastic/beats/heartbeat/docs/version.asciidoc (generated, vendored)

@@ -1,2 +1,3 @@
 :stack-version: 5.1.1
+:doc-branch: 5.1
 :go-version: 1.7.1
2 vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json (generated, vendored)

@@ -7,7 +7,7 @@
     }
   },
   "_meta": {
-    "version": "5.1.1"
+    "version": "5.1.2"
   },
   "dynamic_templates": [
     {
2 vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json (generated, vendored)

@@ -5,7 +5,7 @@
       "norms": false
     },
   "_meta": {
-    "version": "5.1.1"
+    "version": "5.1.2"
  },
   "dynamic_templates": [
     {
2 vendor/github.com/elastic/beats/libbeat/beat/version.go (generated, vendored)

@@ -1,3 +1,3 @@
 package beat

-const defaultBeatVersion = "5.1.1"
+const defaultBeatVersion = "5.1.2"
4 vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc (generated, vendored)

@@ -7,9 +7,11 @@ out some of them here.
 NOTE: Elastic provides no warranty or support for community-sourced Beats.

 [horizontal]
 https://github.com/awormuth/amazonbeat[amazonbeat]:: Reads data from a specified Amazon product.
 https://github.com/radoondas/apachebeat[apachebeat]:: Reads status from Apache HTTPD server-status.
 https://github.com/goomzee/burrowbeat[burrowbeat]:: Monitors Kafka consumer lag using Burrow.
 https://github.com/goomzee/cassandrabeat[cassandrabeat]:: Uses Cassandra's nodetool cfstats utility to monitor Cassandra database nodes and lag.
 https://github.com/hartfordfive/cloudflarebeat[cloudflarebeat]:: Indexes log entries from the Cloudflare Enterprise Log Share API.
 https://github.com/aidan-/cloudtrailbeat[cloudtrailbeat]:: Reads events from Amazon Web Services' https://aws.amazon.com/cloudtrail/[CloudTrail].
 https://github.com/Pravoru/consulbeat[consulbeat]:: Reads services health checks from consul and pushes them to elastic.
 https://github.com/Ingensi/dockbeat[dockbeat]:: Reads Docker container
@@ -27,6 +29,7 @@ https://github.com/radoondas/jmxproxybeat[jmxproxybeat]:: Reads Tomcat JMX metri
 https://github.com/mheese/journalbeat[journalbeat]:: Used for log shipping from systemd/journald based Linux systems.
 https://github.com/eskibars/lmsensorsbeat[lmsensorsbeat]:: Collects data from lm-sensors (such as CPU temperatures, fan speeds, and voltages from i2c and smbus).
 https://github.com/consulthys/logstashbeat[logstashbeat]:: Collects data from Logstash monitoring API (v5 onwards) and indexes them in Elasticsearch.
 https://github.com/scottcrespo/mongobeat[mongobeat]:: Monitors MongoDB instances and can be configured to send multiple document types to Elasticsearch.
 https://github.com/adibendahan/mysqlbeat[mysqlbeat]:: Run any query on MySQL and send results to Elasticsearch.
 https://github.com/PhaedrusTheGreek/nagioscheckbeat[nagioscheckbeat]:: For Nagios checks and performance data.
 https://github.com/mrkschan/nginxbeat[nginxbeat]:: Reads status from Nginx.
@@ -44,6 +47,7 @@ https://github.com/martinhoefling/saltbeat[saltbeat]:: Reads events from salt ma
 https://github.com/consulthys/springbeat[springbeat]:: Collects health and metrics data from Spring Boot applications running with the actuator module.
 https://github.com/buehler/go-elastic-twitterbeat[twitterbeat]:: Reads tweets for specified screen names.
 https://github.com/gravitational/udpbeat[udpbeat]:: Ships structured logs via UDP.
 https://github.com/hartfordfive/udplogbeat[udplogbeat]:: Accept events via local UDP socket (in plain-text or JSON with ability to enforce schemas). Can also be used for applications only supporting syslog logging.
 https://github.com/cleesmith/unifiedbeat[unifiedbeat]:: Reads records from Unified2 binary files generated by
 network intrusion detection software and indexes the records in Elasticsearch.
 https://github.com/mrkschan/uwsgibeat[uwsgibeat]:: Reads stats from uWSGI.
291 vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc (generated, vendored)

@@ -11,22 +11,21 @@
 //////////////////////////////////////////////////////////////////////////

 [[configuration-processors]]
-=== Processors Configuration
+== Processors

 include::../../libbeat/docs/processors.asciidoc[]

-Each processor has associated an action with a set of parameters and optionally a condition. If the condition is
-present, then the action is executed only if the condition
-is fulfilled. If no condition is passed then the action is always executed.
+To define a processor, you specify the processor name, an optional condition,
+and a set of parameters:

 [source,yaml]
 ------
 processors:
- - <action>:
+ - <processor_name>:
     when:
        <condition>
     <parameters>
- - <action>:
+ - <processor_name>:
     when:
        <condition>
     <parameters>
@@ -34,21 +33,33 @@ processors:

 ------

-where <action> can be a way to select the fields that are exported or a way to add meta data to the event , <condition> contains the definition of the condition.
-and <parameters> is the list of parameters passed along the <action>.
+Where:
+
+* <processor_name> specifies a <<processors,processor>> that performs some kind
+of action, such as selecting the fields that are exported or adding metadata to
+the event.
+* <when: condition> specifies an optional <<conditions,condition>>. If the
+condition is present, then the action is executed only if the condition is
+fulfilled. If no condition is passed, then the action is always executed.
+* <parameters> is the list of parameters to pass to the processor.
+
+See <<filtering-and-enhancing-data>> for specific {beatname_uc} examples.

-[[filtering-condition]]
-==== Condition
+[float]
+[[conditions]]
+=== Conditions

-Each condition receives a field to compare or multiple fields under the same condition and then `AND` is used between
-them. You can see a list of the <<exported-fields,`exported fields`>>.
+Each condition receives a field to compare. You can specify multiple fields
+under the same condition by using `AND` between the fields (for example,
+`field1 AND field2`).

-For each field, you can specify a simple field name or a nested map, for example `dns.question.name`.
+For each field, you can specify a simple field name or a nested map, for
+example `dns.question.name`.
+
+See <<exported-fields>> for a list of all the fields that are exported by
+{beatname_uc}.

-A condition can be:
+The supported conditions are:

 * <<condition-equals,`equals`>>
 * <<condition-contains,`contains`>>
@@ -59,14 +70,15 @@ A condition can be:
 * <<condition-not, `not`>>


+[float]
 [[condition-equals]]
-===== equals
+==== equals

-With the `equals` condition, you can compare if a field has a certain value. The condition accepts only an integer or a string
-value.
+With the `equals` condition, you can compare if a field has a certain value.
+The condition accepts only an integer or a string value.

-For example, the following condition checks if the response code of the HTTP transaction is 200:
+For example, the following condition checks if the response code of the HTTP
+transaction is 200:

 [source,yaml]
 -------
@@ -74,13 +86,15 @@ equals:
 http.response.code: 200
 -------

+[float]
 [[condition-contains]]
-===== contains
+==== contains

 The `contains` condition checks if a value is part of a field. The field can be
 a string or an array of strings. The condition accepts only a string value.

-For example, the following condition checks if an error is part of the transaction status:
+For example, the following condition checks if an error is part of the
+transaction status:

 [source,yaml]
 ------
@@ -88,13 +102,15 @@ contains:
 status: "Specific error"
 ------


+[float]
 [[condition-regexp]]
-===== regexp
+==== regexp

-The `regexp` condition checks the field against a regular expression. The condition accepts only strings.
+The `regexp` condition checks the field against a regular expression. The
+condition accepts only strings.

-For example, the following condition checks if the process name starts with `foo`:
+For example, the following condition checks if the process name starts with
+`foo`:

 [source,yaml]
 -----
@@ -102,14 +118,16 @@ reqexp:
 system.process.name: "foo.*"
 -----

+[float]
 [[condition-range]]
-===== range
+==== range

-The `range` condition checks if the field is in a certain range of values. The condition supports `lt`, `lte`, `gt` and `gte`. The
-condition accepts only integer or float values.
+The `range` condition checks if the field is in a certain range of values. The
+condition supports `lt`, `lte`, `gt` and `gte`. The condition accepts only
+integer or float values.

-For example, the following condition checks for failed HTTP transaction by comparing the `http.response.code` field with
-400.
+For example, the following condition checks for failed HTTP transactions by
+comparing the `http.response.code` field with 400.


 [source,yaml]
@@ -119,7 +137,7 @@ range:
 gte: 400
 ------

-that can be also translated to:
+This can also be written as:

 [source,yaml]
 ----
@@ -127,7 +145,8 @@ range:
 http.response.code.gte: 400
 ----

-For example, the following condition checks if the CPU usage in percentage has a value between 0.5 and 0.8.
+The following condition checks if the CPU usage in percentage has a value
+between 0.5 and 0.8.

 [source,yaml]
 ------
@@ -137,8 +156,9 @@ range:
 ------


+[float]
 [[condition-or]]
-===== OR
+==== OR

 The `or` operator receives a list of conditions.

@@ -152,7 +172,8 @@ or:

 -------

-For example the condition `http.response.code = 304 OR http.response.code = 404` translates to:
+For example, to configure the condition
+`http.response.code = 304 OR http.response.code = 404`:

 [source,yaml]
 ------
@@ -163,9 +184,9 @@ or:
 http.response.code: 404
 ------


+[float]
 [[condition-and]]
-===== AND
+==== AND

 The `and` operator receives a list of conditions.

@@ -179,7 +200,8 @@ and:

 -------

-For example the condition `http.response.code = 200 AND status = OK` translates to:
+For example, to configure the condition
+`http.response.code = 200 AND status = OK`:

 [source,yaml]
 ------
@@ -202,8 +224,9 @@ or:

 ------

+[float]
 [[condition-not]]
-===== NOT
+==== NOT

 The `not` operator receives the condition to negate.

@@ -214,7 +237,7 @@ not:

 -------

-For example the condition `NOT status = OK` translates to:
+For example, to configure the condition `NOT status = OK`:

 [source,yaml]
 ------
@@ -223,81 +246,24 @@ not:
 status: OK
 ------

+[float]
+[[processors]]
+=== Processors
+
+The supported processors are:
+
-==== Actions
-
-The supported actions are:
-
-* <<include-fields,`include_fields`>>
-* <<drop-fields,`drop_fields`>>
-* <<drop-event,`drop_event`>>
 * <<add-cloud-metadata,`add_cloud_metadata`>>
-
-See <<exported-fields>> for the full list of possible fields.
-
-[[include-fields]]
-===== include_fields
-
-The `include_fields` action specifies what fields to export if a certain condition is fulfilled. The condition is
-optional and if it's missing then the defined fields are always exported. The `@timestamp` and
-`type` fields are always exported, even if they are not defined in the `include_fields` list.
-
-[source,yaml]
--------
-processors:
- - include_fields:
-     when:
-        condition
-     fields: ["field1", "field2", ...]
--------
-
-You can specify multiple `include_fields` actions under the `processors` section.
-
-NOTE: If you define an empty list of fields under `include_fields`, then only the required fields, `@timestamp` and `type`, are
-exported.
-
-[[drop-fields]]
-===== drop_fields
-
-The `drop_fields` action specifies what fields to drop if a certain condition is fulfilled. The condition is optional
-and if it's missing then the defined fields are always dropped. The `@timestamp` and `type` fields cannot be dropped,
-even if they show up in the `drop_fields` list.
-
-[source,yaml]
------------------------------------------------------
-processors:
- - drop_fields:
-     when:
-        condition
-     fields: ["field1", "field2", ...]
------------------------------------------------------
-
-NOTE: If you define an empty list of fields under `drop_fields`, then no fields are dropped.
-
-[[drop-event]]
-===== drop_event
-
-The `drop_event` action drops the entire event if the associated condition is fulfilled. The condition is mandatory, as
-without one all the events are dropped.
-
-[source,yaml]
-------
-processors:
- - drop_event:
-     when:
-        condition
-------
+* <<decode-json-fields,`decode_json_fields`>>
+* <<drop-event,`drop_event`>>
+* <<drop-fields,`drop_fields`>>
+* <<include-fields,`include_fields`>>

 [[add-cloud-metadata]]
-===== add_cloud_metadata
+=== add_cloud_metadata

-The `add_cloud_metadata` action enriches each event with instance metadata from
-the machine's hosting provider. At startup it will detect the hosting provider
-and cache the instance metadata.
+The `add_cloud_metadata` processor enriches each event with instance metadata
+from the machine's hosting provider. At startup it will detect the hosting
+provider and cache the instance metadata.

 Three cloud providers are supported.

@@ -308,14 +274,16 @@ Three cloud providers are supported.
 The simple configuration below enables the processor.

 [source,yaml]
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------
 processors:
 - add_cloud_metadata:
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------

-The `add_cloud_metadata` action has one optional configuration setting named
+The `add_cloud_metadata` processor has one optional configuration setting named
 `timeout` that specifies the maximum amount of time to wait for a successful
-response when detecting the hosting provider. The default timeout value is `3s`.
+response when detecting the hosting provider. The default timeout value is
+`3s`.

 If a timeout occurs then no instance metadata will be added to the events. This
 makes it possible to enable this processor for all your deployments (in the
 cloud or on-premise).
@@ -326,7 +294,7 @@ examples for each of the supported providers.
 _EC2_

 [source,json]
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------
 {
   "meta": {
     "cloud": {
@@ -338,12 +306,12 @@ _EC2_
       }
     }
 }
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------

 _Digital Ocean_

 [source,json]
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------
 {
   "meta": {
     "cloud": {
@@ -353,12 +321,12 @@ _Digital Ocean_
       }
     }
 }
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------

 _GCE_

 [source,json]
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------
 {
   "meta": {
     "cloud": {
@@ -370,4 +338,91 @@ _GCE_
       }
     }
 }
---------------------------------------------------------------------------------
+-------------------------------------------------------------------------------
+
+[[decode-json-fields]]
+=== decode_json_fields
+
+The `decode_json_fields` processor decodes fields containing JSON strings and
+replaces the strings with valid JSON objects.
+
+[source,yaml]
+-----------------------------------------------------
+processors:
+ - decode_json_fields:
+     fields: ["field1", "field2", ...]
+     process_array: false
+     max_depth: 1
+-----------------------------------------------------
+
+The `decode_json_fields` processor has the following configuration settings:
+
+`fields`:: The fields containing JSON strings to decode.
+`process_array`:: (Optional) A boolean that specifies whether to process
+arrays. The default is false.
+`max_depth`:: (Optional) The maximum parsing depth. The default is 1.
+
+[[drop-event]]
+=== drop_event
+
+The `drop_event` processor drops the entire event if the associated condition
+is fulfilled. The condition is mandatory, because without one, all the events
+are dropped.
+
+[source,yaml]
+------
+processors:
+ - drop_event:
+     when:
+        condition
+------
+
+See <<conditions>> for a list of supported conditions.
+
+[[drop-fields]]
+=== drop_fields
+
+The `drop_fields` processor specifies which fields to drop if a certain
+condition is fulfilled. The condition is optional. If it's missing, the
+specified fields are always dropped. The `@timestamp` and `type` fields cannot
+be dropped, even if they show up in the `drop_fields` list.
+
+[source,yaml]
+-----------------------------------------------------
+processors:
+ - drop_fields:
+     when:
+        condition
+     fields: ["field1", "field2", ...]
+-----------------------------------------------------
+
+See <<conditions>> for a list of supported conditions.
+
+NOTE: If you define an empty list of fields under `drop_fields`, then no fields
+are dropped.
+
+[[include-fields]]
+=== include_fields
+
+The `include_fields` processor specifies which fields to export if a certain
+condition is fulfilled. The condition is optional. If it's missing, the
+specified fields are always exported. The `@timestamp` and `type` fields are
+always exported, even if they are not defined in the `include_fields` list.
+
+[source,yaml]
+-------
+processors:
+ - include_fields:
+     when:
+        condition
+     fields: ["field1", "field2", ...]
+-------
+
+See <<conditions>> for a list of supported conditions.
+
+You can specify multiple `include_fields` processors under the `processors`
+section.
+
+NOTE: If you define an empty list of fields under `include_fields`, then only
+the required fields, `@timestamp` and `type`, are exported.
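For intuition, the condition DSL documented above (equals/contains plus and/or/not combinators) can be pictured with a rough Python sketch — hypothetical, not libbeat code, and range is omitted for brevity:

```python
# Hypothetical sketch of evaluating the documented conditions against
# a flat event dictionary; each condition is a single-key mapping.
def matches(cond, event):
    kind, arg = next(iter(cond.items()))
    if kind == "equals":
        return all(event.get(field) == value for field, value in arg.items())
    if kind == "contains":
        return all(value in str(event.get(field, "")) for field, value in arg.items())
    if kind == "or":
        return any(matches(c, event) for c in arg)
    if kind == "and":
        return all(matches(c, event) for c in arg)
    if kind == "not":
        return not matches(arg, event)
    raise ValueError("unsupported condition: %s" % kind)

# http.response.code = 304 OR http.response.code = 404
cond = {"or": [{"equals": {"http.response.code": 304}},
               {"equals": {"http.response.code": 404}}]}
print(matches(cond, {"http.response.code": 404}))  # True
```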
17 vendor/github.com/elastic/beats/libbeat/docs/processors.asciidoc (generated, vendored)

@@ -9,15 +9,18 @@
 //// include::../../libbeat/docs/filtering.asciidoc[]
 //////////////////////////////////////////////////////////////////////////

-You can define processors in your configuration to process events before they are sent to the configured output.
-The libbeat library provides processors for reducing the number of exported fields, and processors for
-enhancing events with additional metadata. Each processor receives an event, applies a defined action to the event,
-and returns the event. If you define a list of processors, they are executed in the order they are defined in the
-configuration file.
+You can define processors in your configuration to process events before they
+are sent to the configured output. The libbeat library provides processors for:
+
+* reducing the number of exported fields
+* enhancing events with additional metadata
+* performing additional processing and decoding
+
+Each processor receives an event, applies a defined action to the event, and
+returns the event. If you define a list of processors, they are executed in the
+order they are defined in the {beatname_uc} configuration file.

 [source,yaml]
 -------
 event -> processor 1 -> event1 -> processor 2 -> event2 ...
 -------

-The processors are defined in the {beatname_uc} configuration file.
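The `event -> processor 1 -> event1 -> ...` flow above can be made concrete with a small Python sketch — hypothetical, not libbeat code; the processor names are invented for illustration:

```python
# Hypothetical sketch of the chain described above: each processor
# receives an event and returns it (possibly modified), or None to
# drop it; processors run in the order they are configured.
def run_processors(event, processors):
    for processor in processors:
        event = processor(event)
        if event is None:  # a drop_event-style processor dropped it
            return None
    return event

drop_debug = lambda e: None if e.get("message", "").startswith("DBG") else e
add_meta = lambda e: dict(e, decorated=True)

print(run_processors({"message": "DBG: noisy"}, [drop_debug, add_meta]))  # None
print(run_processors({"message": "ERR: boom"}, [drop_debug, add_meta]))
# {'message': 'ERR: boom', 'decorated': True}
```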
1 vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc (generated, vendored)

@@ -6,6 +6,7 @@
 --
 This section summarizes the changes in each release.

+* <<release-notes-5.1.2>>
 * <<release-notes-5.1.1>>
 * <<release-notes-5.1.0>>
 * <<release-notes-5.0.1>>
4 vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc (generated, vendored)

@@ -34,7 +34,7 @@ sudo apt-get install apt-transport-https
 +
 ["source","sh",subs="attributes,callouts"]
 --------------------------------------------------
-echo "deb https://artifacts.elastic.co/packages/5.x-prerelease/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-5.x.list
+echo "deb https://artifacts.elastic.co/packages/5.x/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-5.x.list
 --------------------------------------------------
 +
 [WARNING]
@@ -85,7 +85,7 @@ your `/etc/yum.repos.d/` directory and add the following lines:
 --------------------------------------------------
 [elastic-5.x]
 name=Elastic repository for 5.x packages
-baseurl=https://artifacts.elastic.co/packages/5.x-prerelease/yum
+baseurl=https://artifacts.elastic.co/packages/5.x/yum
 gpgcheck=1
 gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
 enabled=1
1 vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc (generated, vendored)

@@ -1,2 +1,3 @@
 :stack-version: 5.1.1
+:doc-branch: 5.1
 :go-version: 1.7.1
5 vendor/github.com/elastic/beats/libbeat/scripts/Makefile (generated, vendored)

@@ -43,7 +43,8 @@ TEST_ENVIRONMENT?=false
 SYSTEM_TESTS?=false
 GOX_OS?=linux darwin windows solaris freebsd netbsd openbsd
 TESTING_ENVIRONMENT?=snapshot
-DOCKER_COMPOSE?=docker-compose -f ${PWD}/../testing/environments/base.yml -f ${PWD}/../testing/environments/${TESTING_ENVIRONMENT}.yml -f docker-compose.yml
+DOCKER_COMPOSE_PROJECT_NAME?=${BEATNAME}_${TESTING_ENVIRONMENT}
+DOCKER_COMPOSE?=docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} -f ${PWD}/../testing/environments/base.yml -f ${PWD}/../testing/environments/${TESTING_ENVIRONMENT}.yml -f docker-compose.yml
 DOCKER_CACHE?=1 # If set to 0, all docker images are created without cache
 GOPACKAGES_COMMA_SEP=$(subst $(space),$(comma),$(strip ${GOPACKAGES}))
 PYTHON_ENV?=${BUILD_DIR}/python-env
@@ -304,7 +305,7 @@ start-environment: stop-environment
 .PHONY: stop-environment
 stop-environment:
 	-${DOCKER_COMPOSE} stop
-	-${DOCKER_COMPOSE} rm -f -v -a
+	-${DOCKER_COMPOSE} rm -f -v

 .PHONY: write-environment
 write-environment:
2 vendor/github.com/elastic/beats/libbeat/scripts/generate_index_pattern.py (generated, vendored)

@@ -60,6 +60,8 @@ def field_to_json(desc, path, output):
         field["aggregatable"] = False
     elif desc["type"] == "date":
         field["type"] = "date"
+    elif desc["type"] == "geo_point":
+        field["type"] = "geo_point"
     else:
         field["type"] = "string"
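For context, the branch touched here maps a documented field type to the type recorded in the Kibana index pattern. A simplified sketch of that dispatch — the real field_to_json also handles paths, aggregatability, and output collection:

```python
# Simplified sketch of the type dispatch in field_to_json; the
# geo_point branch is the one added by this commit.
def map_field_type(desc):
    if desc["type"] == "date":
        return "date"
    if desc["type"] == "geo_point":
        return "geo_point"
    return "string"

print(map_field_type({"type": "geo_point"}))  # geo_point
```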
2 vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml (generated, vendored)

@@ -67,7 +67,7 @@ metricbeat.modules:
   # Password of hosts. Empty by default
   #password: test123

-#------------------------------- docker Module -------------------------------
+#------------------------------- Docker Module -------------------------------
 #- module: docker
   #metricsets: ["cpu", "info", "memory", "network", "diskio", "container"]
   #hosts: ["unix:///var/run/docker.sock"]
36 vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc (generated, vendored)

@@ -506,7 +506,7 @@ The document type. Always set to "metricsets".


 [[exported-fields-docker]]
-== docker Fields
+== Docker Fields

 experimental[]
 Docker stats collected from Docker.
@@ -516,14 +516,14 @@ Docker stats collected from Docker.
 [float]
 == docker Fields

-docker contains different informations and statistics of docker's containers running
+Information and statistics about docker's running containers.


 [float]
 == container Fields

-Docker container metrics
+Docker container metrics.


@@ -532,7 +532,7 @@ Docker container metrics

 type: keyword

-Executed command in docker container.
+Command that was executed in the Docker container.

 [float]
@@ -540,7 +540,7 @@ Executed command in docker container.

 type: date

-Date then the container was created.
+Date when the container was created.

 [float]
@@ -595,13 +595,13 @@ Total size of all the files in the container.

 type: long

-Size of the files which have been created or changed since creation.
+Size of the files that have been created or changed since creation.

 [float]
 == cpu Fields

-Runtime cpu metrics.
+Runtime CPU metrics.


@@ -612,7 +612,7 @@ type: scaled_float

 format: percentage

-The system kernel consumed by The Docker server.
+The system kernel consumed by the Docker server.

 [float]
@@ -620,7 +620,7 @@ The system kernel consumed by The Docker server.

 type: long

-Cpu kernel tikcs
+CPU kernel ticks.

 [float]
@@ -637,7 +637,7 @@ format: percentage

 type: long

-Cpu system tikcs
+CPU system ticks.

 [float]
@@ -654,7 +654,7 @@ format: percentage

 type: long

-Cpu user tikcs
+CPU user ticks

 [float]
@@ -664,13 +664,13 @@ type: scaled_float

 format: percentage

-Total cpu usage.
+Total CPU usage.

 [float]
 == diskio Fields

-Diskio metrics.
+Disk I/O metrics.


@@ -695,14 +695,14 @@ Number of writes.

 type: scaled_float

-Reads and writes numbers combined.
+Number of reads and writes combined.

 [float]
 == info Fields

 experimental[]
-info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information
+Info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information.


@@ -750,7 +750,7 @@ Total number of existing containers.

 type: keyword

-Unique docker host identifier.
+Unique Docker host identifier.

 [float]
@@ -789,7 +789,7 @@ Memory limit.
 [float]
 == rss Fields

-Rss memory stats.
+RSS memory stats.


@@ -849,7 +849,7 @@ Total memory usage.
 [float]
 == network Fields

-Netowrk metrics.
+Network metrics.

38 vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-filtering.asciidoc (generated, vendored)

@@ -1,44 +1,10 @@
 [[filtering-and-enhancing-data]]
 == Filtering and Enhancing the Exported Data

-When your use case requires only a subset of the data exported by Metricbeat or you need to add metadata,
-you can <<metricbeat-filtering-overview,use Metricbeat config options to filter
-the data>>, or you can <<defining-processors,define processors>>.
-
-
-[float]
-[[metricbeat-filtering-overview]]
-=== Metricbeat Module Filtering
-
-Each module accepts a list of filters in its configuration. These filters are
-applied to the data generated by the module. These filters are applied to the
-module data prior to the addition of the common beat fields like `beat.hostname`
-and `type` so they can only be used to filter fields from the module.
-
-The following example reduces the exported fields of the Redis module to
-include only the `redis.info.memory` fields.
-
-[source,yaml]
-----
-metricbeat.modules:
-- module: redis
-  metricsets: ["info"]
-  period: 1s
-  hosts: ["127.0.0.1:6379"]
-  enabled: true
-  filters:
-  - include_fields:
-      fields: ['memory']
-----
-
-[float]
-[[defining-processors]]
-=== Defining Processors
-
 include::../../libbeat/docs/processors.asciidoc[]

-For example, the following filters configuration reduces the exported fields by
-dropping the `beat.name` and `beat.hostname` fields under `beat`, from all documents.
+For example, the following configuration reduces the exported fields by
+dropping the `beat.name` and `beat.hostname` fields under `beat` from all documents.

 [source, yaml]
 ----
25 vendor/github.com/elastic/beats/metricbeat/docs/modules.asciidoc generated vendored

@@ -7,28 +7,7 @@ This section contains detailed information about the metric collecting modules
contained in {beatname_uc}. Each module contains one or multiple metricsets. More details
about each module can be found under the links below.

* <<metricbeat-module-apache,Apache>>
* <<metricbeat-module-docker,Docker>>
* <<metricbeat-module-haproxy,HAProxy>>
* <<metricbeat-module-mongodb,MongoDB>>
* <<metricbeat-module-mysql,MySQL>>
* <<metricbeat-module-nginx,Nginx>>
* <<metricbeat-module-redis,Redis>>
* <<metricbeat-module-postgresql,Postgresql>>
* <<metricbeat-module-system,System>>
* <<metricbeat-module-zookeeper,ZooKeeper>>

--

include::modules/apache.asciidoc[]
include::modules/docker.asciidoc[]
include::modules/haproxy.asciidoc[]
include::modules/mongodb.asciidoc[]
include::modules/mysql.asciidoc[]
include::modules/nginx.asciidoc[]
include::modules/postgresql.asciidoc[]
include::modules/redis.asciidoc[]
include::modules/system.asciidoc[]
include::modules/zookeeper.asciidoc[]
include::modules_list.asciidoc[]
6 vendor/github.com/elastic/beats/metricbeat/docs/modules/docker.asciidoc generated vendored

@@ -3,18 +3,18 @@ This file is generated! See scripts/docs_collector.py
////

[[metricbeat-module-docker]]
== docker Module
== Docker Module

experimental[]

This is the docker Module.
This module fetches metrics from https://www.docker.com/[Docker] containers.


[float]
=== Example Configuration

The docker module supports the standard configuration options that are described
The Docker module supports the standard configuration options that are described
in <<configuration-metricbeat>>. Here is an example configuration:

[source,yaml]
30 vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc generated vendored (new file)

@@ -0,0 +1,30 @@
////
This file is generated! See scripts/docs_collector.py
////

* <<metricbeat-module-apache,Apache>>
* <<metricbeat-module-docker,Docker>>
* <<metricbeat-module-haproxy,haproxy>>
* <<metricbeat-module-kafka,kafka>>
* <<metricbeat-module-mongodb,MongoDB>>
* <<metricbeat-module-mysql,MySQL>>
* <<metricbeat-module-nginx,Nginx>>
* <<metricbeat-module-postgresql,PostgreSQL>>
* <<metricbeat-module-redis,Redis>>
* <<metricbeat-module-system,System>>
* <<metricbeat-module-zookeeper,ZooKeeper>>


--

include::modules/apache.asciidoc[]
include::modules/docker.asciidoc[]
include::modules/haproxy.asciidoc[]
include::modules/kafka.asciidoc[]
include::modules/mongodb.asciidoc[]
include::modules/mysql.asciidoc[]
include::modules/nginx.asciidoc[]
include::modules/postgresql.asciidoc[]
include::modules/redis.asciidoc[]
include::modules/system.asciidoc[]
include::modules/zookeeper.asciidoc[]
2 vendor/github.com/elastic/beats/metricbeat/docs/reference/configuration.asciidoc generated vendored

@@ -10,7 +10,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes

* <<configuration-metricbeat>>
* <<configuration-general>>
* <<configuration-processors>>
* <<elasticsearch-output>>
* <<logstash-output>>
* <<kafka-output>>
@@ -20,5 +19,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
* <<configuration-output-ssl>>
* <<configuration-path>>
* <<configuration-logging>>
* <<configuration-processors>>

include::configuration/metricbeat-options.asciidoc[]
@@ -62,15 +62,17 @@ A list of tags that will be sent with the metricset event. This setting is optio

===== filters

A list of filters to apply to the data generated by the module. For more detail on how to configure
filters, see <<configuration-processors>>.
deprecated[5.1,This option will be renamed and changed in a future release]

A list of filters to apply to the data generated by the module.

include::../../../../libbeat/docs/generalconfig.asciidoc[]

include::../../../../libbeat/docs/processors-config.asciidoc[]

include::../../../../libbeat/docs/outputconfig.asciidoc[]

include::../../../../libbeat/docs/shared-path-config.asciidoc[]

include::../../../../libbeat/docs/loggingconfig.asciidoc[]

include::../../../../libbeat/docs/processors-config.asciidoc[]
1 vendor/github.com/elastic/beats/metricbeat/docs/version.asciidoc generated vendored

@@ -1,2 +1,3 @@
:stack-version: 5.1.1
:doc-branch: 5.1
:go-version: 1.7.1
2 vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml generated vendored

@@ -67,7 +67,7 @@ metricbeat.modules:
  # Password of hosts. Empty by default
  #password: test123

#------------------------------- docker Module -------------------------------
#------------------------------- Docker Module -------------------------------
#- module: docker
  #metricsets: ["cpu", "info", "memory", "network", "diskio", "container"]
  #hosts: ["unix:///var/run/docker.sock"]
2 vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json generated vendored

@@ -7,7 +7,7 @@
}
},
"_meta": {
"version": "5.1.1"
"version": "5.1.2"
},
"dynamic_templates": [
{
2 vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json generated vendored

@@ -5,7 +5,7 @@
"norms": false
},
"_meta": {
"version": "5.1.1"
"version": "5.1.2"
},
"dynamic_templates": [
{
4 vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/docs.asciidoc generated vendored

@@ -1,6 +1,6 @@
== docker Module
== Docker Module

experimental[]

This is the docker Module.
This module fetches metrics from https://www.docker.com/[Docker] containers.
4 vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/fields.yml generated vendored

@@ -1,5 +1,5 @@
- key: docker
  title: "docker"
  title: "Docker"
  description: >
    experimental[]

@@ -9,5 +9,5 @@
- name: docker
  type: group
  description: >
    docker contains different informations and statistics of docker's containers running
    Information and statistics about docker's running containers.
  fields:
@@ -1,3 +1,4 @@
=== docker container MetricSet
=== Docker Container Metricset

This is the container metricset of the module docker.
The Docker `container` metricset collects information and statistics about
running Docker containers.
8 vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/fields.yml generated vendored

@@ -1,16 +1,16 @@
- name: container
  type: group
  description: >
    Docker container metrics
    Docker container metrics.
  fields:
    - name: command
      type: keyword
      description: >
        Executed command in docker container.
        Command that was executed in the Docker container.
    - name: created
      type: date
      description: >
        Date then the container was created.
        Date when the container was created.
    - name: id
      type: keyword
      description: >
@@ -39,4 +39,4 @@
    - name: rw
      type: long
      description: >
        Size of the files which have been created or changed since creation.
        Size of the files that have been created or changed since creation.
4 vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/docs.asciidoc generated vendored

@@ -1,3 +1,3 @@
=== docker cpu MetricSet
=== Docker CPU Metricset

This is the cpu metricset of the module docker.
The Docker `cpu` metricset collects runtime CPU metrics.
12 vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml generated vendored

@@ -1,17 +1,17 @@
- name: cpu
  type: group
  description: >
    Runtime cpu metrics.
    Runtime CPU metrics.
  fields:
    - name: kernel.pct
      type: scaled_float
      format: percentage
      description: >
        The system kernel consumed by The Docker server.
        The system kernel consumed by the Docker server.
    - name: kernel.ticks
      type: long
      description: >
        Cpu kernel tikcs
        CPU kernel ticks.
    - name: system.pct
      type: scaled_float
      format: percentage
@@ -19,7 +19,7 @@
    - name: system.ticks
      type: long
      description: >
        Cpu system tikcs
        CPU system ticks.
    - name: user.pct
      type: scaled_float
      format: percentage
@@ -27,12 +27,12 @@
    - name: user.ticks
      type: long
      description: >
        Cpu user tikcs
        CPU user ticks
    - name: total.pct
      type: scaled_float
      format: percentage
      description: >
        Total cpu usage.
        Total CPU usage.
    # TODO: how to document cpu list?
    #- name: core
    #  type: list
4 vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/docs.asciidoc generated vendored

@@ -1,3 +1,3 @@
=== docker diskio MetricSet
=== Docker Diskio Metricset

This is the diskio metricset of the module docker.
The Docker `diskio` metricset collects disk I/O metrics.
4 vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/fields.yml generated vendored

@@ -1,7 +1,7 @@
- name: diskio
  type: group
  description: >
    Diskio metrics.
    Disk I/O metrics.
  fields:
    - name: reads
      type: scaled_float
@@ -14,4 +14,4 @@
    - name: total
      type: scaled_float
      description: >
        Reads and writes numbers combined.
        Number of reads and writes combined.
26 vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go generated vendored

@@ -64,17 +64,35 @@ func FetchStats(client *docker.Client) ([]Stat, error) {
		return nil, err
	}

	var wg sync.WaitGroup

	containersList := []Stat{}
	queue := make(chan Stat, 1)
	wg.Add(len(containers))

	for _, container := range containers {
		// This is currently very inefficient as docker calculates the average for each request,
		// means each request will take at least 2s: https://github.com/docker/docker/blob/master/cli/command/container/stats_helpers.go#L148
		// Getting all stats at once is implemented here: https://github.com/docker/docker/pull/25361
		containersList = append(containersList, exportContainerStats(client, &container))
		go func(container docker.APIContainers) {
			queue <- exportContainerStats(client, &container)
		}(container)
	}

	go func() {
		for container := range queue {
			containersList = append(containersList, container)
			wg.Done()
		}
	}()

	wg.Wait()

	return containersList, err
}

// exportContainerStats loads stats for the given container
//
// This is currently very inefficient as docker calculates the average for each request,
// means each request will take at least 2s: https://github.com/docker/docker/blob/master/cli/command/container/stats_helpers.go#L148
// Getting all stats at once is implemented here: https://github.com/docker/docker/pull/25361
func exportContainerStats(client *docker.Client, container *docker.APIContainers) Stat {
	var wg sync.WaitGroup
	var event Stat
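The rewrite above turns the sequential per-container loop into a fan-out/fan-in: one goroutine per container pushes its result onto a channel, and a single collector goroutine appends to the slice, so only one writer ever touches containersList. A minimal, self-contained sketch of the same pattern with hypothetical names (fetchOne stands in for exportContainerStats, which blocks about 2s per container in Docker):

package main

import (
	"fmt"
	"sync"
	"time"
)

type stat struct{ id string }

// fetchOne simulates one slow per-container stats request.
func fetchOne(id string) stat {
	time.Sleep(100 * time.Millisecond)
	return stat{id: id}
}

func fetchAll(ids []string) []stat {
	var wg sync.WaitGroup
	queue := make(chan stat, 1)
	wg.Add(len(ids))

	// Fan out: one goroutine per item, so the slow requests overlap.
	for _, id := range ids {
		go func(id string) {
			queue <- fetchOne(id)
		}(id)
	}

	// Fan in: a single collector appends results, so the slice is never
	// mutated concurrently; wg.Done is called once per collected result.
	results := []stat{}
	go func() {
		for s := range queue {
			results = append(results, s)
			wg.Done()
		}
	}()

	wg.Wait()
	return results
}

func main() {
	fmt.Println(len(fetchAll([]string{"a", "b", "c"}))) // 3
}

Passing the loop variable to the goroutine as an argument, as the diff also does, pins each goroutine to its own container value instead of the shared iteration variable.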
5 vendor/github.com/elastic/beats/metricbeat/module/docker/info/_meta/docs.asciidoc generated vendored

@@ -1,3 +1,4 @@
=== docker info MetricSet
=== Docker Info Metricset

This is the info metricset of the module docker.
The Docker `info` metricset collects system-wide information based on the
https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information[Docker Remote API].
4 vendor/github.com/elastic/beats/metricbeat/module/docker/info/_meta/fields.yml generated vendored

@@ -3,7 +3,7 @@
  description: >
    experimental[]

    info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information
    Info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information.
  fields:
    - name: containers
      type: group
@@ -29,7 +29,7 @@
    - name: id
      type: keyword
      description: >
        Unique docker host identifier.
        Unique Docker host identifier.

    - name: images
      type: long
4 vendor/github.com/elastic/beats/metricbeat/module/docker/memory/_meta/docs.asciidoc generated vendored

@@ -1,3 +1,3 @@
=== docker memory MetricSet
=== Docker Memory Metricset

This is the memory metricset of the module docker.
The Docker `memory` metricset collects memory metrics.
2 vendor/github.com/elastic/beats/metricbeat/module/docker/memory/_meta/fields.yml generated vendored

@@ -16,7 +16,7 @@
    - name: rss
      type: group
      description: >
        Rss memory stats.
        RSS memory stats.
      fields:
        - name: total
          type: long
4 vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/docs.asciidoc generated vendored

@@ -1,3 +1,3 @@
=== docker network MetricSet
=== Docker Network Metricset

This is the network metricset of the module docker.
The Docker `network` metricset collects network metrics.
2 vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/fields.yml generated vendored

@@ -1,7 +1,7 @@
- name: network
  type: group
  description: >
    Netowrk metrics.
    Network metrics.
  fields:

    - name: interface
17 vendor/github.com/elastic/beats/metricbeat/scripts/docs_collector.py generated vendored

@@ -16,6 +16,8 @@ This file is generated! See scripts/docs_collector.py

"""

modules_list = {}

# Iterate over all modules
for module in sorted(os.listdir(base_dir)):

@@ -41,6 +43,8 @@ This file is generated! See scripts/docs_collector.py
        fields = yaml.load(f.read())
        title = fields[0]["title"]

        modules_list[module] = title

        config_file = beat_path + "/config.yml"

        # Add example config file
@@ -131,6 +135,19 @@ For a description of each field in the metricset, see the
    with open(os.path.abspath("docs") + "/modules/" + module + ".asciidoc", 'w') as f:
        f.write(module_file)

module_list_output = generated_note
for m, title in sorted(modules_list.iteritems()):
    module_list_output += " * <<metricbeat-module-" + m + "," + title + ">>\n"

module_list_output += "\n\n--\n\n"
for m, title in sorted(modules_list.iteritems()):
    module_list_output += "include::modules/" + m + ".asciidoc[]\n"

# Write module link list
with open(os.path.abspath("docs") + "/modules_list.asciidoc", 'w') as f:
    f.write(module_list_output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Collects modules docs")
@@ -1,13 +1,13 @@
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Packetbeat Cassandra",
"uiStateJSON": "{\"P-10\":{\"vis\":{\"legendOpen\":false}},\"P-17\":{\"vis\":{\"legendOpen\":false}},\"P-18\":{\"vis\":{\"legendOpen\":false}}}",
"panelsJSON": "[{\"col\":10,\"id\":\"Cassandra:-ResponseKeyspace\",\"panelIndex\":3,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra:-ResponseType\",\"panelIndex\":4,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra:-ResponseTime\",\"panelIndex\":9,\"row\":5,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra:-RequestCount\",\"panelIndex\":10,\"row\":1,\"size_x\":9,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra:-Ops\",\"panelIndex\":11,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra:-RequestCountStackByType\",\"panelIndex\":15,\"row\":7,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra:-ResponseCountStackByType\",\"panelIndex\":16,\"row\":9,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra:-RequestCountByType\",\"panelIndex\":17,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra:-ResponseCountByType\",\"panelIndex\":18,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Navigation\",\"panelIndex\":19,\"row\":1,\"size_x\":3,\"size_y\":4,\"type\":\"visualization\"},{\"id\":\"Cassandra:-QueryView\",\"type\":\"search\",\"panelIndex\":20,\"size_x\":12,\"size_y\":3,\"col\":1,\"row\":14,\"columns\":[\"cassandra.request.query\",\"cassandra.response.result.rows.meta.keyspace\",\"cassandra.response.result.rows.meta.table\",\"cassandra.response.result.rows.num_rows\"],\"sort\":[\"@timestamp\",\"desc\"]}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Packetbeat Cassandra",
"uiStateJSON": "{\"P-10\":{\"vis\":{\"legendOpen\":false}},\"P-17\":{\"vis\":{\"legendOpen\":false}},\"P-18\":{\"vis\":{\"legendOpen\":false}}}",
"panelsJSON": "[{\"col\":10,\"id\":\"Cassandra-ResponseKeyspace\",\"panelIndex\":3,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra-ResponseType\",\"panelIndex\":4,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-ResponseTime\",\"panelIndex\":9,\"row\":5,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra-RequestCount\",\"panelIndex\":10,\"row\":1,\"size_x\":9,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra-Ops\",\"panelIndex\":11,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-RequestCountStackByType\",\"panelIndex\":15,\"row\":7,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-ResponseCountStackByType\",\"panelIndex\":16,\"row\":9,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-RequestCountByType\",\"panelIndex\":17,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra-ResponseCountByType\",\"panelIndex\":18,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Navigation\",\"panelIndex\":19,\"row\":1,\"size_x\":3,\"size_y\":4,\"type\":\"visualization\"},{\"id\":\"Cassandra-QueryView\",\"type\":\"search\",\"panelIndex\":20,\"size_x\":12,\"size_y\":3,\"col\":1,\"row\":14,\"columns\":[\"cassandra.request.query\",\"cassandra.response.result.rows.meta.keyspace\",\"cassandra.response.result.rows.meta.table\",\"cassandra.response.result.rows.num_rows\"],\"sort\":[\"@timestamp\",\"desc\"]}]",
"optionsJSON": "{\"darkTheme\":false}",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
}
}
}
2 vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-filtering.asciidoc generated vendored

@@ -8,7 +8,7 @@ requests and their response codes are reported:

[source, yaml]
-----------------------------------------------------
filters:
processors:
- include_fields:
    fields:
      - bytes_in
2 vendor/github.com/elastic/beats/packetbeat/docs/reference/configuration.asciidoc generated vendored

@@ -13,7 +13,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
* <<configuration-protocols>>
* <<configuration-processes>>
* <<configuration-general>>
* <<configuration-processors>>
* <<elasticsearch-output>>
* <<logstash-output>>
* <<kafka-output>>
@@ -24,6 +23,7 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
* <<configuration-path>>
* <<configuration-logging>>
* <<configuration-run-options>>
* <<configuration-processors>>

NOTE: Packetbeat maintains a real-time topology map of all the servers in your network.
See <<maintaining-topology>> for more details.
@@ -794,8 +794,6 @@ process' command line as read from `/proc/<pid>/cmdline`.

include::../../../../libbeat/docs/generalconfig.asciidoc[]

include::../../../../libbeat/docs/processors-config.asciidoc[]

include::../../../../libbeat/docs/outputconfig.asciidoc[]

include::../../../../libbeat/docs/shared-path-config.asciidoc[]
@@ -804,3 +802,6 @@ include::../../../../libbeat/docs/loggingconfig.asciidoc[]

include::./runconfig.asciidoc[]

include::../../../../libbeat/docs/processors-config.asciidoc[]
1 vendor/github.com/elastic/beats/packetbeat/docs/version.asciidoc generated vendored

@@ -1,2 +1,3 @@
:stack-version: 5.1.1
:doc-branch: 5.1
:go-version: 1.7.1
2 vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json generated vendored

@@ -7,7 +7,7 @@
}
},
"_meta": {
"version": "5.1.1"
"version": "5.1.2"
},
"dynamic_templates": [
{
2 vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json generated vendored

@@ -5,7 +5,7 @@
"norms": false
},
"_meta": {
"version": "5.1.1"
"version": "5.1.2"
},
"dynamic_templates": [
{
4 vendor/github.com/elastic/beats/testing/environments/base.yml generated vendored

@@ -8,17 +8,14 @@ services:
      - logstash
    environment:
      - LS_HOST=logstash
    container_name: beat

  elasticsearch:
    image: elasticsearch:latest
    container_name: elasticsearch

  logstash:
    image: logstash:latest
    links:
      - elasticsearch
    container_name: logstash
    environment:
      - ES_HOST=elasticsearch

@@ -28,4 +25,3 @@ services:
      - elasticsearch
    environment:
      - ELASTICSEARCH_URL=http://elasticsearch:9200/
    container_name: kibana
@@ -2,7 +2,7 @@
FROM docker.elastic.co/kibana/kibana-ubuntu-base:latest
MAINTAINER Elastic Docker Team <docker@elastic.co>

ARG KIBANA_DOWNLOAD_URL=https://staging.elastic.co/5.1.1-acecfcb6/downloads/kibana/kibana-5.1.0-linux-x86_64.tar.gz
ARG KIBANA_DOWNLOAD_URL=https://staging.elastic.co/5.1.2-429c1ec3/downloads/kibana/kibana-5.1.2-linux-x86_64.tar.gz
ARG X_PACK_URL

EXPOSE 5601
@@ -2,8 +2,8 @@ FROM java:8-jre

ENV LS_VERSION 5

ENV VERSION 5.1.1
ENV URL https://staging.elastic.co/5.1.1-acecfcb6/downloads/logstash/logstash-${VERSION}.tar.gz
ENV VERSION 5.1.2
ENV URL https://staging.elastic.co/5.1.2-429c1ec3/downloads/logstash/logstash-5.1.2.tar.gz
ENV PATH $PATH:/opt/logstash-$VERSION/bin

# Cache variable can be set during building to invalidate the build cache with `--build-arg CACHE=$(date +%s) .`
4 vendor/github.com/elastic/beats/testing/environments/snapshot.yml generated vendored

@@ -7,8 +7,8 @@ services:
      context: ./docker/elasticsearch
      dockerfile: Dockerfile-snapshot
      args:
        ELASTIC_VERSION: 5.1.1
        ES_DOWNLOAD_URL: 'https://staging.elastic.co/5.1.1-acecfcb6/downloads/elasticsearch'
        ELASTIC_VERSION: 5.1.2
        ES_DOWNLOAD_URL: 'https://staging.elastic.co/5.1.2-429c1ec3/downloads/elasticsearch'
        #XPACK: http://snapshots.elastic.co/downloads/packs/x-pack/x-pack-6.0.0-alpha1-SNAPSHOT.zip
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
2 vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration.asciidoc generated vendored

@@ -11,7 +11,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes

* <<configuration-winlogbeat-options>>
* <<configuration-general>>
* <<configuration-processors>>
* <<elasticsearch-output>>
* <<logstash-output>>
* <<kafka-output>>
@@ -21,6 +20,7 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes
* <<configuration-output-ssl>>
* <<configuration-path>>
* <<configuration-logging>>
* <<configuration-processors>>

include::configuration/winlogbeat-options.asciidoc[]
@@ -163,6 +163,38 @@ winlogbeat.event_logs:
  event_id: 4624, 4625, 4700-4800, -4735
--------------------------------------------------------------------------------

[WARNING]
=======================================
If you specify more that 22 event IDs to include or 22 event IDs to exclude,
Windows will prevent Winlogbeat from reading the event log because it limits the
number of conditions that can be used in an event log query. If this occurs a similar
warning as shown below will be logged by Winlogbeat, and it will continue
processing data from other event logs. For more information, see
https://support.microsoft.com/en-us/kb/970453.

`WARN EventLog[Application] Open() error. No events will be read from this
source. The specified query is invalid.`

If you have more than 22 event IDs, you can workaround this Windows limitation
by using a drop_event[drop-event] processor to do the filtering after
Winlogbeat has received the events from Windows. The filter shown below is
equivalent to `event_id: 903, 1024, 4624` but can be expanded beyond 22
event IDs.

[source,yaml]
--------------------------------------------------------------------------------
processors:
  - drop_event.when.and:
      - equals.log_name: Security
      - not.or:
          - equals.event_id: 903
          - equals.event_id: 1024
          - equals.event_id: 4624
--------------------------------------------------------------------------------

=======================================


===== event_logs.level

A list of event levels to include. The value is a comma-separated list of
@@ -319,10 +351,11 @@ The metrics are served as a JSON document. The metrics include:

include::../../../../libbeat/docs/generalconfig.asciidoc[]

include::../../../../libbeat/docs/processors-config.asciidoc[]

include::../../../../libbeat/docs/outputconfig.asciidoc[]

include::../../../../libbeat/docs/shared-path-config.asciidoc[]

include::../../../../libbeat/docs/loggingconfig.asciidoc[]

include::../../../../libbeat/docs/processors-config.asciidoc[]
1 vendor/github.com/elastic/beats/winlogbeat/docs/version.asciidoc generated vendored

@@ -1,2 +1,3 @@
:stack-version: 5.1.1
:doc-branch: 5.1
:go-version: 1.7.1
137 vendor/github.com/elastic/beats/winlogbeat/eventlog/bench_test.go generated vendored (new file)

@@ -0,0 +1,137 @@
// +build windows

package eventlog

import (
	"flag"
	"math/rand"
	"os/exec"
	"strconv"
	"testing"
	"time"

	elog "github.com/andrewkroh/sys/windows/svc/eventlog"
	"github.com/dustin/go-humanize"
)

// Benchmark tests with customized output. (`go test -v -benchtime 10s -benchtest .`)

var (
	benchTest    = flag.Bool("benchtest", false, "Run benchmarks for the eventlog package")
	injectAmount = flag.Int("inject", 50000, "Number of events to inject before running benchmarks")
)

// TestBenchmarkBatchReadSize tests the performance of different
// batch_read_size values.
func TestBenchmarkBatchReadSize(t *testing.T) {
	if !*benchTest {
		t.Skip("-benchtest not enabled")
	}

	log, err := initLog(providerName, sourceName, eventCreateMsgFile)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err := uninstallLog(providerName, sourceName, log)
		if err != nil {
			t.Fatal(err)
		}
	}()

	// Increase the log size so that it can hold these large events.
	output, err := exec.Command("wevtutil.exe", "sl", "/ms:1073741824", providerName).CombinedOutput()
	if err != nil {
		t.Fatal(err, string(output))
	}

	// Publish test messages:
	for i := 0; i < *injectAmount; i++ {
		err = log.Report(elog.Info, uint32(rng.Int63()%1000), []string{strconv.Itoa(i) + " " + randString(256)})
		if err != nil {
			t.Fatal("ReportEvent error", err)
		}
	}

	setup := func(t testing.TB, batchReadSize int) (EventLog, func()) {
		eventlog, err := newWinEventLog(map[string]interface{}{"name": providerName, "batch_read_size": batchReadSize})
		if err != nil {
			t.Fatal(err)
		}
		err = eventlog.Open(0)
		if err != nil {
			t.Fatal(err)
		}
		return eventlog, func() {
			err := eventlog.Close()
			if err != nil {
				t.Fatal(err)
			}
		}
	}

	benchTest := func(batchSize int) {
		var err error
		result := testing.Benchmark(func(b *testing.B) {
			eventlog, tearDown := setup(b, batchSize)
			defer tearDown()
			b.ResetTimer()

			// Each iteration reads one batch.
			for i := 0; i < b.N; i++ {
				_, err = eventlog.Read()
				if err != nil {
					return
				}
			}
		})

		if err != nil {
			t.Fatal(err)
			return
		}

		t.Logf("batch_size=%v, total_events=%v, batch_time=%v, events_per_sec=%v, bytes_alloced_per_event=%v, total_allocs=%v",
			batchSize,
			result.N*batchSize,
			time.Duration(result.NsPerOp()),
			float64(batchSize)/time.Duration(result.NsPerOp()).Seconds(),
			humanize.Bytes(result.MemBytes/(uint64(result.N)*uint64(batchSize))),
			result.MemAllocs)
	}

	benchTest(10)
	benchTest(100)
	benchTest(500)
	benchTest(1000)
}

// Utility Functions

var rng = rand.NewSource(time.Now().UnixNano())

const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)

// https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-golang
func randString(n int) string {
	b := make([]byte, n)
	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
	for i, cache, remain := n-1, rng.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			cache, remain = rng.Int63(), letterIdxMax
		}
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			b[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}

	return string(b)
}
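The randString helper above mines each 63-bit Int63 value for ten 6-bit letter indexes, rejecting indexes beyond the 52-letter alphabet, so it only draws from the RNG roughly once per ten characters. A quick, standalone check of the constants' arithmetic:

package main

import "fmt"

func main() {
	const letterIdxBits = 6
	const letterIdxMask = 1<<letterIdxBits - 1 // 0b111111 == 63
	const letterIdxMax = 63 / letterIdxBits    // 6-bit chunks per Int63 call
	fmt.Println(letterIdxMask, letterIdxMax)   // 63 10
}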
26 vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go generated vendored

@@ -5,6 +5,7 @@ import (
	"fmt"
	"reflect"
	"strconv"
	"syscall"

	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"
@@ -23,9 +24,14 @@ var (
	detailf = logp.MakeDebug(detailSelector)
)

// dropReasons contains counters for the number of dropped events for each
// reason.
var dropReasons = expvar.NewMap("drop_reasons")
var (
	// dropReasons contains counters for the number of dropped events for each
	// reason.
	dropReasons = expvar.NewMap("drop_reasons")

	// readErrors contains counters for the read error types that occur.
	readErrors = expvar.NewMap("read_errors")
)

// EventLog is an interface to a Windows Event Log.
type EventLog interface {
@@ -177,3 +183,17 @@ func isZero(i interface{}) bool {
	}
	return false
}

// incrementMetric increments a value in the specified expvar.Map. The key
// should be a windows syscall.Errno or a string. Any other types will be
// reported under the "other" key.
func incrementMetric(v *expvar.Map, key interface{}) {
	switch t := key.(type) {
	default:
		v.Add("other", 1)
	case string:
		v.Add(t, 1)
	case syscall.Errno:
		v.Add(strconv.Itoa(int(t)), 1)
	}
}
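A note on the expvar counters introduced here: anything registered through expvar is published process-wide and served as JSON on the /debug/vars endpoint whenever an HTTP server is running. A small, hypothetical sketch (not Winlogbeat code) of how incrementMetric buckets its keys:

package main

import (
	"expvar"
	"fmt"
	"strconv"
	"syscall"
)

var readErrors = expvar.NewMap("read_errors")

// Same shape as the incrementMetric helper in the diff: strings count under
// their own key, errnos under their numeric value, anything else under "other".
func incrementMetric(v *expvar.Map, key interface{}) {
	switch t := key.(type) {
	default:
		v.Add("other", 1)
	case string:
		v.Add(t, 1)
	case syscall.Errno:
		v.Add(strconv.Itoa(int(t)), 1)
	}
}

func main() {
	incrementMetric(readErrors, "unmarshal")
	incrementMetric(readErrors, syscall.Errno(1734)) // RPC_S_INVALID_BOUND
	// expvar.Map renders as JSON, which is also what /debug/vars serves.
	fmt.Println(readErrors.String()) // {"1734": 1, "unmarshal": 1}
}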
1 vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging.go generated vendored

@@ -195,6 +195,7 @@ func (l *eventLogging) Close() error {
// by attempting to correct the error through closing and reopening the event
// log.
func (l *eventLogging) readRetryErrorHandler(err error) error {
	incrementMetric(readErrors, err)
	if errno, ok := err.(syscall.Errno); ok {
		var reopen bool
14 vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging_test.go generated vendored

@@ -4,6 +4,8 @@ package eventlog

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"
	"sync"
	"testing"
@@ -35,6 +37,8 @@ const (

const allLevels = elog.Success | elog.AuditFailure | elog.AuditSuccess | elog.Error | elog.Info | elog.Warning

const gigabyte = 1 << 30

// Test messages.
var messages = map[uint32]struct {
	eventType uint16
@@ -72,7 +76,7 @@ var oneTimeLogpInit sync.Once
func configureLogp() {
	oneTimeLogpInit.Do(func() {
		if testing.Verbose() {
			logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"eventlog", "eventlog_detail"})
			logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"eventlog"})
			logp.Info("DEBUG enabled for eventlog.")
		} else {
			logp.LogInit(logp.LOG_WARNING, "", false, true, []string{})
@@ -143,6 +147,14 @@ func uninstallLog(provider, source string, log *elog.Log) error {
	return errs.Err()
}

// setLogSize set the maximum number of bytes that an event log can hold.
func setLogSize(t testing.TB, provider string, sizeBytes int) {
	output, err := exec.Command("wevtutil.exe", "sl", "/ms:"+strconv.Itoa(sizeBytes), providerName).CombinedOutput()
	if err != nil {
		t.Fatal("failed to set log size", err, string(output))
	}
}

// Verify that all messages are read from the event log.
func TestRead(t *testing.T) {
61 vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go generated vendored

@@ -4,7 +4,6 @@ package eventlog

import (
	"fmt"
	"strconv"
	"syscall"
	"time"

@@ -13,6 +12,7 @@ import (
	"github.com/elastic/beats/winlogbeat/sys"
	win "github.com/elastic/beats/winlogbeat/sys/wineventlog"
	"github.com/joeshaw/multierror"
	"github.com/pkg/errors"
	"golang.org/x/sys/windows"
)

@@ -73,6 +73,7 @@ type winEventLog struct {
	channelName  string        // Name of the channel from which to read.
	subscription win.EvtHandle // Handle to the subscription.
	maxRead      int           // Maximum number returned in one Read.
	lastRead     uint64        // Record number of the last read event.

	render    func(event win.EvtHandle) (string, error) // Function for rendering the event to XML.
	renderBuf []byte                                    // Buffer used for rendering event.
@@ -118,13 +119,8 @@ func (l *winEventLog) Open(recordNumber uint64) error {
}

func (l *winEventLog) Read() ([]Record, error) {
	handles, err := win.EventHandles(l.subscription, l.maxRead)
	if err == win.ERROR_NO_MORE_ITEMS {
		detailf("%s No more events", l.logPrefix)
		return nil, nil
	}
	if err != nil {
		logp.Warn("%s EventHandles returned error %v", l.logPrefix, err)
	handles, _, err := l.eventHandles(l.maxRead)
	if err != nil || len(handles) == 0 {
		return nil, err
	}
	defer func() {
@@ -145,17 +141,18 @@ func (l *winEventLog) Read() ([]Record, error) {
		}
		if err != nil && x == "" {
			logp.Err("%s Dropping event with rendering error. %v", l.logPrefix, err)
			reportDrop(err)
			incrementMetric(dropReasons, err)
			continue
		}

		r, err := l.buildRecordFromXML(x, err)
		if err != nil {
			logp.Err("%s Dropping event. %v", l.logPrefix, err)
			reportDrop("unmarshal")
			incrementMetric(dropReasons, err)
			continue
		}
		records = append(records, r)
		l.lastRead = r.RecordID
	}

	debugf("%s Read() is returning %d records", l.logPrefix, len(records))
@@ -167,6 +164,34 @@ func (l *winEventLog) Close() error {
	return win.Close(l.subscription)
}

func (l *winEventLog) eventHandles(maxRead int) ([]win.EvtHandle, int, error) {
	handles, err := win.EventHandles(l.subscription, maxRead)
	switch err {
	case nil:
		if l.maxRead > maxRead {
			debugf("%s Recovered from RPC_S_INVALID_BOUND error (errno 1734) "+
				"by decreasing batch_read_size to %v", l.logPrefix, maxRead)
		}
		return handles, maxRead, nil
	case win.ERROR_NO_MORE_ITEMS:
		detailf("%s No more events", l.logPrefix)
		return nil, maxRead, nil
	case win.RPC_S_INVALID_BOUND:
		incrementMetric(readErrors, err)
		if err := l.Close(); err != nil {
			return nil, 0, errors.Wrap(err, "failed to recover from RPC_S_INVALID_BOUND")
		}
		if err := l.Open(l.lastRead); err != nil {
			return nil, 0, errors.Wrap(err, "failed to recover from RPC_S_INVALID_BOUND")
		}
		return l.eventHandles(maxRead / 2)
	default:
		incrementMetric(readErrors, err)
		logp.Warn("%s EventHandles returned error %v", l.logPrefix, err)
		return nil, 0, err
	}
}

func (l *winEventLog) buildRecordFromXML(x string, recoveredErr error) (Record, error) {
	e, err := sys.UnmarshalEventXML([]byte(x))
	if err != nil {
@@ -204,20 +229,6 @@ func (l *winEventLog) buildRecordFromXML(x string, recoveredErr error) (Record,
	return r, nil
}

// reportDrop reports a dropped event log record and the reason as an expvar
// metric. The reason should be a windows syscall.Errno or a string. Any other
// types will be reported under the "other" key.
func reportDrop(reason interface{}) {
	switch t := reason.(type) {
	default:
		dropReasons.Add("other", 1)
	case string:
		dropReasons.Add(t, 1)
	case syscall.Errno:
		dropReasons.Add(strconv.Itoa(int(t)), 1)
	}
}

// newWinEventLog creates and returns a new EventLog for reading event logs
// using the Windows Event Log.
func newWinEventLog(options map[string]interface{}) (EventLog, error) {
@@ -283,7 +294,7 @@ func newWinEventLog(options map[string]interface{}) (EventLog, error) {
}

func init() {
	// Register eventlogging API if it is available.
	// Register wineventlog API if it is available.
	available, _ := win.IsAvailable()
	if available {
		Register(winEventLogAPIName, 0, newWinEventLog, win.Channels)
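This new eventHandles method is the core of the fix for the "The array bounds are invalid" error: when win.EventHandles fails with RPC_S_INVALID_BOUND (errno 1734) because the requested batch of large events overflows the RPC buffer, it closes the subscription, reopens it at lastRead, and retries with half the batch size. A dependency-free sketch of that halving retry under hypothetical names (readBatch stands in for win.EventHandles):

package main

import (
	"errors"
	"fmt"
)

var errInvalidBound = errors.New("RPC_S_INVALID_BOUND")

// readBatch stands in for win.EventHandles; it fails while the requested
// batch is too large for the transport buffer.
func readBatch(size int) ([]int, error) {
	if size > 256 {
		return nil, errInvalidBound
	}
	return make([]int, size), nil
}

// readWithRecovery halves the batch size on each RPC_S_INVALID_BOUND failure,
// mirroring eventHandles' recursive retry. The size that finally worked is
// returned so the caller can keep using it for later reads.
func readWithRecovery(size int) ([]int, int, error) {
	batch, err := readBatch(size)
	switch {
	case err == nil:
		return batch, size, nil
	case err == errInvalidBound && size > 1:
		// The real code also closes and reopens the subscription at the
		// last-read record before retrying.
		return readWithRecovery(size / 2)
	default:
		return nil, 0, err
	}
}

func main() {
	batch, size, err := readWithRecovery(1024)
	fmt.Println(len(batch), size, err) // 256 256 <nil>
}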
69 vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog_test.go generated vendored

@@ -3,8 +3,11 @@
package eventlog

import (
	"expvar"
	"strconv"
	"testing"

	elog "github.com/andrewkroh/sys/windows/svc/eventlog"
	"github.com/stretchr/testify/assert"
)

@@ -52,3 +55,69 @@ func TestWinEventLogBatchReadSize(t *testing.T) {

	assert.Len(t, records, batchReadSize)
}

// TestReadLargeBatchSize tests reading from an event log using a large
// read_batch_size parameter. When combined with large messages this causes
// EvtNext (wineventlog.EventRecords) to fail with RPC_S_INVALID_BOUND error.
func TestReadLargeBatchSize(t *testing.T) {
	configureLogp()
	log, err := initLog(providerName, sourceName, eventCreateMsgFile)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err := uninstallLog(providerName, sourceName, log)
		if err != nil {
			t.Fatal(err)
		}
	}()

	setLogSize(t, providerName, gigabyte)

	// Publish large test messages.
	totalEvents := 1000
	for i := 0; i < totalEvents; i++ {
		err = log.Report(elog.Info, uint32(i%1000), []string{strconv.Itoa(i) + " " + randString(31800)})
		if err != nil {
			t.Fatal("ReportEvent error", err)
		}
	}

	eventlog, err := newWinEventLog(map[string]interface{}{"name": providerName, "batch_read_size": 1024})
	if err != nil {
		t.Fatal(err)
	}
	err = eventlog.Open(0)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err := eventlog.Close()
		if err != nil {
			t.Fatal(err)
		}
	}()

	var eventCount int
	for eventCount < totalEvents {
		records, err := eventlog.Read()
		if err != nil {
			t.Fatal("read error", err)
		}
		if len(records) == 0 {
			t.Fatal("read returned 0 records")
		}
		eventCount += len(records)
	}

	t.Logf("number of records returned: %v", eventCount)

	wineventlog := eventlog.(*winEventLog)
	assert.Equal(t, 1024, wineventlog.maxRead)

	expvar.Do(func(kv expvar.KeyValue) {
		if kv.Key == "read_errors" {
			t.Log(kv)
		}
	})
}
1 vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/syscall_windows.go generated vendored

@@ -13,6 +13,7 @@ const (
	ERROR_INSUFFICIENT_BUFFER      syscall.Errno = 122
	ERROR_NO_MORE_ITEMS            syscall.Errno = 259
	ERROR_NONE_MAPPED              syscall.Errno = 1332
	RPC_S_INVALID_BOUND            syscall.Errno = 1734
	ERROR_INVALID_OPERATION        syscall.Errno = 4317
	ERROR_EVT_MESSAGE_NOT_FOUND    syscall.Errno = 15027
	ERROR_EVT_MESSAGE_ID_NOT_FOUND syscall.Errno = 15028
4 vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go generated vendored

@@ -125,6 +125,10 @@ func Subscribe(
// handles available to return. Close must be called on each returned EvtHandle
// when finished with the handle.
func EventHandles(subscription EvtHandle, maxHandles int) ([]EvtHandle, error) {
	if maxHandles < 1 {
		return nil, fmt.Errorf("maxHandles must be greater than 0")
	}

	eventHandles := make([]EvtHandle, maxHandles)
	var numRead uint32
2 vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json generated vendored

@@ -7,7 +7,7 @@
}
},
"_meta": {
"version": "5.1.1"
"version": "5.1.2"
},
"dynamic_templates": [
{
2 vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json generated vendored

@@ -5,7 +5,7 @@
"norms": false
},
"_meta": {
"version": "5.1.1"
"version": "5.1.2"
},
"dynamic_templates": [
{